repo_name stringlengths 7-92 | path stringlengths 5-149 | copies stringlengths 1-3 | size stringlengths 4-6 | content stringlengths 911-693k | license stringclasses 15 values |
---|---|---|---|---|---|
avistous/QSTK | qstkutil/bollinger.py | 2 | 1494 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Jan 1, 2011
@author: Drew Bratcher
@contact: dbratcher@gatech.edu
@summary: Contains tutorial for backtester and report.
'''
# python imports
import cPickle
from pylab import *
from pandas import *
import datetime as dt
#qstk imports
import qstkutil.DataAccess as da
import qstkutil.qsdateutil as du
def calcavg(period):
sum = zeros(len(period.columns))
count=0
for day in period.index:
sum+=period.xs(day)
count+=1
return(sum/count)
def calcdev(period):
avg=calcavg(period)
devs=zeros(len(period.columns))
count=0
for day in period.index:
devs+=(period.xs(day)-avg*ones(len(period.columns)))*(period.xs(day)-avg*ones(len(period.columns)))
count+=1
return(sqrt(devs/count))
def calcbvals(adjclose, timestamps, stocks, lookback):
for i in adjclose.values:
if i == 'NaN':
adjclose.values[adjclose.values.index(i)]=1;
bvals=DataMatrix(index=[timestamps[0]],columns=stocks,data=[zeros(len(stocks))])
for i in range(1,len(timestamps)):
s=i-lookback
if s<0:
s=0
lookbackperiod=adjclose[s:i]
avg = calcavg(lookbackperiod)
stddevs = calcdev(lookbackperiod)
if not(0 in stddevs):
b=(adjclose[i:i+1]-avg*ones(len(lookbackperiod.columns)))/stddevs
bvals=bvals.append(DataMatrix(index=[timestamps[i]],columns=stocks,data=b))
return(bvals)
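# --- Editor's note: a hedged sketch (not part of QSTK) of the same Bollinger
# b-value computation using modern pandas rolling windows. The column name,
# price series and lookback below are illustrative assumptions only.
import numpy as np
import pandas as pd

def bollinger_b(adjclose, lookback=20):
    # Rolling mean and population standard deviation (ddof=0, matching calcdev above).
    rolling_mean = adjclose.rolling(lookback).mean()
    rolling_std = adjclose.rolling(lookback).std(ddof=0)
    # b-value: how many standard deviations each price sits from its rolling mean.
    return (adjclose - rolling_mean) / rolling_std

if __name__ == '__main__':
    prices = pd.DataFrame({'SPY': 100 + np.cumsum(np.random.randn(60))})
    print(bollinger_b(prices, lookback=20).tail())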
| bsd-3-clause |
ZENGXH/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
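# --- Editor's note: a hedged follow-up sketch (not part of the original example)
# showing one way to correct the sigmoid-shaped miscalibration discussed above via
# sklearn.calibration.CalibratedClassifierCV. The method ('sigmoid') and cv=3 are
# illustrative assumptions, and the estimator is passed positionally because its
# argument name changed across scikit-learn versions.
from sklearn.calibration import CalibratedClassifierCV

calibrated_svc = CalibratedClassifierCV(LinearSVC(C=1.0), method='sigmoid', cv=3)
calibrated_svc.fit(X_train, y_train)
prob_pos_cal = calibrated_svc.predict_proba(X_test)[:, 1]
frac_pos_cal, mean_pred_cal = calibration_curve(y_test, prob_pos_cal, n_bins=10)
# Plotted against the diagonal, this curve should sit closer to "perfectly
# calibrated" than the raw LinearSVC curve above.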
| bsd-3-clause |
RachitKansal/scikit-learn | sklearn/decomposition/pca.py | 192 | 23117 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Michael Eickenberg <michael.eickenberg@inria.fr>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in Gaussian noise of shape (n_samples,
n_features) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
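# --- Editor's note: a hedged usage sketch (not part of scikit-learn) showing how the
# Minka MLE machinery above is reached through the public API; the data shape, seed
# and noise level are illustrative assumptions only. On a low-rank signal with small
# noise, ``n_components_`` is expected to match the signal rank.
#
# >>> import numpy as np
# >>> from sklearn.decomposition import PCA
# >>> rng = np.random.RandomState(0)
# >>> X = np.dot(rng.randn(500, 3), rng.randn(3, 10)) + 1e-2 * rng.randn(500, 10)
# >>> PCA(n_components='mle').fit(X).n_components_  # doctest: +SKIP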
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
compute the estimated data covariance and to score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage exceeds the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
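# --- Editor's note: a hedged check (not part of scikit-learn) illustrating that
# get_precision(), which applies the matrix inversion (Woodbury) lemma, should agree
# with directly inverting get_covariance(); the toy data below is an assumption.
#
# >>> import numpy as np
# >>> from scipy import linalg
# >>> from sklearn.decomposition import PCA
# >>> rng = np.random.RandomState(0)
# >>> X = rng.randn(100, 5)
# >>> pca = PCA(n_components=3).fit(X)
# >>> np.allclose(pca.get_precision(), linalg.inv(pca.get_covariance()))  # doctest: +SKIP
# True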
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected onto the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
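# --- Editor's note: a hedged sketch (not part of scikit-learn). Because score()
# returns the average probabilistic-PCA log-likelihood, it can drive cross-validated
# selection of n_components; the grid, data and cv=3 below are illustrative
# assumptions and use the modern sklearn.model_selection module.
#
# >>> import numpy as np
# >>> from sklearn.decomposition import PCA
# >>> from sklearn.model_selection import GridSearchCV
# >>> rng = np.random.RandomState(0)
# >>> X = np.dot(rng.randn(200, 4), rng.randn(4, 12)) + 0.1 * rng.randn(200, 12)
# >>> search = GridSearchCV(PCA(), {'n_components': [2, 4, 6, 8]}, cv=3)
# >>> search.fit(X).best_params_  # doctest: +SKIP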
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components. If
k is not set then all components are stored and the sum of explained
variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:0909.4061)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected onto the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
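# --- Editor's note: a hedged sketch (not part of scikit-learn). RandomizedPCA is
# built on randomized_svd (used in _fit above), which can also be called directly;
# matrix size, rank and n_iter below are illustrative assumptions only.
#
# >>> import numpy as np
# >>> from sklearn.utils.extmath import randomized_svd
# >>> rng = np.random.RandomState(0)
# >>> M = rng.randn(1000, 50)
# >>> U, S, V = randomized_svd(M, n_components=5, n_iter=3, random_state=0)
# >>> U.shape, S.shape, V.shape  # doctest: +SKIP
# ((1000, 5), (5,), (5, 50))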
| bsd-3-clause |
dvro/UnbalancedDataset | imblearn/under_sampling/tests/test_nearmiss_3.py | 2 | 7992 | """Test the module nearmiss."""
from __future__ import print_function
import os
import numpy as np
from numpy.testing import assert_raises
from numpy.testing import assert_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_warns
from sklearn.datasets import make_classification
from sklearn.utils.estimator_checks import check_estimator
from collections import Counter
from imblearn.under_sampling import NearMiss
# Generate a global dataset to use
RND_SEED = 0
X = np.array([[1.17737838, -0.2002118],
[0.4960075, 0.86130762],
[-0.05903827, 0.10947647],
[0.91464286, 1.61369212],
[-0.54619583, 1.73009918],
[-0.60413357, 0.24628718],
[0.45713638, 1.31069295],
[-0.04032409, 3.01186964],
[0.03142011, 0.12323596],
[0.50701028, -0.17636928],
[-0.80809175, -1.09917302],
[-0.20497017, -0.26630228],
[0.99272351, -0.11631728],
[-1.95581933, 0.69609604],
[1.15157493, -1.2981518]])
Y = np.array([1, 2, 1, 0, 2, 1, 2, 2, 1, 2, 0, 0, 2, 1, 2])
VERSION_NEARMISS = 3
def test_nearmiss_sk_estimator():
"""Test the sklearn estimator compatibility"""
check_estimator(NearMiss)
def test_nearmiss_bad_ratio():
"""Test either if an error is raised with a wrong decimal value for
the ratio"""
# Define a negative ratio
ratio = -1.0
nm1 = NearMiss(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, nm1.fit, X, Y)
# Define a ratio greater than 1
ratio = 100.0
nm1 = NearMiss(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, nm1.fit, X, Y)
# Define ratio as an unknown string
ratio = 'rnd'
nm1 = NearMiss(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, nm1.fit, X, Y)
# Define ratio as a list which is not supported
ratio = [.5, .5]
nm1 = NearMiss(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, nm1.fit, X, Y)
def test_nearmiss_wrong_version():
"""Test either if an error is raised when the version is unknown."""
version = 1000
nm3 = NearMiss(version=version, random_state=RND_SEED)
assert_raises(ValueError, nm3.fit_sample, X, Y)
def test_nearmiss_init():
"""Test the initialisation of the object"""
# Define a ratio
ratio = 1.
nm3 = NearMiss(ratio=ratio, random_state=RND_SEED,
version=VERSION_NEARMISS)
assert_equal(nm3.version, VERSION_NEARMISS)
assert_equal(nm3.size_ngh, 3)
assert_equal(nm3.ratio, ratio)
assert_equal(nm3.random_state, RND_SEED)
def test_nearmiss_fit_single_class():
"""Test either if an error when there is a single class"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
nm3 = NearMiss(ratio=ratio, random_state=RND_SEED,
version=VERSION_NEARMISS)
# Resample the data
# Create a wrong y
y_single_class = np.zeros((X.shape[0], ))
assert_warns(RuntimeWarning, nm3.fit, X, y_single_class)
def test_nm_fit_invalid_ratio():
"""Test either if an error is raised when the balancing ratio to fit is
smaller than the one of the data"""
# Create the object
ratio = 1. / 10000.
nm = NearMiss(ratio=ratio, random_state=RND_SEED)
# Fit the data
assert_raises(RuntimeError, nm.fit, X, Y)
def test_nm3_fit():
"""Test the fitting method"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
nm3 = NearMiss(ratio=ratio, random_state=RND_SEED,
version=VERSION_NEARMISS)
# Fit the data
nm3.fit(X, Y)
# Check if the data information have been computed
assert_equal(nm3.min_c_, 0)
assert_equal(nm3.maj_c_, 2)
assert_equal(nm3.stats_c_[0], 3)
assert_equal(nm3.stats_c_[1], 5)
assert_equal(nm3.stats_c_[2], 7)
def test_nm3_sample_wt_fit():
"""Test either if an error is raised when sample is called before
fitting"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
nm3 = NearMiss(ratio=ratio, random_state=RND_SEED,
version=VERSION_NEARMISS)
assert_raises(RuntimeError, nm3.sample, X, Y)
def test_nm3_fit_sample_auto():
"""Test fit and sample routines with auto ratio"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
nm3 = NearMiss(ratio=ratio, random_state=RND_SEED,
version=VERSION_NEARMISS)
# Fit and sample
X_resampled, y_resampled = nm3.fit_sample(X, Y)
X_gt = np.array([[0.91464286, 1.61369212],
[-0.80809175, -1.09917302],
[-0.20497017, -0.26630228],
[1.17737838, -0.2002118],
[-0.60413357, 0.24628718],
[0.03142011, 0.12323596],
[1.15157493, -1.2981518],
[-0.54619583, 1.73009918],
[0.99272351, -0.11631728]])
y_gt = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_nm3_fit_sample_auto_indices():
"""Test fit and sample routines with auto ratio and indices support"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
nm3 = NearMiss(ratio=ratio, random_state=RND_SEED,
version=VERSION_NEARMISS, return_indices=True)
# Fit and sample
X_resampled, y_resampled, idx_under = nm3.fit_sample(X, Y)
X_gt = np.array([[0.91464286, 1.61369212],
[-0.80809175, -1.09917302],
[-0.20497017, -0.26630228],
[1.17737838, -0.2002118],
[-0.60413357, 0.24628718],
[0.03142011, 0.12323596],
[1.15157493, -1.2981518],
[-0.54619583, 1.73009918],
[0.99272351, -0.11631728]])
y_gt = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
idx_gt = np.array([3, 10, 11, 0, 2, 3, 5, 1, 4])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
assert_array_equal(idx_under, idx_gt)
def test_nm3_fit_sample_half():
"""Test fit and sample routines with .5 ratio"""
# Define the parameter for the under-sampling
ratio = .7
# Create the object
nm3 = NearMiss(ratio=ratio, random_state=RND_SEED,
version=VERSION_NEARMISS)
# Fit and sample
X_resampled, y_resampled = nm3.fit_sample(X, Y)
X_gt = np.array([[0.91464286, 1.61369212],
[-0.80809175, -1.09917302],
[-0.20497017, -0.26630228],
[1.17737838, -0.2002118],
[-0.60413357, 0.24628718],
[0.03142011, 0.12323596],
[-0.05903827, 0.10947647],
[1.15157493, -1.2981518],
[-0.54619583, 1.73009918],
[0.99272351, -0.11631728],
[0.45713638, 1.31069295]])
y_gt = np.array([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_nm3_sample_wrong_X():
"""Test either if an error is raised when X is different at fitting
and sampling"""
# Create the object
nm3 = NearMiss(random_state=RND_SEED, version=VERSION_NEARMISS)
nm3.fit(X, Y)
assert_raises(RuntimeError, nm3.sample, np.random.random((100, 40)),
np.array([0] * 50 + [1] * 50))
def test_continuous_error():
"""Test either if an error is raised when the target are continuous
type"""
# continuous case
y = np.linspace(0, 1, 15)
nm = NearMiss(random_state=RND_SEED, version=VERSION_NEARMISS)
assert_warns(UserWarning, nm.fit, X, y)
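# --- Editor's note: a hedged sketch (not part of these tests). The API exercised
# above (ratio=..., random_state=..., fit_sample) is the historical one; recent
# imbalanced-learn releases expose NearMiss-3 roughly as below, reusing the module
# fixtures X and Y. Exact keyword names may differ between versions.
#
# >>> from collections import Counter
# >>> from imblearn.under_sampling import NearMiss
# >>> nm3 = NearMiss(version=3)
# >>> X_res, y_res = nm3.fit_resample(X, Y)
# >>> Counter(y_res)  # doctest: +SKIP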
| mit |
ahoyosid/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
CartoDB/crankshaft | src/py/crankshaft/test/test_clustering_kmeans.py | 6 | 2722 | import unittest
import numpy as np
from helper import fixture_file
from crankshaft.clustering import Kmeans
from crankshaft.analysis_data_provider import AnalysisDataProvider
import crankshaft.clustering as cc
from crankshaft import random_seeds
import json
from collections import OrderedDict
class FakeDataProvider(AnalysisDataProvider):
def __init__(self, mocked_result):
self.mocked_result = mocked_result
def get_spatial_kmeans(self, query):
return self.mocked_result
def get_nonspatial_kmeans(self, query):
return self.mocked_result
class KMeansTest(unittest.TestCase):
"""Testing class for k-means spatial"""
def setUp(self):
self.cluster_data = json.loads(
open(fixture_file('kmeans.json')).read())
self.params = {"subquery": "select * from table",
"no_clusters": "10"}
def test_kmeans(self):
"""
"""
data = [{'xs': d['xs'],
'ys': d['ys'],
'ids': d['ids']} for d in self.cluster_data]
random_seeds.set_random_seeds(1234)
kmeans = Kmeans(FakeDataProvider(data))
clusters = kmeans.spatial('subquery', 2)
labels = [a[1] for a in clusters]
c1 = [a for a in clusters if a[1] == 0]
c2 = [a for a in clusters if a[1] == 1]
self.assertEqual(len(np.unique(labels)), 2)
self.assertEqual(len(c1), 20)
self.assertEqual(len(c2), 20)
class KMeansNonspatialTest(unittest.TestCase):
"""Testing class for k-means non-spatial"""
def setUp(self):
self.params = {"subquery": "SELECT * FROM TABLE",
"n_clusters": 5}
def test_kmeans_nonspatial(self):
"""
test for k-means non-spatial
"""
# data from:
# http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html#sklearn-cluster-kmeans
data_raw = [OrderedDict([("arr_col1", [1, 1, 1, 4, 4, 4]),
("arr_col2", [2, 4, 0, 2, 4, 0]),
("rowid", [1, 2, 3, 4, 5, 6])])]
random_seeds.set_random_seeds(1234)
kmeans = Kmeans(FakeDataProvider(data_raw))
clusters = kmeans.nonspatial('subquery', ['col1', 'col2'], 2)
cl1 = clusters[0][0]
cl2 = clusters[3][0]
for idx, val in enumerate(clusters):
if idx < 3:
self.assertEqual(val[0], cl1)
else:
self.assertEqual(val[0], cl2)
# raises exception for no data
with self.assertRaises(Exception):
kmeans = Kmeans(FakeDataProvider([]))
kmeans.nonspatial('subquery', ['col1', 'col2'], 2)
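# --- Editor's note: a hedged sketch (not part of crankshaft). The non-spatial test
# above mirrors the toy example referenced from the scikit-learn KMeans docs; called
# directly it looks roughly like this. n_init is set explicitly only to avoid
# defaults-change warnings in newer scikit-learn versions.
#
# >>> import numpy as np
# >>> from sklearn.cluster import KMeans
# >>> data = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]])
# >>> km = KMeans(n_clusters=2, random_state=0, n_init=10).fit(data)
# >>> km.labels_  # doctest: +SKIP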
| bsd-3-clause |
nismod/energy_demand | energy_demand/plotting/fig_p2_weather_val.py | 1 | 14554 | """Fig 2 figure
"""
import numpy as np
import matplotlib.pyplot as plt
#from scipy.stats import mstats
import pandas as pd
import geopandas as gpd
from scipy import stats
from shapely.geometry import Point
import matplotlib.pyplot as plt
from collections import defaultdict
from matplotlib.colors import Normalize
from energy_demand.plotting import result_mapping
from energy_demand.technologies import tech_related
from energy_demand.plotting import basic_plot_functions
def run(
data_input,
regions,
simulation_yr_to_plot,
population,
fueltype_str,
path_shapefile,
fig_name
):
"""
"""
fueltype_int = tech_related.get_fueltype_int(fueltype_str)
# -----------------------------------------------------------
# Iterate overall weather_yrs and store data in dataframe
# (columns = timestep, rows: value of year)
# -----------------------------------------------------------
# List of selected data for every weather year and region (which is then converted to array)
weather_yrs_data = defaultdict(dict)
print("Weather yrs: " + str(list(data_input.keys())), flush=True)
for weather_yr, data_weather_yr in data_input.items():
# Weather year specific data for every region
regions_fuel = data_weather_yr[simulation_yr_to_plot][fueltype_int] # Select fueltype
for region_nr, region_name in enumerate(regions):
try:
weather_yrs_data[region_name].append(regions_fuel[region_nr])
except (KeyError, AttributeError):
weather_yrs_data[region_name] = [regions_fuel[region_nr]]
regional_statistics_columns = [
'name',
'mean_peak_h',
'diff_av_max',
'mean_peak_h_pp',
'diff_av_max_pp',
'std_dev_average_every_h',
'std_dev_peak_h_norm_pop']
df_stats = pd.DataFrame(columns=regional_statistics_columns)
for region_name, region_data in weather_yrs_data.items():
# Convert regional data to dataframe
region_data_array = np.array(region_data)
df = pd.DataFrame(
region_data_array,
columns=range(8760))
# Calculate regional statistics
mean = df.mean(axis=0)
std_dev = df.std(axis=0) #standard deviation across every hour
# Get maximum per colum
#max_every_h = df.max()
#colum_max_h = max_every_h.argmax() #get colum (respesctively hour) of maximum value
# Average standard deviation across every hour
std_dev_average_every_h = np.std(list(std_dev))
max_entry = df.max(axis=0) #maximum entry for every hour
min_entry = df.min(axis=0) #minimum entry for every hour
# Get hour number with maximum demand
hour_nr_max = max_entry.argmax()
hour_nr_min = min_entry.argmin()
# standard deviation of peak hour
std_dev_peak_h = std_dev[hour_nr_max]
# Difference between average and max
diff_av_max = max_entry[hour_nr_max] - mean[hour_nr_max]
mean_peak_h = mean[hour_nr_max]
# Convert GW to KW
diff_av_max = diff_av_max * 1000000 #GW to KW
mean_peak_h = mean_peak_h * 1000000 #GW to KW
# Weight with population
for region_nr, n in enumerate(regions):
if region_name == n:
nr_of_reg = region_nr
break
pop = population[nr_of_reg]
# Divide standard deviation of peak hour by population
# which gives measure of weather variability in peak hour
std_dev_peak_h_norm_pop = std_dev_peak_h / pop
diff_av_max_pp = diff_av_max / pop
mean_peak_h_pp = mean_peak_h / pop
line_entry = [[
str(region_name),
mean_peak_h,
diff_av_max,
mean_peak_h_pp,
diff_av_max_pp,
std_dev_average_every_h,
std_dev_peak_h_norm_pop]]
line_df = pd.DataFrame(
line_entry, columns=regional_statistics_columns)
df_stats = df_stats.append(line_df)
print(df_stats['diff_av_max'].max())
print(df_stats['mean_peak_h'].max())
print(df_stats['std_dev_peak_h_norm_pop'].max())
print("-")
print(df_stats['diff_av_max_pp'].max())
print(df_stats['diff_av_max_pp'].min())
print("-")
print(df_stats['mean_peak_h_pp'].max())
print(df_stats['mean_peak_h_pp'].min())
# ---------------
# Create spatial maps
# http://darribas.org/gds15/content/labs/lab_03.html
# http://nbviewer.jupyter.org/gist/jorisvandenbossche/57d392c085901eb4981054402b37b6b1
# ---------------
# Load uk shapefile
uk_shapefile = gpd.read_file(path_shapefile)
# Merge stats to geopanda
shp_gdp_merged = uk_shapefile.merge(
df_stats,
on='name')
# Assign projection
crs = {'init': 'epsg:27700'} #27700: OSGB_1936_British_National_Grid
uk_gdf = gpd.GeoDataFrame(shp_gdp_merged, crs=crs)
ax = uk_gdf.plot()
# Assign bin colors according to defined cmap and whether
# plot with min_max values or only min/max values
#bin_values = [0, 0.0025, 0.005, 0.0075, 0.01]
#bin_values = [0, 0.02, 0.04, 0.06, 0.08, 0.1] #list(np.arange(0.0, 1.0, 0.1))
# Field to plot
field_to_plot = "diff_av_max_pp" # Difference between average and peak per person in KWh
#field_to_plot = "diff_av_max" # Difference between average and peak
field_to_plot = 'std_dev_peak_h_norm_pop'
nr_of_intervals = 6
bin_values = result_mapping.get_reasonable_bin_values_II(
data_to_plot=list(uk_gdf[field_to_plot]),
nr_of_intervals=nr_of_intervals)
print(float(uk_gdf[field_to_plot]))
print("BINS " + str(bin_values))
uk_gdf, cmap_rgb_colors, color_zero, min_value, max_value = user_defined_bin_classification(
uk_gdf,
field_to_plot,
bin_values=bin_values)
# plot with face color attribute
uk_gdf.plot(ax=ax, facecolor=uk_gdf['bin_color'], edgecolor='black', linewidth=0.5)
#shp_gdp_merged.plot(column='diff_av_max', scheme='QUANTILES', k=5, cmap='OrRd', linewidth=0.1)
#ax = uk_gdf.plot(column='diff_av_max', scheme='QUANTILES', k=5, cmap='OrRd', linewidth=0.1)
#uk_gdf[uk_gdf['name'] == 'E06000024'].plot(ax=ax, facecolor='green', edgecolor='black')
#uk_gdf[uk_gdf['diff_av_max'] < 0.01].plot(ax=ax, facecolor='blue', edgecolor='black')
# Get legend patches TODO IMPROVE
# TODO IMPROVE: MAKE CORRECT ONE FOR NEW PROCESSING
legend_handles = result_mapping.get_legend_handles(
bin_values[1:-1],
cmap_rgb_colors,
color_zero,
min_value,
max_value)
plt.legend(
handles=legend_handles,
title="tittel_elgend",
prop={'size': 8},
loc='upper center',
bbox_to_anchor=(0.5, -0.05),
frameon=False)
# PLot bins on plot
plt.text(
0,
-20,
bin_values[:-1], #leave away maximum value
fontsize=8)
plt.tight_layout()
plt.show()
raise Exception
plt.savefig(fig_name)
plt.close()
def norm_cmap(values, cmap, vmin=None, vmax=None):
"""
Normalize and set colormap
Parameters
----------
values : Series or array to be normalized
cmap : matplotlib Colormap
normalize : matplotlib.colors.Normalize
cm : matplotlib.cm
vmin : Minimum value of colormap. If None, uses min(values).
vmax : Maximum value of colormap. If None, uses max(values).
Returns
-------
n_cmap : mapping of normalized values to colormap (cmap)
Source
------
https://ocefpaf.github.io/python4oceanographers/blog/2015/08/24/choropleth/
"""
mn = vmin or min(values)
mx = vmax or max(values)
norm = Normalize(vmin=mn, vmax=mx)
n_cmap = plt.cm.ScalarMappable(norm=norm, cmap=cmap)
rgb_colors = [n_cmap.to_rgba(value) for value in values]
return n_cmap, rgb_colors
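# --- Editor's note: a hedged usage sketch for norm_cmap above (not part of
# energy_demand); the values and colormap name are illustrative assumptions only.
#
# >>> n_cmap, rgb = norm_cmap([0.0, 0.5, 1.0], cmap='coolwarm')
# >>> len(rgb)  # one RGBA tuple per input value
# 3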
def plot_colors(rgb_colors):
"""function to plot colors
"""
nr_dots = len(rgb_colors)
dots = []
x = []
y = []
for i in range(nr_dots):
x.append(i + 20)
y.append(i + 20)
#plt.scatter(x, y, c=cmap, s=50)
plt.scatter(x, y, c=rgb_colors, s=50)
plt.show()
def user_defined_bin_classification(
input_df,
field_name,
bin_values,
cmap_diverging=None,
cmap_sequential=None
):
"""Classify values according to bins
Arguments
---------
input_df : dataframe
Dataframe to plot
field_name : str
Column (field) in ``input_df`` to classify and colour
bin_values : list
User-defined bin edges
cmap_sequential : str
'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds','YlOrBr',
'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu', 'GnBu', 'PuBu', 'YlGnBu',
'PuBuGn', 'BuGn', 'YlGn'
cmap_diverging : str
'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu', 'RdYlBu', 'RdYlGn',
'Spectral', 'coolwarm', 'bwr', 'seismic'
Info
-----
Include 0 in min_max_plot == False
Python colors:
https://matplotlib.org/1.4.1/examples/color/colormaps_reference.html
https://ocefpaf.github.io/python4oceanographers/blog/2015/08/24/choropleth/
https://matplotlib.org/examples/color/colormaps_reference.html
"""
# Check if largest value is large than last bin
max_real_value = float(input_df[field_name].max())
min_real_value = float(input_df[field_name].min())
if max_real_value > 0 and min_real_value < 0:
min_max_plot = True
else:
min_max_plot = False
if not min_max_plot:
# If only minus values
if max_real_value < 0: #only min values
if min_real_value > bin_values[0]:
# add "higher as bin"
bin_values.insert(0, min_real_value)
elif bin_values[0] < min_real_value:
crit_append_val = False
raise Exception("The minimum user defined bin smaller is larger than minimum existing value")
if not cmap_sequential:
cmap, cmap_rgb_colors = norm_cmap(bin_values[:1], cmap='Purples') #'YlOrBr'
else:
cmap, cmap_rgb_colors = norm_cmap(bin_values[:1], cmap=cmap_sequential) #'YlOrBr'
else: #only positive values
if max_real_value > bin_values[-1]:
# add "higher as bin"
bin_values.append(max_real_value)
elif bin_values[-1] > max_real_value:
raise Exception("The maximum user defined bin value is larger than maximum min: min: {} max: {}".format(bin_values[-1], max_real_value))
if not cmap_sequential:
cmap, cmap_rgb_colors = norm_cmap(bin_values[1:], cmap='Purples')
else:
cmap, cmap_rgb_colors = norm_cmap(bin_values[1:], cmap=cmap_sequential)
# e.g. [0, 3, 6] --> generates (0, 3], and (3, 6] bin
input_df['bin_color'] = pd.cut(
input_df[field_name],
bin_values,
include_lowest=True,
right=True,
labels=cmap_rgb_colors)
color_zero = 'grey' # default
else:
if max_real_value < bin_values[-1]:
raise Exception("The maximum user defined bin value is larger than maximum value {} {}".format(bin_values[-1], max_real_value))
elif min_real_value > bin_values[0]:
raise Exception("The minimum user defined bin smaller is larger than minimum existing value")
else:
pass
# Add minimum and maximum value
bin_values.append(max_real_value)
bin_values.insert(0, min_real_value)
if not cmap_diverging:
cmap, cmap_rgb_colors = norm_cmap(bin_values, cmap='coolwarm')
else:
cmap, cmap_rgb_colors = norm_cmap(bin_values, cmap=cmap_diverging)
# Reclassify zero value
positive_bin_colors = []
minus_bin_colors = []
minus_bins = []
positive_bins = [0]
for cnt, i in enumerate(bin_values):
if i < 0:
minus_bin_colors.append(cmap_rgb_colors[cnt])
minus_bins.append(i)
elif i == 0:
color_zero = cmap_rgb_colors[cnt]
else:
positive_bin_colors.append(cmap_rgb_colors[cnt])
positive_bins.append(i)
minus_bins.append(0)
# ----
# Classify
# ----
# Classify values in dataframe and assign color value as "bin" column
minus_dataframe = input_df[field_name][input_df[field_name] < 0].to_frame()
zero_dataframe = input_df[field_name][input_df[field_name] == 0].to_frame()
plus_dataframe = input_df[field_name][input_df[field_name] > 0].to_frame()
# e.g. [0, 3, 6] --> generates (0, 3], and (3, 6] bin
minus_dataframe['bin_color'] = pd.cut(
minus_dataframe[field_name],
minus_bins,
include_lowest=True,
right=True,
labels=minus_bin_colors)
zero_dataframe['bin_color'] = [color_zero for _ in range(len(zero_dataframe))] #create list with zero color
plus_dataframe['bin_color'] = pd.cut(
plus_dataframe[field_name],
positive_bins,
include_lowest=True,
right=True,
labels=positive_bin_colors)
# Add bins
input_df = minus_dataframe.append(zero_dataframe)
input_df = input_df.append(plus_dataframe)
return input_df, cmap_rgb_colors, color_zero, min_real_value, max_real_value
'''ax = input_df.plot()
# Calculate color values
#uk_gdf[uk_gdf['name'] == 'E06000024'].plot(ax=ax, facecolor='green', edgecolor='black')
#uk_gdf[uk_gdf['diff_av_max'] < 0.01].plot(ax=ax, facecolor='blue', edgecolor='black')
# Convert dict to dataframe
#df = pd.DataFrame.from_dict(input_df, orient='index')
#df['Coordinates'] = list(zip(df.longitude, df.latitude))
#df['Coordinates'] = df['Coordinates'].apply(Point)
# Load uk shapefile
uk_shapefile = gpd.read_file(path_shapefile)
# Assign correct projection
crs = {'init': 'epsg:27700'} #27700 == OSGB_1936_British_National_Grid
uk_gdf = gpd.GeoDataFrame(uk_shapefile, crs=crs)
# Transform
uk_gdf = uk_gdf.to_crs({'init' :'epsg:4326'})
# Plot
ax = uk_gdf.plot(color='white', edgecolor='black')
# print coordinates
#world.plot(column='gdp_per_cap', cmap='OrRd', scheme='quantiles');
plt.savefig(fig_path)'''
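# --- Editor's note: a hedged, stripped-down sketch (not part of energy_demand) of the
# pandas.cut step that user_defined_bin_classification relies on; bin edges and colour
# labels here are made-up assumptions.
import pandas as pd

_values = pd.Series([-3.0, -0.5, 0.0, 0.2, 1.7, 4.2])
_bin_edges = [-5, -1, 0, 1, 5]                               # assumed bin edges
_colours = ['#2166ac', '#92c5de', '#f4a582', '#b2182b']      # one colour per bin
print(pd.cut(_values, _bin_edges, include_lowest=True, right=True, labels=_colours))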
| mit |
cython-testbed/pandas | pandas/core/computation/pytables.py | 2 | 19478 | """ manage PyTables query interface via Expressions """
import ast
from functools import partial
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_list_like
import pandas.core.common as com
from pandas.compat import u, string_types, DeepChainMap
from pandas.core.base import StringMixin
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
from pandas.core.computation import expr, ops
from pandas.core.computation.ops import is_term, UndefinedVariableError
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.common import _ensure_decoded
from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type
class Scope(expr.Scope):
__slots__ = 'queryables',
def __init__(self, level, global_dict=None, local_dict=None,
queryables=None):
super(Scope, self).__init__(level + 1, global_dict=global_dict,
local_dict=local_dict)
self.queryables = queryables or dict()
class Term(ops.Term):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = StringMixin.__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
super(Term, self).__init__(name, env, side=side, encoding=encoding)
def _resolve_name(self):
# must be a queryable
if self.side == 'left':
if self.name not in self.env.queryables:
raise NameError('name {name!r} is not defined'
.format(name=self.name))
return self.name
# resolve the rhs (and allow it to be None)
try:
return self.env.resolve(self.name, is_local=False)
except UndefinedVariableError:
return self.name
@property
def value(self):
return self._value
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super(Constant, self).__init__(value, env, side=side,
encoding=encoding)
def _resolve_name(self):
return self._name
class BinOp(ops.BinOp):
_max_selectors = 31
def __init__(self, op, lhs, rhs, queryables, encoding):
super(BinOp, self).__init__(op, lhs, rhs)
self.queryables = queryables
self.encoding = encoding
self.filter = None
self.condition = None
def _disallow_scalar_only_bool_ops(self):
pass
def prune(self, klass):
def pr(left, right):
""" create and return a new specialized BinOp from myself """
if left is None:
return right
elif right is None:
return left
k = klass
if isinstance(left, ConditionBinOp):
if (isinstance(left, ConditionBinOp) and
isinstance(right, ConditionBinOp)):
k = JointConditionBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
elif isinstance(left, FilterBinOp):
if (isinstance(left, FilterBinOp) and
isinstance(right, FilterBinOp)):
k = JointFilterBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
return k(self.op, left, right, queryables=self.queryables,
encoding=self.encoding).evaluate()
left, right = self.lhs, self.rhs
if is_term(left) and is_term(right):
res = pr(left.value, right.value)
elif not is_term(left) and is_term(right):
res = pr(left.prune(klass), right.value)
elif is_term(left) and not is_term(right):
res = pr(left.value, right.prune(klass))
elif not (is_term(left) or is_term(right)):
res = pr(left.prune(klass), right.prune(klass))
return res
def conform(self, rhs):
""" inplace conform rhs """
if not is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
@property
def is_valid(self):
""" return True if this is a valid field """
return self.lhs in self.queryables
@property
def is_in_table(self):
""" return True if this is a valid column name for generation (e.g. an
actual column in the table) """
return self.queryables.get(self.lhs) is not None
@property
def kind(self):
""" the kind of my field """
return getattr(self.queryables.get(self.lhs), 'kind', None)
@property
def meta(self):
""" the meta of my field """
return getattr(self.queryables.get(self.lhs), 'meta', None)
@property
def metadata(self):
""" the metadata of my field """
return getattr(self.queryables.get(self.lhs), 'metadata', None)
def generate(self, v):
""" create and return the op string for this TermValue """
val = v.tostring(self.encoding)
return "({lhs} {op} {val})".format(lhs=self.lhs, op=self.op, val=val)
def convert_value(self, v):
""" convert the expression that is in the term to something that is
accepted by pytables """
def stringify(value):
if self.encoding is not None:
encoder = partial(pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
kind = _ensure_decoded(self.kind)
meta = _ensure_decoded(self.meta)
if kind == u('datetime64') or kind == u('datetime'):
if isinstance(v, (int, float)):
v = stringify(v)
v = _ensure_decoded(v)
v = pd.Timestamp(v)
if v.tz is not None:
v = v.tz_convert('UTC')
return TermValue(v, v.value, kind)
elif kind == u('timedelta64') or kind == u('timedelta'):
v = _coerce_scalar_to_timedelta_type(v, unit='s').value
return TermValue(int(v), v, kind)
elif meta == u('category'):
metadata = com.values_from_object(self.metadata)
result = metadata.searchsorted(v, side='left')
# result returns 0 if v is first element or if v is not in metadata
# check that metadata contains v
if not result and v not in metadata:
result = -1
return TermValue(result, result, u('integer'))
elif kind == u('integer'):
v = int(float(v))
return TermValue(v, v, kind)
elif kind == u('float'):
v = float(v)
return TermValue(v, v, kind)
elif kind == u('bool'):
if isinstance(v, string_types):
v = not v.strip().lower() in [u('false'), u('f'), u('no'),
u('n'), u('none'), u('0'),
u('[]'), u('{}'), u('')]
else:
v = bool(v)
return TermValue(v, v, kind)
elif isinstance(v, string_types):
# string quoting
return TermValue(v, stringify(v), u('string'))
else:
raise TypeError("Cannot compare {v} of type {typ} to {kind} column"
.format(v=v, typ=type(v), kind=kind))
def convert_values(self):
pass
class FilterBinOp(BinOp):
def __unicode__(self):
return pprint_thing("[Filter : [{lhs}] -> [{op}]"
.format(lhs=self.filter[0], op=self.filter[1]))
def invert(self):
""" invert the filter """
if self.filter is not None:
f = list(self.filter)
f[1] = self.generate_filter_op(invert=True)
self.filter = tuple(f)
return self
def format(self):
""" return the actual filter format """
return [self.filter]
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [{slf}]"
.format(slf=self))
rhs = self.conform(self.rhs)
values = [TermValue(v, v, self.kind) for v in rhs]
if self.is_in_table:
# if too many values to create the expression, use a filter instead
if self.op in ['==', '!='] and len(values) > self._max_selectors:
filter_op = self.generate_filter_op()
self.filter = (
self.lhs,
filter_op,
pd.Index([v.value for v in values]))
return self
return None
# equality conditions
if self.op in ['==', '!=']:
filter_op = self.generate_filter_op()
self.filter = (
self.lhs,
filter_op,
pd.Index([v.value for v in values]))
else:
raise TypeError("passing a filterable condition to a non-table "
"indexer [{slf}]".format(slf=self))
return self
def generate_filter_op(self, invert=False):
if (self.op == '!=' and not invert) or (self.op == '==' and invert):
return lambda axis, vals: ~axis.isin(vals)
else:
return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
def format(self):
raise NotImplementedError("unable to collapse Joint Filters")
def evaluate(self):
return self
class ConditionBinOp(BinOp):
def __unicode__(self):
return pprint_thing("[Condition : [{cond}]]"
.format(cond=self.condition))
def invert(self):
""" invert the condition """
# if self.condition is not None:
# self.condition = "~(%s)" % self.condition
# return self
raise NotImplementedError("cannot use an invert condition when "
"passing to numexpr")
def format(self):
""" return the actual ne format """
return self.condition
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [{slf}]"
.format(slf=self))
# convert values if we are in the table
if not self.is_in_table:
return None
rhs = self.conform(self.rhs)
values = [self.convert_value(v) for v in rhs]
# equality conditions
if self.op in ['==', '!=']:
# too many values to create the expression?
if len(values) <= self._max_selectors:
vs = [self.generate(v) for v in values]
self.condition = "({cond})".format(cond=' | '.join(vs))
# use a filter after reading
else:
return None
else:
self.condition = self.generate(values[0])
return self
class JointConditionBinOp(ConditionBinOp):
def evaluate(self):
self.condition = "({lhs} {op} {rhs})".format(lhs=self.lhs.condition,
op=self.op,
rhs=self.rhs.condition)
return self
class UnaryOp(ops.UnaryOp):
def prune(self, klass):
if self.op != '~':
raise NotImplementedError("UnaryOp only support invert type ops")
operand = self.operand
operand = operand.prune(klass)
if operand is not None:
if issubclass(klass, ConditionBinOp):
if operand.condition is not None:
return operand.invert()
elif issubclass(klass, FilterBinOp):
if operand.filter is not None:
return operand.invert()
return None
_op_classes = {'unary': UnaryOp}
class ExprVisitor(BaseExprVisitor):
const_type = Constant
term_type = Term
def __init__(self, env, engine, parser, **kwargs):
super(ExprVisitor, self).__init__(env, engine, parser)
for bin_op in self.binary_ops:
bin_node = self.binary_op_nodes_map[bin_op]
setattr(self, 'visit_{node}'.format(node=bin_node),
lambda node, bin_op=bin_op: partial(BinOp, bin_op,
**kwargs))
def visit_UnaryOp(self, node, **kwargs):
if isinstance(node.op, (ast.Not, ast.Invert)):
return UnaryOp('~', self.visit(node.operand))
elif isinstance(node.op, ast.USub):
return self.const_type(-self.visit(node.operand).value, self.env)
elif isinstance(node.op, ast.UAdd):
raise NotImplementedError('Unary addition not supported')
def visit_Index(self, node, **kwargs):
return self.visit(node.value).value
def visit_Assign(self, node, **kwargs):
cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0],
comparators=[node.value])
return self.visit(cmpr)
def visit_Subscript(self, node, **kwargs):
        # only allow simple subscripts
value = self.visit(node.value)
slobj = self.visit(node.slice)
try:
value = value.value
except AttributeError:
pass
try:
return self.const_type(value[slobj], self.env)
except TypeError:
raise ValueError("cannot subscript {value!r} with "
"{slobj!r}".format(value=value, slobj=slobj))
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = node.ctx.__class__
if ctx == ast.Load:
# resolve the value
resolved = self.visit(value)
# try to get the value to see if we are another expression
try:
resolved = resolved.value
except (AttributeError):
pass
try:
return self.term_type(getattr(resolved, attr), self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError("Invalid Attribute context {name}"
.format(name=ctx.__name__))
def translate_In(self, op):
return ast.Eq() if isinstance(op, ast.In) else op
def _rewrite_membership_op(self, node, left, right):
return self.visit(node.op), node.op, left, right
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (Expr, string_types)) or is_list_like(w)):
raise TypeError("where must be passed as a string, Expr, "
"or list-like of Exprs")
return w
class Expr(expr.Expr):
""" hold a pytables like expression, comprised of possibly multiple 'terms'
Parameters
----------
where : string term expression, Expr, or list-like of Exprs
queryables : a "kinds" map (dict of column name -> kind), or None if column
is non-indexable
encoding : an encoding that will encode the query terms
Returns
-------
an Expr object
Examples
--------
'index>=date'
"columns=['A', 'D']"
'columns=A'
'columns==A'
"~(columns=['A','B'])"
'index>df.index[3] & string="bar"'
'(index>df.index[3] & index<=df.index[6]) | string="bar"'
"ts>=Timestamp('2012-02-01')"
"major_axis>=20130101"
"""
def __init__(self, where, queryables=None, encoding=None, scope_level=0):
where = _validate_where(where)
self.encoding = encoding
self.condition = None
self.filter = None
self.terms = None
self._visitor = None
# capture the environment if needed
local_dict = DeepChainMap()
if isinstance(where, Expr):
local_dict = where.env.scope
where = where.expr
elif isinstance(where, (list, tuple)):
for idx, w in enumerate(where):
if isinstance(w, Expr):
local_dict = w.env.scope
else:
w = _validate_where(w)
where[idx] = w
where = ' & '.join(map('({})'.format, com.flatten(where))) # noqa
self.expr = where
self.env = Scope(scope_level + 1, local_dict=local_dict)
if queryables is not None and isinstance(self.expr, string_types):
self.env.queryables.update(queryables)
self._visitor = ExprVisitor(self.env, queryables=queryables,
parser='pytables', engine='pytables',
encoding=encoding)
self.terms = self.parse()
def __unicode__(self):
if self.terms is not None:
return pprint_thing(self.terms)
return pprint_thing(self.expr)
def evaluate(self):
""" create and return the numexpr condition and filter """
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError:
raise ValueError("cannot process expression [{expr}], [{slf}] "
"is not a valid condition".format(expr=self.expr,
slf=self))
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError:
raise ValueError("cannot process expression [{expr}], [{slf}] "
"is not a valid filter".format(expr=self.expr,
slf=self))
return self.condition, self.filter
class TermValue(object):
""" hold a term value the we use to construct a condition/filter """
def __init__(self, value, converted, kind):
self.value = value
self.converted = converted
self.kind = kind
def tostring(self, encoding):
""" quote the string if not encoded
else encode and return """
if self.kind == u'string':
if encoding is not None:
return self.converted
return '"{converted}"'.format(converted=self.converted)
elif self.kind == u'float':
# python 2 str(float) is not always
# round-trippable so use repr()
return repr(self.converted)
return self.converted
def maybe_expression(s):
""" loose checking if s is a pytables-acceptable expression """
if not isinstance(s, string_types):
return False
ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',)
# make sure we have an op at least
return any(op in s for op in ops)
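# A minimal illustrative sketch -- the ``_demo_maybe_expression`` helper below
# is not part of the original module and exists only to show the behaviour of
# ``maybe_expression``: it is a loose syntactic check that only looks for the
# presence of an operator and does not validate the full expression.
def _demo_maybe_expression():
    assert maybe_expression('index >= "20130101"')   # has a comparison op
    assert maybe_expression("columns = ['A', 'B']")  # a bare '=' also counts
    assert not maybe_expression('foo')               # no operator present
    assert not maybe_expression(123)                 # non-strings are rejected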
| bsd-3-clause |
DSLituiev/scikit-learn | sklearn/cross_decomposition/cca_.py | 151 | 3192 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
        Whether the deflation should be done on a copy. Leave the default
        value (True) unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
super(CCA, self).__init__(n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
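# Illustrative sketch -- ``_demo_cca_score_correlation`` is not part of the
# original module; it only demonstrates the property described in the Notes
# above: each component maximizes the correlation between the X and Y scores,
# so on data that shares a common latent signal the per-component score
# correlations should be close to 1 in absolute value.
def _demo_cca_score_correlation():
    import numpy as np
    rng = np.random.RandomState(0)
    latent = rng.normal(size=(100, 2))
    X = np.hstack([latent, rng.normal(size=(100, 2))])
    Y = np.hstack([latent, rng.normal(size=(100, 2))])
    x_scores, y_scores = CCA(n_components=2).fit(X, Y).transform(X, Y)
    return [abs(np.corrcoef(x_scores[:, k], y_scores[:, k])[0, 1])
            for k in range(2)]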
| bsd-3-clause |
open-austin/capture | distance.py | 1 | 4386 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import argparse
import glob
import os
import numpy as np
import pandas as pd
from geopy.distance import vincenty as point_distance
def ingest(fn, route_id, begin_latlng, end_latlng):
df = pd.read_csv(fn, parse_dates=['timestamp'])
df = df.drop(['speed', 'trip_headsign'], axis=1)
df = df[df.route_id == route_id]
df['begin_distances'] = compute_distance(df, begin_latlng)
df['end_distances'] = compute_distance(df, end_latlng)
return df
def compute_distance(df, latlng):
df = df.copy()
starts = zip(df.latitude, df.longitude)
return [point_distance(latlng, s).meters for s in starts]
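# Illustrative sketch -- ``_demo_compute_distance`` is not part of the original
# script and the coordinates below are made up. ``compute_distance`` returns
# one vincenty distance in meters per row of the frame, measured from the
# supplied (latitude, longitude) pair.
def _demo_compute_distance():
    positions = pd.DataFrame({'latitude': [30.2672, 30.2700],
                              'longitude': [-97.7431, -97.7400]})
    meters = compute_distance(positions, (30.2672, -97.7431))
    # the first reading coincides with the reference point (~0 m); the second
    # is a few hundred meters away
    return meters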
def parse_duration(df):
'''
for each trip id
choose a reading nearest the begin stop
choose a reading nearest downtown
subtract the times for those two readings
positive is southbound
'''
mins = df.groupby('trip_id').idxmin()
begin_mins = df.loc[mins.begin_distances].set_index('trip_id')
end_mins = df.loc[mins.end_distances].set_index('trip_id')
unneeded_cols = ['begin_distances', 'end_distances', 'latitude', 'longitude']
begin_mins.drop(unneeded_cols, axis=1, inplace=True)
end_mins.drop(['vehicle_id', 'route_id'] + unneeded_cols, axis=1, inplace=True)
result = begin_mins.join(end_mins, rsuffix='_begin', lsuffix='_end')
duration = begin_mins.timestamp - end_mins.timestamp
result['duration'] = duration / np.timedelta64(1, 's')
return result
def parse_duration_by_hour(df):
df['duration_abs'] = df['duration'].abs()
df['hour'] = df['timestamp_begin'].apply(
lambda x: x.tz_localize('UTC').tz_convert('US/Central').hour
)
df_byhour = df.groupby('hour')
results = pd.concat([
df_byhour['duration_abs'].count(),
df_byhour['duration_abs'].mean()
], axis=1, keys=['count', 'mean'])
return results.reindex(index=range(0, 24))
def parse(capmetrics_path=None, leglob=None, route_id=None, begin_lat=None, begin_lon=None, end_lat=None, end_lon=None, name=None):
df_total = pd.DataFrame()
data_glob = os.path.join(capmetrics_path, 'data', 'vehicle_positions', leglob)
files = glob.glob(data_glob)
for i, fname in enumerate(files):
print('({}/{}) Ingesting {}'.format(i + 1, len(files), fname))
try:
df_ingested = ingest(fname, route_id, (begin_lat, begin_lon), (end_lat, end_lon))
df_duration = parse_duration(df_ingested)
df_total = pd.concat([df_total, df_duration])
except Exception as e:
print(e)
print('Skipping ', fname)
if df_total.empty:
print('No vehicle positions found')
return
    return parse_duration_by_hour(df_total)
def main():
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument('--capmetrics_path', help='Path to the capmetrics directory', required=True, type=str)
parser.add_argument('--glob', help='Glob of vehicle positions CSV files', required=True, type=str)
parser.add_argument('--name', help='Name of the output file', required=True, type=str)
parser.add_argument('--route_id', help='Route ID', required=True, type=int)
parser.add_argument('--begin_lat', help='Latitude of first stop', required=True, type=float)
parser.add_argument('--begin_lon', help='Longitude of first stop', required=True, type=float)
parser.add_argument('--end_lat', help='Latitude of second stop', required=True, type=float)
parser.add_argument('--end_lon', help='Longitude of second stop', required=True, type=float)
args = parser.parse_args()
results = parse(
capmetrics_path=args.capmetrics_path,
name=args.name,
leglob=args.glob,
route_id=args.route_id,
begin_lat=args.begin_lat,
begin_lon=args.begin_lon,
end_lat=args.end_lat,
end_lon=args.end_lon
)
output_filename = '{route_id}_{name}_{glob}'.format(route_id=args.route_id, glob=args.glob, name=args.name)
output_path_duration_by_hour = 'results/duration_by_hour/{}.csv'.format(output_filename)
results.to_csv(output_path_duration_by_hour, header=True, sep='\t')
print('Saved duration by hour to {}'.format(output_path_duration_by_hour))
if __name__ == '__main__':
main()
| gpl-3.0 |
MohammedWasim/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 21 | 13171 | import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.validation import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
    # Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
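# Illustrative sketch -- ``_demo_build_sparse_mtx`` is not one of the original
# tests; it only spells out the structure produced by the helper above: a 9x9
# block-diagonal document-word matrix where documents 0-2 use only words 0-2,
# documents 3-5 only words 3-5, and documents 6-8 only words 6-8.
def _demo_build_sparse_mtx():
    n_topics, X = _build_sparse_mtx()
    assert n_topics == 3
    assert X.shape == (9, 9)
    assert X.sum() == 81  # three 3x3 blocks, each filled with the value 3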
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
    # Test LDA batch learning (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
# test pass dense matrix with sparse negative input.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_topics, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_method=method,
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X,
invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X,
invalid_n_topics)
def test_lda_perplexity():
    # Test LDA perplexity for both batch and online training
    # perplexity should be lower with more iterations
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
    # Test LDA score for both batch and online training
    # score should be higher with more iterations
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
| bsd-3-clause |
xiaoxiamii/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 128 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
dataset that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent variables:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
rbalda/neural_ocr | env/lib/python2.7/site-packages/numpy/fft/fftpack.py | 72 | 45497 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermitian transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache=_fft_cache):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
try:
# Thread-safety note: We rely on list.pop() here to atomically
# retrieve-and-remove a wsave from the cache. This ensures that no
# other thread can get the same wsave while we're using it.
wsave = fft_cache.setdefault(n, []).pop()
except (IndexError):
wsave = init_function(n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache[n].append(wsave)
return r
def _unitary(norm):
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
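# Illustrative sketch -- ``_demo_fft_crop_pad_and_norm`` is not part of the
# original module; it shows the two behaviours implemented by the helpers
# above: ``_raw_fft`` crops the input when ``n`` is smaller than the axis
# length and zero-pads when it is larger, and ``_unitary`` enables the
# 1/sqrt(n) scaling selected with ``norm="ortho"``.
def _demo_fft_crop_pad_and_norm():
    a = array([0., 1., 2., 3., 4., 5., 6., 7.])
    assert fft(a, n=4).shape == (4,)     # cropped to the first 4 samples
    assert fft(a, n=16).shape == (16,)   # zero-padded up to 16 samples
    assert abs(fft(a, norm="ortho") - fft(a) / sqrt(8.)).max() < 1e-12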
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
a = asarray(a).astype(complex)
if n is None:
n = a.shape[axis]
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
if _unitary(norm):
output *= 1 / sqrt(n)
return output
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
_real_fft_cache)
if _unitary(norm):
output *= 1 / sqrt(a.shape[axis])
return output
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = conjugate(rfft(a, n, axis))
return output * (1 / (sqrt(n) if unitary else n))
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
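# Illustrative sketch -- ``_demo_cook_nd_args_defaults`` is not part of the
# original module; it shows the defaults picked by ``_cook_nd_args``: with no
# ``s`` and no ``axes`` every axis is transformed at its full length, and with
# only ``axes`` given the lengths are taken from those axes.
def _demo_cook_nd_args_defaults():
    a = zeros((2, 3, 4))
    s, axes = _cook_nd_args(a)
    assert list(s) == [2, 3, 4] and list(axes) == [-3, -2, -1]
    s, axes = _cook_nd_args(a, axes=(0, 2))
    assert list(s) == [2, 4] and list(axes) == [0, 2]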
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
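# Illustrative sketch -- ``_demo_raw_fftnd_is_axis_by_axis`` is not part of the
# original module; it illustrates that ``_raw_fftnd`` simply applies the 1-D
# transform once per requested axis, so an n-D transform matches nested 1-D
# calls (the order does not matter because DFTs along different axes commute).
def _demo_raw_fftnd_is_axis_by_axis():
    a = zeros((4, 6)) + 1.0
    a[1, 2] = 5.0
    nested = fft(fft(a, axis=-1), axis=0)
    assert abs(fftn(a) - nested).max() < 1e-10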
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii], norm)
return a
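# Illustrative sketch (not part of the original module; assumes NumPy is
# importable as np): the Notes above describe `rfftn` as an `rfft` over the
# last axis followed by complex `fft`s over the remaining axes, which can be
# checked directly:
#
#     >>> import numpy as np
#     >>> x = np.random.rand(3, 4, 5)
#     >>> manual = np.fft.rfft(x, axis=-1)   # real transform, last axis
#     >>> manual = np.fft.fft(manual, axis=0)  # complex transforms, other axes
#     >>> manual = np.fft.fft(manual, axis=1)
#     >>> np.allclose(manual, np.fft.rfftn(x))
#     True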
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes, norm)
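# Illustrative sketch (not part of the original module): `rfft2` only changes
# the default `axes` argument, so it is expected to agree with `rfftn`
# restricted to the last two axes:
#
#     >>> import numpy as np
#     >>> x = np.random.rand(4, 6)
#     >>> np.allclose(np.fft.rfft2(x), np.fft.rfftn(x, axes=(-2, -1)))
#     True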
def irfftn(a, s=None, axes=None, norm=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
        of which `irfftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii], norm)
a = irfft(a, s[-1], axes[-1], norm)
return a
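# Illustrative note (not part of the original module): because the length of
# the last transformed axis defaults to ``2*(m-1)``, an odd original length is
# only recovered when ``s`` is passed explicitly:
#
#     >>> import numpy as np
#     >>> x = np.random.rand(3, 5)                    # last axis has odd length
#     >>> np.fft.irfftn(np.fft.rfftn(x)).shape        # shape information is lost
#     (3, 4)
#     >>> np.allclose(np.fft.irfftn(np.fft.rfftn(x), s=x.shape), x)
#     True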
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes, norm)
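# Illustrative round-trip sketch (not part of the original module): passing
# the original shape via `s` recovers a real array from its `rfft2`:
#
#     >>> import numpy as np
#     >>> x = np.random.rand(4, 6)
#     >>> np.allclose(np.fft.irfft2(np.fft.rfft2(x), s=x.shape), x)
#     True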
| mit |
e-baumer/sampling | sampling/stratified_rand.py | 1 | 5349 | from __future__ import division
from collections import defaultdict
import numpy as np
from base_sample import BaseSample
from sklearn.cluster import AffinityPropagation as AP
import pandas as pd
from collections import Counter
class StratifiedRandom(BaseSample):
def __init__(self, data_frame, number_arms=2):
super(StratifiedRandom, self).__init__(data_frame, number_arms)
def create_stratum(self, column_names, **kwargs):
'''
Use affinity propagation to find number of strata for each column.
column_names is a list of the covariates to be split into strata and
        used for classification. This function adds a column to the data frame
for each column as column_name_strata that gives the strata designation
for that variable. The whole data frame is returned.
'''
for colname in column_names:
X = self.data[colname].reshape(-1, 1)
if np.isnan(X).any():
raise ValueError("There are NaN values in self.data[%s] that the \
clustering algorithm can't handle" % colname)
elif np.unique(self.data[colname]).shape[0] <=2:
string_name = colname+'_strata'
self.data[string_name] = self.data[colname].astype(int)
else:
af_model = AP(damping = 0.9)
strata_groups = af_model.fit(X)
#cluster_centers_indices = af.cluster_centers_indices_
#n_clusters_ = len(cluster_centers_indices)
string_name = colname+'_strata'
self.data[string_name] = strata_groups.labels_
return self.data
#In the main function, you need to call create_stratum before create_unique_strata
def create_unique_strata(self, column_names):
'''
The input should be self.data that has had the strata for each column
name assigned and had a pre-seeded randomization, meaning each arm
has at least one randomly assigned participant.
'''
#Create a column to store concatenated strata strings for each data point
self.data['strata_string'] = np.ones(len(self.data))*np.nan
#Initialize variables to be filled in during the loop
strata_unique = {}
#Loop through data points and create their strata strings
for ind in self.data.index.values:
similar_val = ''
for colname in column_names:
string_name = colname+'_strata'
similar_val += str(self.data[string_name].loc[ind])
#Add the total strata string for that data point
self.data['strata_string'].set_value(ind,similar_val)
#If the strata string exists, continue. If not, assign it a new value
if similar_val in list(strata_unique.keys()):
strata_unique[similar_val].append(ind)
continue
else:
strata_unique[similar_val] = [ind]
return (strata_unique, self.data)
def count_arm_assignments(self, strata_unique, key):
'''
        For each unique stratum, count how many data points are assigned to each arm.
'''
#Initialize arm_tally that is the same length as the number of arms
arm_tally = np.zeros(self.n_arms)
#Loop through the values in the unique strata and count how many are in each arm
for value in strata_unique[key]:
#If it is not NaN, add one to the arm_tally for the data point's arm assignment
if np.isnan(self.data['arm_assignment'][value]) == False:
                arm_tally[int(self.data['arm_assignment'][value]-1)] += 1
return arm_tally
def assign_arms(self, column_names, percent_nan = 0.05):
'''
Loop through unique strata and assign each data point to an arm.
'''
#clear all values with NaNs
self.data = self.nan_finder(column_names, percent_nan)
#call create_stratum to create strata for each chosen covariate
self.data = self.create_stratum(column_names,preference=-50)
#combine the covariate strata into a unique strata identifier
(strata_unique, self.data) = self.create_unique_strata(column_names)
#initiate an empty column in the data frame for arm assignments
self.data['arm_assignment'] = np.ones(len(self.data))*np.nan
        #Loop through the unique strata
for key in strata_unique.keys():
#Loop through the values in the unique stratum
for value in strata_unique[key]:
#update the arm_tally based on new assignments
                arm_tally = self.count_arm_assignments(strata_unique, key)
ind_unique = np.where(arm_tally==np.min(arm_tally))[0]
self.data['arm_assignment'].set_value(
value, np.random.choice(list(ind_unique+1)
))
return self.data
#
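# Illustrative usage sketch (not part of the original module; assumes a pandas
# DataFrame `df` whose columns include the chosen covariates -- 'age' and
# 'income' here are hypothetical names -- and that BaseSample provides the
# expected `nan_finder` helper):
#
#     sampler = StratifiedRandom(df, number_arms=3)
#     assigned = sampler.assign_arms(['age', 'income'], percent_nan=0.05)
#     assigned['arm_assignment'].value_counts()
#
# assign_arms() calls create_stratum() and create_unique_strata() internally,
# so they only need to be called separately for custom workflows.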
| apache-2.0 |
CforED/Machine-Learning | sklearn/tests/test_cross_validation.py | 20 | 46586 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-shuffling
    # variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with an
    # equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
    # Check that PredefinedSplit can reproduce a split generated by KFold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X and
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced, so f1_score should be equal to the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
# test with 3d X and
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
alvarofierroclavero/scikit-learn | sklearn/cross_decomposition/cca_.py | 209 | 3150 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
        Whether the deflation should be done on a copy. Leave the default
        value (True) unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find the weights u, v that maximize
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
datalyze-solutions/pandas-qt | pandasqt/views/CSVDialogs.py | 1 | 23796 | # -*- coding: utf-8 -*-
import os
from encodings.aliases import aliases as _encodings
import pandas
from pandasqt.compat import Qt, QtCore, QtGui, Slot, Signal
from pandasqt.encoding import Detector
from pandasqt.models.DataFrameModel import DataFrameModel
from pandasqt.views.CustomDelegates import DtypeComboDelegate
from pandasqt.views._ui import icons_rc
from pandasqt.utils import fillNoneValues, convertTimestamps
class DelimiterValidator(QtGui.QRegExpValidator):
"""A Custom RegEx Validator.
    The validator checks if the input has a length of 1.
    The input may contain any non-whitespace character
as denoted by the RegEx term `\S`.
"""
def __init__(self, parent=None):
"""Constructs the object with the given parent.
Args:
            parent (QObject, optional): Causes the object to be owned
by `parent` instead of Qt. Defaults to `None`.
"""
super(DelimiterValidator, self).__init__(parent)
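        # The pattern '\S{1}' accepts exactly one non-whitespace character.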
re = QtCore.QRegExp('\S{1}')
self.setRegExp(re)
class DelimiterSelectionWidget(QtGui.QGroupBox):
"""A custom widget with different text delimiter signs.
    A user can choose between three predefined and one user-defined
    text delimiter character. The predefined delimiters are `semicolon`,
    `comma` and `tabulator`. The user-defined delimiter may only have
a length of 1 and may not include any whitespace character.
Attributes:
        delimiter (QtCore.pyqtSignal): This signal is emitted whenever a
delimiter character is selected by the user.
semicolonRadioButton (QtGui.QRadioButton): A radio button to
select the `semicolon` character as delimiter.
commaRadioButton (QtGui.QRadioButton): A radio button to select
the `comma` character as delimiter.
tabRadioButton (QtGui.QRadioButton): A radio button to select
the `tabulator` character as delimiter.
otherRadioButton (QtGui.QRadioButton): A radio button to select
the given input text as delimiter.
otherSeparatorLineEdit (QtGui.QLineEdit): An input line to let the
user enter one character only, which may be used as delimiter.
"""
delimiter = Signal('QString')
def __init__(self, parent=None):
"""Constructs the object with the given parent.
Args:
            parent (QObject, optional): Causes the object to be owned
by `parent` instead of Qt. Defaults to `None`.
"""
super(DelimiterSelectionWidget, self).__init__(parent)
self.semicolonRadioButton = None
self.commaRadioButton = None
self.tabRadioButton = None
self.otherRadioButton = None
self.otherSeparatorLineEdit = None
self._initUI()
def _initUI(self):
"""Creates the inital layout with all subwidgets.
The layout is a `QHBoxLayout`. Each time a radio button is
selected or unselected, a slot
`DelimiterSelectionWidget._delimiter` is called.
Furthermore the `QLineEdit` widget has a custom regex validator
`DelimiterValidator` enabled.
"""
#layout = QtGui.QHBoxLayout(self)
self.semicolonRadioButton = QtGui.QRadioButton(u'Semicolon')
self.commaRadioButton = QtGui.QRadioButton(u'Comma')
self.tabRadioButton = QtGui.QRadioButton(u'Tab')
self.otherRadioButton = QtGui.QRadioButton(u'Other')
self.semicolonRadioButton.setChecked(True)
self.otherSeparatorLineEdit = QtGui.QLineEdit(self)
self.otherSeparatorLineEdit.setEnabled(False)
self.semicolonRadioButton.toggled.connect(self._delimiter)
self.commaRadioButton.toggled.connect(self._delimiter)
self.tabRadioButton.toggled.connect(self._delimiter)
self.otherRadioButton.toggled.connect(self._enableLine)
self.otherSeparatorLineEdit.textChanged.connect(lambda: self._delimiter(True))
self.otherSeparatorLineEdit.setValidator(DelimiterValidator(self))
currentLayout = self.layout()
# unset and delete the current layout in order to set a new one
if currentLayout is not None:
del currentLayout
layout = QtGui.QHBoxLayout()
layout.addWidget(self.semicolonRadioButton)
layout.addWidget(self.commaRadioButton)
layout.addWidget(self.tabRadioButton)
layout.addWidget(self.otherRadioButton)
layout.addWidget(self.otherSeparatorLineEdit)
self.setLayout(layout)
@Slot('QBool')
def _enableLine(self, toggled):
self.otherSeparatorLineEdit.setEnabled(toggled)
def currentSelected(self):
"""Returns the currently selected delimiter character.
Returns:
str: One of `,`, `;`, `\t`, `*other*`.
"""
if self.commaRadioButton.isChecked():
return ','
elif self.semicolonRadioButton.isChecked():
return ';'
elif self.tabRadioButton.isChecked():
return '\t'
elif self.otherRadioButton.isChecked():
return self.otherSeparatorLineEdit.text()
return
@Slot('QBool')
def _delimiter(self, checked):
if checked:
if self.commaRadioButton.isChecked():
self.delimiter.emit(',')
elif self.semicolonRadioButton.isChecked():
self.delimiter.emit(';')
elif self.tabRadioButton.isChecked():
self.delimiter.emit('\t')
elif self.otherRadioButton.isChecked():
ret = self.otherSeparatorLineEdit.text()
if len(ret) > 0:
self.delimiter.emit(ret)
def reset(self):
"""Resets this widget to its initial state.
"""
self.semicolonRadioButton.setChecked(True)
self.otherSeparatorLineEdit.setText('')
class CSVImportDialog(QtGui.QDialog):
"""A dialog to import any csv file into a pandas data frame.
This modal dialog enables the user to enter any path to a csv
file and parse this file with or without a header and with special
delimiter characters.
On a successful load, the data can be previewed and the column data
types may be edited by the user.
After all configuration is done, the dataframe and the underlying model
may be used by the main application.
Attributes:
        load (QtCore.pyqtSignal): This signal is emitted whenever the
dialog is successfully closed, e.g. when the ok button is
pressed. Returns DataFrameModel and path of chosen csv file.
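    Example:
        A minimal usage sketch (assumes an existing ``QApplication`` and a
        hypothetical slot ``onLoad(model, path)``)::
            dialog = CSVImportDialog()
            dialog.load.connect(onLoad)
            dialog.show()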
"""
load = Signal('QAbstractItemModel', str)
def __init__(self, parent=None):
"""Constructs the object with the given parent.
Args:
            parent (QObject, optional): Causes the object to be owned
by `parent` instead of Qt. Defaults to `None`.
"""
super(CSVImportDialog, self).__init__(parent)
self._modal = True
self._windowTitle = u'Import CSV'
self._encodingKey = None
self._filename = None
self._delimiter = None
self._header = None
self._detector = Detector()
self._initUI()
def _initUI(self):
"""Initiates the user interface with a grid layout and several widgets.
"""
self.setModal(self._modal)
self.setWindowTitle(self._windowTitle)
layout = QtGui.QGridLayout()
self._filenameLabel = QtGui.QLabel(u'Choose File', self)
self._filenameLineEdit = QtGui.QLineEdit(self)
self._filenameLineEdit.textEdited.connect(self._updateFilename)
chooseFileButtonIcon = QtGui.QIcon(QtGui.QPixmap(':/icons/document-open.png'))
self._chooseFileAction = QtGui.QAction(self)
self._chooseFileAction.setIcon(chooseFileButtonIcon)
self._chooseFileAction.triggered.connect(self._openFile)
self._chooseFileButton = QtGui.QToolButton(self)
self._chooseFileButton.setDefaultAction(self._chooseFileAction)
layout.addWidget(self._filenameLabel, 0, 0)
layout.addWidget(self._filenameLineEdit, 0, 1, 1, 2)
layout.addWidget(self._chooseFileButton, 0, 3)
self._encodingLabel = QtGui.QLabel(u'File Encoding', self)
encoding_names = map(lambda x: x.upper(), sorted(list(set(_encodings.viewvalues()))))
self._encodingComboBox = QtGui.QComboBox(self)
self._encodingComboBox.addItems(encoding_names)
self._encodingComboBox.activated.connect(self._updateEncoding)
layout.addWidget(self._encodingLabel, 1, 0)
layout.addWidget(self._encodingComboBox, 1, 1, 1, 1)
self._hasHeaderLabel = QtGui.QLabel(u'Header Available?', self)
self._headerCheckBox = QtGui.QCheckBox(self)
self._headerCheckBox.toggled.connect(self._updateHeader)
layout.addWidget(self._hasHeaderLabel, 2, 0)
layout.addWidget(self._headerCheckBox, 2, 1)
self._delimiterLabel = QtGui.QLabel(u'Column Delimiter', self)
self._delimiterBox = DelimiterSelectionWidget(self)
self._delimiter = self._delimiterBox.currentSelected()
self._delimiterBox.delimiter.connect(self._updateDelimiter)
layout.addWidget(self._delimiterLabel, 3, 0)
layout.addWidget(self._delimiterBox, 3, 1, 1, 3)
self._tabWidget = QtGui.QTabWidget(self)
self._previewTableView = QtGui.QTableView(self)
self._datatypeTableView = QtGui.QTableView(self)
self._tabWidget.addTab(self._previewTableView, u'Preview')
self._tabWidget.addTab(self._datatypeTableView, u'Change Column Types')
layout.addWidget(self._tabWidget, 4, 0, 3, 4)
self._datatypeTableView.horizontalHeader().setDefaultSectionSize(200)
self._datatypeTableView.setItemDelegateForColumn(1, DtypeComboDelegate(self._datatypeTableView))
self._loadButton = QtGui.QPushButton(u'Load Data', self)
#self.loadButton.setAutoDefault(False)
self._cancelButton = QtGui.QPushButton(u'Cancel', self)
# self.cancelButton.setDefault(False)
# self.cancelButton.setAutoDefault(True)
self._buttonBox = QtGui.QDialogButtonBox(self)
self._buttonBox.addButton(self._loadButton, QtGui.QDialogButtonBox.AcceptRole)
self._buttonBox.addButton(self._cancelButton, QtGui.QDialogButtonBox.RejectRole)
self._buttonBox.accepted.connect(self.accepted)
self._buttonBox.rejected.connect(self.rejected)
layout.addWidget(self._buttonBox, 9, 2, 1, 2)
self._loadButton.setDefault(False)
self._filenameLineEdit.setFocus()
self._statusBar = QtGui.QStatusBar(self)
self._statusBar.setSizeGripEnabled(False)
layout.addWidget(self._statusBar, 8, 0, 1, 4)
self.setLayout(layout)
@Slot('QString')
def updateStatusBar(self, message):
"""Updates the status bar widget of this dialog with the given message.
This method is also a `SLOT()`.
The message will be shown for only 5 seconds.
Args:
message (QString): The new message which will be displayed.
"""
self._statusBar.showMessage(message, 5000)
@Slot()
def _openFile(self):
"""Opens a file dialog and sets a value for the QLineEdit widget.
This method is also a `SLOT`.
"""
ret = QtGui.QFileDialog.getOpenFileName(self, self.tr(u'open file'), filter='Comma Separated Values (*.csv)')
if ret:
self._filenameLineEdit.setText(ret)
self._updateFilename()
@Slot('QBool')
def _updateHeader(self, toggled):
"""Changes the internal flag, whether the csv file contains a header or not.
This method is also a `SLOT`.
In addition, after toggling the corresponding checkbox, the
`_previewFile` method will be called.
Args:
toggled (boolean): A flag indicating the status of the checkbox.
The flag will be used to update an internal variable.
"""
self._header = 0 if toggled else None
self._previewFile()
@Slot()
def _updateFilename(self):
"""Calls several methods after the filename changed.
This method is also a `SLOT`.
It checks the encoding of the changed filename and generates a
preview of the data.
"""
self._filename = self._filenameLineEdit.text()
self._guessEncoding(self._filename)
self._previewFile()
def _guessEncoding(self, path):
"""Opens a file from the given `path` and checks the file encoding.
        The file must exist on the file system and end with the extension
        `.csv`. The file is read line by line until the encoding can be
        guessed.
        On a successful identification, the widgets of this dialog will be
updated.
Args:
path (string): Path to a csv file on the file system.
"""
if os.path.exists(path) and path.lower().endswith('csv'):
encoding = self._detector.detect(path)
if encoding is not None:
if encoding.startswith('utf'):
encoding = encoding.replace('-', '')
encoding = encoding.replace('-','_')
viewValue = _encodings.get(encoding)
self._encodingKey = encoding
index = self._encodingComboBox.findText(viewValue.upper())
self._encodingComboBox.setCurrentIndex(index)
@Slot('int')
def _updateEncoding(self, index):
"""Changes the value of the encoding combo box to the value of given index.
This method is also a `SLOT`.
After the encoding is changed, the file will be reloaded and previewed.
Args:
            index (int): A valid index of the combo box.
"""
encoding = self._encodingComboBox.itemText(index)
encoding = encoding.lower()
self._encodingKey = _calculateEncodingKey(encoding)
self._previewFile()
@Slot('QString')
def _updateDelimiter(self, delimiter):
"""Changes the value of the delimiter for the csv file.
This method is also a `SLOT`.
Args:
delimiter (string): The new delimiter.
"""
self._delimiter = delimiter
self._previewFile()
def _previewFile(self):
"""Updates the preview widgets with new models for both tab panes.
"""
dataFrame = self._loadCSVDataFrame()
dataFrameModel = DataFrameModel(dataFrame)
dataFrameModel.enableEditing(True)
self._previewTableView.setModel(dataFrameModel)
columnModel = dataFrameModel.columnDtypeModel()
columnModel.changeFailed.connect(self.updateStatusBar)
self._datatypeTableView.setModel(columnModel)
def _loadCSVDataFrame(self):
"""Loads the given csv file with pandas and generate a new dataframe.
The file will be loaded with the configured encoding, delimiter
        and header.
        If any exception occurs, an empty DataFrame is generated
and a message will appear in the status bar.
Returns:
pandas.DataFrame: A dataframe containing all the available
information of the csv file.
"""
if self._filename and os.path.exists(self._filename) and self._filename.endswith('.csv'):
# default fallback if no encoding was found/selected
            encoding = self._encodingKey or 'utf8'
try:
dataFrame = pandas.read_csv(self._filename,
sep=self._delimiter, encoding=encoding,
header=self._header)
dataFrame = dataFrame.apply(fillNoneValues)
dataFrame = dataFrame.apply(convertTimestamps)
except Exception, err:
self.updateStatusBar(str(err))
return pandas.DataFrame()
self.updateStatusBar('Preview generated.')
return dataFrame
self.updateStatusBar('File does not exists or does not end with .csv')
return pandas.DataFrame()
def _resetWidgets(self):
"""Resets all widgets of this dialog to its inital state.
"""
self._filenameLineEdit.setText('')
self._encodingComboBox.setCurrentIndex(0)
self._delimiterBox.reset()
self._headerCheckBox.setChecked(False)
self._statusBar.showMessage('')
self._previewTableView.setModel(None)
self._datatypeTableView.setModel(None)
@Slot()
def accepted(self):
"""Successfully close the widget and return the loaded model.
This method is also a `SLOT`.
The dialog will be closed, when the `ok` button is pressed. If
a `DataFrame` was loaded, it will be emitted by the signal `load`.
"""
model = self._previewTableView.model()
if model is not None:
df = model.dataFrame().copy()
dfModel = DataFrameModel(df)
self.load.emit(dfModel, self._filename)
self._resetWidgets()
self.accept()
@Slot()
def rejected(self):
"""Close the widget and reset its inital state.
This method is also a `SLOT`.
The dialog will be closed and all changes reverted, when the
`cancel` button is pressed.
"""
self._resetWidgets()
self.reject()
class CSVExportDialog(QtGui.QDialog):
"""An widget to serialize a `DataFrameModel` to a `CSV-File`.
"""
exported = Signal('QBool')
def __init__(self, model=None, parent=None):
super(CSVExportDialog, self).__init__(parent)
self._model = model
self._modal = True
self._windowTitle = u'Export to CSV'
self._idx = -1
self._initUI()
def _initUI(self):
"""Initiates the user interface with a grid layout and several widgets.
"""
self.setModal(self._modal)
self.setWindowTitle(self._windowTitle)
layout = QtGui.QGridLayout()
self._filenameLabel = QtGui.QLabel(u'Output File', self)
self._filenameLineEdit = QtGui.QLineEdit(self)
chooseFileButtonIcon = QtGui.QIcon(QtGui.QPixmap(':/icons/document-save-as.png'))
self._chooseFileAction = QtGui.QAction(self)
self._chooseFileAction.setIcon(chooseFileButtonIcon)
self._chooseFileAction.triggered.connect(self._createFile)
self._chooseFileButton = QtGui.QToolButton(self)
self._chooseFileButton.setDefaultAction(self._chooseFileAction)
layout.addWidget(self._filenameLabel, 0, 0)
layout.addWidget(self._filenameLineEdit, 0, 1, 1, 2)
layout.addWidget(self._chooseFileButton, 0, 3)
self._encodingLabel = QtGui.QLabel(u'File Encoding', self)
encoding_names = map(lambda x: x.upper(), sorted(list(set(_encodings.viewvalues()))))
self._encodingComboBox = QtGui.QComboBox(self)
self._encodingComboBox.addItems(encoding_names)
self._idx = encoding_names.index('UTF_8')
self._encodingComboBox.setCurrentIndex(self._idx)
#self._encodingComboBox.activated.connect(self._updateEncoding)
layout.addWidget(self._encodingLabel, 1, 0)
layout.addWidget(self._encodingComboBox, 1, 1, 1, 1)
self._hasHeaderLabel = QtGui.QLabel(u'Header Available?', self)
self._headerCheckBox = QtGui.QCheckBox(self)
#self._headerCheckBox.toggled.connect(self._updateHeader)
layout.addWidget(self._hasHeaderLabel, 2, 0)
layout.addWidget(self._headerCheckBox, 2, 1)
self._delimiterLabel = QtGui.QLabel(u'Column Delimiter', self)
self._delimiterBox = DelimiterSelectionWidget(self)
layout.addWidget(self._delimiterLabel, 3, 0)
layout.addWidget(self._delimiterBox, 3, 1, 1, 3)
self._exportButton = QtGui.QPushButton(u'Export Data', self)
self._cancelButton = QtGui.QPushButton(u'Cancel', self)
self._buttonBox = QtGui.QDialogButtonBox(self)
self._buttonBox.addButton(self._exportButton, QtGui.QDialogButtonBox.AcceptRole)
self._buttonBox.addButton(self._cancelButton, QtGui.QDialogButtonBox.RejectRole)
self._buttonBox.accepted.connect(self.accepted)
self._buttonBox.rejected.connect(self.rejected)
layout.addWidget(self._buttonBox, 5, 2, 1, 2)
self._exportButton.setDefault(False)
self._filenameLineEdit.setFocus()
self._statusBar = QtGui.QStatusBar(self)
self._statusBar.setSizeGripEnabled(False)
layout.addWidget(self._statusBar, 4, 0, 1, 4)
self.setLayout(layout)
def setExportModel(self, model):
if not isinstance(model, DataFrameModel):
return False
self._model = model
return True
@Slot()
def _createFile(self):
ret = QtGui.QFileDialog.getSaveFileName(self, 'Save File', filter='Comma Separated Value (*.csv)')
self._filenameLineEdit.setText(ret)
def _saveModel(self):
delimiter = self._delimiterBox.currentSelected()
header = self._headerCheckBox.isChecked() # column labels
filename = self._filenameLineEdit.text()
index = False # row labels
encodingIndex = self._encodingComboBox.currentIndex()
encoding = self._encodingComboBox.itemText(encodingIndex)
encoding = _calculateEncodingKey(encoding.lower())
try:
dataFrame = self._model.dataFrame()
except AttributeError, err:
raise AttributeError('No data loaded to export.')
else:
try:
dataFrame.to_csv(filename, encoding=encoding, header=header, index=index, sep=delimiter)
except IOError, err:
raise IOError('No filename given')
except UnicodeError, err:
raise UnicodeError('Could not encode all data. Choose a different encoding')
except Exception:
raise
def _resetWidgets(self):
"""Resets all widgets of this dialog to its inital state.
"""
self._filenameLineEdit.setText('')
self._encodingComboBox.setCurrentIndex(self._idx)
self._delimiterBox.reset()
self._headerCheckBox.setChecked(False)
self._statusBar.showMessage('')
@Slot()
def accepted(self):
"""Successfully close the widget and emit an export signal.
This method is also a `SLOT`.
The dialog will be closed, when the `Export Data` button is
pressed. If errors occur during the export, the status bar
will show the error message and the dialog will not be closed.
"""
try:
self._saveModel()
except Exception, err:
self._statusBar.showMessage(str(err))
else:
self._resetWidgets()
self.exported.emit(True)
self.accept()
@Slot()
def rejected(self):
"""Close the widget and reset its inital state.
This method is also a `SLOT`.
The dialog will be closed and all changes reverted, when the
`cancel` button is pressed.
"""
self._resetWidgets()
self.exported.emit(False)
self.reject()
def _calculateEncodingKey(comparator):
"""Gets the first key of all available encodings where the corresponding
value matches the comparator.
Args:
comparator (string): A view name for an encoding.
Returns:
str: A key for a specific encoding used by python.
"""
encodingName = None
for k, v in _encodings.viewitems():
if v == comparator:
encodingName = k
break
return encodingName | mit |
Diviyan-Kalainathan/causal-humans | Clustering/performance_evaluation.py | 1 | 3279 | '''
Computing the misclassification error distance between two k-means clusterings
according to Marina Meila, "The Uniqueness of a Good Optimum for K-Means", ICML 2006
Author : Diviyan Kalainathan
Date : 20/06/2016
'''
import csv,numpy,itertools
from sklearn import metrics
def Clustering_performance_evaluation(mode, folder_name, run1, run2, num_clusters, num_init):
"""
    :param mode: selects which metric is to be used: 1 = misclassification
        error distance, 2 = adjusted Rand index, 3 = V-measure (int)
    :param folder_name: Folder of the runs (String)
    :param run1: Number of run 1 (int)
    :param run2: Number of run 2 (int)
    :param num_clusters: Number of clusters used in the runs (int)
    :param num_init: Number of initialisations identifying the result files (int)
    :return: distance/similarity value (float); 0 if the data is invalid
"""
numpy.set_printoptions(threshold='nan')
print('-'+str(num_clusters)+'---performance evaluation between runs : ' + str(run1) + ' ,' + str(run2))
valid_data= True
#Checking if the data is valid by loading & testing the shape of it
try:
data_1=numpy.loadtxt('output/'+folder_name+'/cluster_predictions_c'+ str(num_clusters)
+ '_n'+ str(num_init) +'_r'+ str(run1)+'.csv',delimiter=';')
data_2=numpy.loadtxt('output/'+folder_name+'/cluster_predictions_c'+ str(num_clusters)
+ '_n'+ str(num_init) +'_r'+ str(run2)+'.csv',delimiter=';')
if data_1.shape != data_2.shape:
valid_data=False
except IOError as e:
print "I/O error({0}): {1}".format(e.errno, e.strerror)
valid_data=False
if valid_data:
n_samples=data_1.shape[0]
data_1 = numpy.asarray(sorted(data_1, key=lambda x: x[1]))
data_2 = numpy.asarray(sorted(data_2, key=lambda x: x[1]))
if mode==1:
#Distance defined by Marina Meila : k! complexity
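            # The two label vectors are expanded into n_samples x k indicator
            # matrices, so confusion_matrix[i, j] counts the samples assigned
            # to cluster i in run 1 and cluster j in run 2. The value returned
            # below is the largest fraction of samples on which both runs
            # agree, maximised over all k! relabelings of the clusters.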
clustering_1=numpy.zeros((n_samples,num_clusters))
clustering_2=numpy.zeros((n_samples,num_clusters))
for x in range(0,n_samples):
clustering_1[x,data_1[x,0]]+=1
clustering_2[x,data_2[x,0]]+=1
'''for y in range(0,num_clusters):
try:
clustering_1[:,y]*=1/numpy.sqrt(numpy.sum(clustering_1[:,y]))
except ZeroDivisionError:
clustering_1[:,y]=0
try:
clustering_2[:,y]*=1/numpy.sqrt(numpy.sum(clustering_2[:,y]))
except ZeroDivisionError:
clustering_2[:,y]=0
''' # No normalisation needed
confusion_matrix=numpy.dot(numpy.transpose(clustering_1),clustering_2)
max_confusion=0
result = []
for perm in itertools.permutations(range(num_clusters)):
confusion=0
for i in range(0, num_clusters):
confusion += confusion_matrix[i, perm[i]]
if max_confusion<confusion:
max_confusion=confusion
distance=(max_confusion/n_samples)
return distance
elif mode==2:
            #Adjusted Rand index
distance=metrics.adjusted_rand_score(data_1[:,0],data_2[:,0])
return distance
elif mode==3:
            #V-measure
distance=metrics.v_measure_score(data_1[:,0],data_2[:,0])
return distance
return 0 | mit |
sibis-platform/ncanda-datacore | scripts/reporting/xnat_scans_filter.py | 4 | 5552 | #!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
"""
xnat_scans_filter.py
======================
This script filters the csv file generated using xnat_extractor.py. This filters
is based on records from XNAT where there is one row per scan.
xnat_scans_filter.py -i path/to/xnat.csv
"""
import os
import sys
import redcap
import pandas as pd
# Fields to retrieve from REDCap
fields = ['study_id', 'redcap_event_name', 'exclude', 'visit_ignore',
'visit_date', 'mri_missing', 'mri_xnat_sid', 'mri_xnat_eids',
'visit_notes']
# Forms that the fields come from in REDCap.
forms = ['mr_session_report', 'visit_date', 'demographics']
def get_project_entry(args=None):
"""
Pulls the data from REDCap
"""
# Get API key.
summary_key_file = open(os.path.join(os.path.expanduser("~"),
'.server_config',
'redcap-dataentry-token'), 'r')
summary_api_key = summary_key_file.read().strip()
# Connect to API.
project_entry = redcap.Project('https://ncanda.sri.com/redcap/api/',
summary_api_key,
verify_ssl=False)
return project_entry
def data_entry_fields(fields, project, arm):
"""
Gets the dataframe containing a specific arm from REDCap
"""
# Get a dataframe of fields
data_entry_raw = project.export_records(fields=fields,
forms=forms,
format='df',
events=[arm])
return data_entry_raw
def append_site_id_row(xnat_df, scans_df):
scans_df['site_id'] = ''
ids = xnat_df[['site_id', 'subject_id']]
map = {r.subject_id: r.site_id for idx, r in ids.iterrows()}
for idx, row in scans_df.iterrows():
scans_df.site_id.loc[idx] = map.get(row.case)
return scans_df
def is_in_redcap(rc_df, scans_df):
"""
Checks if the scans missing in the pipeline are listed in REDCap and adds
a column indicating as such.
"""
scans_df['in_redcap'] = False
scans_df['visit_ignore___yes'] = ''
scans_df['visit_ignore_why'] = ''
scans_df['visit_ignore_why_other'] = ''
scans_df['visit_notes'] = ''
scans_df['mri_missing'] = ''
scans_df['mri_missing_why'] = ''
scans_df['mri_missing_why_other'] = ''
scans_df['mri_notes'] = ''
rc_cases = rc_df[rc_df.mri_xnat_sid.isin(scans_df.case)]
for idx, row in rc_cases.iterrows():
scan_cases = scans_df[scans_df.case == row.mri_xnat_sid]
scans_df.in_redcap.loc[scan_cases.index] = True
# Visit info
scans_df.visit_ignore___yes.loc[scan_cases.index] = row.visit_ignore___yes
scans_df.visit_ignore_why.loc[scan_cases.index] = row.visit_ignore_why
scans_df.visit_ignore_why_other.loc[scan_cases.index] = row.visit_ignore_why_other
scans_df.visit_notes.loc[scan_cases.index] = row.visit_notes
# Scan info
scans_df.mri_missing.loc[scan_cases.index] = row.mri_missing
scans_df.mri_missing_why.loc[scan_cases.index] = row.mri_missing_why
scans_df.mri_missing_why_other.loc[scan_cases.index] = row.mri_missing_why_other
scans_df.mri_notes.loc[scan_cases.index] = row.mri_notes
return scans_df
def is_in_xnat(xnat_df, scans_df):
"""
Checks XNAT for scans near the visit date recorded in REDCap
"""
def main(args=None):
# Connect to REDCap
project_entry = get_project_entry()
# Get the visit dataframe
project_df = data_entry_fields(fields, project_entry, args.event)
# Get a list of all EIDs for the given visit
xnat_eids = project_df['mri_xnat_eids'].tolist()
# Read the csv file from xnat_extractor
xnat_csv = pd.read_csv(args.infile)
# Filter the XNAT records by the EIDs in REDCap
# This provides a list of all the scans in XNAT that are also in REDCap
filter_csv = xnat_csv[xnat_csv['experiment_id'].isin(xnat_eids)]
# Iterate through scans missing in the pipeline and check:
# - they are present in the filtered REDCap list
if args.missing_scans:
list_missing_scans = pd.read_csv(args.missing_scans)
missing_scans_df = append_site_id_row(xnat_csv, list_missing_scans)
# Add columns indicating if there is data for this visit in redcap
in_redcap = is_in_redcap(project_df, missing_scans_df)
filter_csv = in_redcap
# Write the results to disk
filter_csv.to_csv(args.outfile)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--infile',
required=True,
help="Input csv file from xnat_extractor.py")
parser.add_argument('-e', '--event',
choices=['baseline_visit_arm_1', '1y_visit_arm_1'],
default='1y_visit_arm_1')
parser.add_argument('-m', '--missing-scans',
help="Output of list_missing_scans script.")
parser.add_argument('-o', '--outfile',
default=os.path.join('/tmp', 'xnat_scans_filter.csv'))
parser.add_argument('-v', '--verbose',
action='store_true',
help='Enable verbose reporting.')
argv = parser.parse_args()
sys.exit(main(args=argv))
| bsd-3-clause |
rohit21122012/DCASE2013 | runs/2016/baseline128/src/evaluation.py | 38 | 42838 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import numpy
import math
from sklearn import metrics
class DCASE2016_SceneClassification_Metrics():
"""DCASE 2016 scene classification metrics
Examples
--------
>>> dcase2016_scene_metric = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> y_true = []
>>> y_pred = []
>>> for result in results:
>>> y_true.append(dataset.file_meta(result[0])[0]['scene_label'])
>>> y_pred.append(result[1])
>>>
>>> dcase2016_scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
>>>
>>> results = dcase2016_scene_metric.results()
"""
def __init__(self, class_list):
"""__init__ method.
Parameters
----------
class_list : list
Evaluated scene labels in the list
"""
self.accuracies_per_class = None
self.Nsys = None
self.Nref = None
self.class_list = class_list
self.eps = numpy.spacing(1)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
return self.results()
def accuracies(self, y_true, y_pred, labels):
"""Calculate accuracy
Parameters
----------
y_true : numpy.array
Ground truth array, list of scene labels
y_pred : numpy.array
System output array, list of scene labels
labels : list
list of scene labels
Returns
-------
array : numpy.array [shape=(number of scene labels,)]
Accuracy per scene label class
"""
confusion_matrix = metrics.confusion_matrix(y_true=y_true, y_pred=y_pred, labels=labels).astype(float)
#print confusion_matrix
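        # Per-class accuracy: diagonal of the confusion matrix divided by the
        # row sums (number of reference samples per class); eps avoids a
        # division by zero for classes without reference samples.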
temp = numpy.divide(numpy.diag(confusion_matrix), numpy.sum(confusion_matrix, 1)+self.eps)
#print temp
return temp
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
accuracies_per_class = self.accuracies(y_pred=system_output, y_true=annotated_ground_truth, labels=self.class_list)
if self.accuracies_per_class is None:
self.accuracies_per_class = accuracies_per_class
else:
self.accuracies_per_class = numpy.vstack((self.accuracies_per_class, accuracies_per_class))
Nref = numpy.zeros(len(self.class_list))
Nsys = numpy.zeros(len(self.class_list))
for class_id, class_label in enumerate(self.class_list):
for item in system_output:
if item == class_label:
Nsys[class_id] += 1
for item in annotated_ground_truth:
if item == class_label:
Nref[class_id] += 1
if self.Nref is None:
self.Nref = Nref
else:
self.Nref = numpy.vstack((self.Nref, Nref))
if self.Nsys is None:
self.Nsys = Nsys
else:
self.Nsys = numpy.vstack((self.Nsys, Nsys))
def results(self):
"""Get results
Outputs results in dict, format:
{
'class_wise_data':
{
'office': {
'Nsys': 10,
'Nref': 7,
},
}
'class_wise_accuracy':
{
'office': 0.6,
'home': 0.4,
}
'overall_accuracy': numpy.mean(self.accuracies_per_class)
'Nsys': 100,
'Nref': 100,
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {
'class_wise_data': {},
'class_wise_accuracy': {},
'overall_accuracy': numpy.mean(self.accuracies_per_class)
}
if len(self.Nsys.shape) == 2:
results['Nsys'] = int(sum(sum(self.Nsys)))
results['Nref'] = int(sum(sum(self.Nref)))
else:
results['Nsys'] = int(sum(self.Nsys))
results['Nref'] = int(sum(self.Nref))
for class_id, class_label in enumerate(self.class_list):
if len(self.accuracies_per_class.shape) == 2:
results['class_wise_accuracy'][class_label] = numpy.mean(self.accuracies_per_class[:, class_id])
results['class_wise_data'][class_label] = {
'Nsys': int(sum(self.Nsys[:, class_id])),
'Nref': int(sum(self.Nref[:, class_id])),
}
else:
results['class_wise_accuracy'][class_label] = numpy.mean(self.accuracies_per_class[class_id])
results['class_wise_data'][class_label] = {
'Nsys': int(self.Nsys[class_id]),
'Nref': int(self.Nref[class_id]),
}
return results
class EventDetectionMetrics(object):
"""Baseclass for sound event metric classes.
"""
def __init__(self, class_list):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
"""
self.class_list = class_list
self.eps = numpy.spacing(1)
def max_event_offset(self, data):
"""Get maximum event offset from event list
Parameters
----------
data : list
Event list, list of event dicts
Returns
-------
max : float > 0
Maximum event offset
"""
max = 0
for event in data:
if event['event_offset'] > max:
max = event['event_offset']
return max
def list_to_roll(self, data, time_resolution=0.01):
"""Convert event list into event roll.
        Event roll is a binary matrix indicating event activity within time segments defined by time_resolution.
Parameters
----------
data : list
Event list, list of event dicts
time_resolution : float > 0
Time resolution used when converting event into event roll.
Returns
-------
event_roll : numpy.ndarray [shape=(math.ceil(data_length * 1 / time_resolution) + 1, amount of classes)]
Event roll
"""
# Initialize
data_length = self.max_event_offset(data)
event_roll = numpy.zeros((math.ceil(data_length * 1 / time_resolution) + 1, len(self.class_list)))
# Fill-in event_roll
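        # Each event marks its active range with ones; e.g. with the default
        # resolution of 0.01 s, an event from 0.5 s to 1.2 s fills the rows
        # floor(0.5 / 0.01) = 50 up to ceil(1.2 / 0.01) = 120 (slice [50:121]).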
for event in data:
pos = self.class_list.index(event['event_label'].rstrip())
onset = math.floor(event['event_onset'] * 1 / time_resolution)
offset = math.ceil(event['event_offset'] * 1 / time_resolution) + 1
event_roll[onset:offset, pos] = 1
return event_roll
class DCASE2016_EventDetection_SegmentBasedMetrics(EventDetectionMetrics):
"""DCASE2016 Segment based metrics for sound event detection
Supported metrics:
- Overall
- Error rate (ER), Substitutions (S), Insertions (I), Deletions (D)
- F-score (F1)
- Class-wise
- Error rate (ER), Insertions (I), Deletions (D)
- F-score (F1)
Examples
--------
>>> overall_metrics_per_scene = {}
>>> for scene_id, scene_label in enumerate(dataset.scene_labels):
>>> dcase2016_segment_based_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> for file_id, item in enumerate(dataset.test(fold,scene_label=scene_label)):
>>> current_file_results = []
>>> for result_line in results:
>>> if result_line[0] == dataset.absolute_to_relative(item['file']):
>>> current_file_results.append(
>>> {'file': result_line[0],
>>> 'event_onset': float(result_line[1]),
>>> 'event_offset': float(result_line[2]),
>>> 'event_label': result_line[3]
>>> }
>>> )
>>> meta = dataset.file_meta(dataset.absolute_to_relative(item['file']))
>>> dcase2016_segment_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)
>>> overall_metrics_per_scene[scene_label]['segment_based_metrics'] = dcase2016_segment_based_metric.results()
"""
def __init__(self, class_list, time_resolution=1.0):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
time_resolution : float > 0
Time resolution used when converting event into event roll.
(Default value = 1.0)
"""
self.time_resolution = time_resolution
self.overall = {
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
'Nref': 0.0,
'Nsys': 0.0,
'ER': 0.0,
'S': 0.0,
'D': 0.0,
'I': 0.0,
}
self.class_wise = {}
for class_label in class_list:
self.class_wise[class_label] = {
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
'Nref': 0.0,
'Nsys': 0.0,
}
EventDetectionMetrics.__init__(self, class_list=class_list)
def __enter__(self):
# Initialize class and return it
return self
def __exit__(self, type, value, traceback):
# Finalize evaluation and return results
return self.results()
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
# Convert event list into frame-based representation
system_event_roll = self.list_to_roll(data=system_output, time_resolution=self.time_resolution)
annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=self.time_resolution)
# Fix durations of both event_rolls to be equal
if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
system_event_roll = numpy.vstack((system_event_roll, padding))
if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
annotated_event_roll = numpy.vstack((annotated_event_roll, padding))
# Compute segment-based overall metrics
for segment_id in range(0, annotated_event_roll.shape[0]):
annotated_segment = annotated_event_roll[segment_id, :]
system_segment = system_event_roll[segment_id, :]
Ntp = sum(system_segment + annotated_segment > 1)
Ntn = sum(system_segment + annotated_segment == 0)
Nfp = sum(system_segment - annotated_segment > 0)
Nfn = sum(annotated_segment - system_segment > 0)
Nref = sum(annotated_segment)
Nsys = sum(system_segment)
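            # Per-segment counts: substitutions S = min(Nref, Nsys) - Ntp,
            # deletions D = max(0, Nref - Nsys), insertions I = max(0, Nsys - Nref),
            # and ER = max(Nref, Nsys) - Ntp = S + D + I, which is accumulated
            # for the overall error rate.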
S = min(Nref, Nsys) - Ntp
D = max(0, Nref - Nsys)
I = max(0, Nsys - Nref)
ER = max(Nref, Nsys) - Ntp
self.overall['Ntp'] += Ntp
self.overall['Ntn'] += Ntn
self.overall['Nfp'] += Nfp
self.overall['Nfn'] += Nfn
self.overall['Nref'] += Nref
self.overall['Nsys'] += Nsys
self.overall['S'] += S
self.overall['D'] += D
self.overall['I'] += I
self.overall['ER'] += ER
for class_id, class_label in enumerate(self.class_list):
annotated_segment = annotated_event_roll[:, class_id]
system_segment = system_event_roll[:, class_id]
Ntp = sum(system_segment + annotated_segment > 1)
Ntn = sum(system_segment + annotated_segment == 0)
Nfp = sum(system_segment - annotated_segment > 0)
Nfn = sum(annotated_segment - system_segment > 0)
Nref = sum(annotated_segment)
Nsys = sum(system_segment)
self.class_wise[class_label]['Ntp'] += Ntp
self.class_wise[class_label]['Ntn'] += Ntn
self.class_wise[class_label]['Nfp'] += Nfp
self.class_wise[class_label]['Nfn'] += Nfn
self.class_wise[class_label]['Nref'] += Nref
self.class_wise[class_label]['Nsys'] += Nsys
return self
def results(self):
"""Get results
Outputs results in dict, format:
{
'overall':
{
'Pre':
'Rec':
'F':
'ER':
'S':
'D':
'I':
}
'class_wise':
{
'office': {
'Pre':
'Rec':
'F':
'ER':
'D':
'I':
'Nref':
'Nsys':
'Ntp':
'Nfn':
'Nfp':
},
}
'class_wise_average':
{
'F':
'ER':
}
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {'overall': {},
'class_wise': {},
'class_wise_average': {},
}
# Overall metrics
results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
results['overall']['Rec'] = self.overall['Ntp'] / self.overall['Nref']
results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
results['overall']['ER'] = self.overall['ER'] / self.overall['Nref']
results['overall']['S'] = self.overall['S'] / self.overall['Nref']
results['overall']['D'] = self.overall['D'] / self.overall['Nref']
results['overall']['I'] = self.overall['I'] / self.overall['Nref']
# Class-wise metrics
class_wise_F = []
class_wise_ER = []
for class_id, class_label in enumerate(self.class_list):
if class_label not in results['class_wise']:
results['class_wise'][class_label] = {}
results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (self.class_wise[class_label]['Nsys'] + self.eps)
results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['F'] = 2 * ((results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn'] + self.class_wise[class_label]['Nfp']) / (self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']
class_wise_F.append(results['class_wise'][class_label]['F'])
class_wise_ER.append(results['class_wise'][class_label]['ER'])
results['class_wise_average']['F'] = numpy.mean(class_wise_F)
results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)
return results
class DCASE2016_EventDetection_EventBasedMetrics(EventDetectionMetrics):
"""DCASE2016 Event based metrics for sound event detection
Supported metrics:
- Overall
- Error rate (ER), Substitutions (S), Insertions (I), Deletions (D)
- F-score (F1)
- Class-wise
- Error rate (ER), Insertions (I), Deletions (D)
- F-score (F1)
Examples
--------
>>> overall_metrics_per_scene = {}
>>> for scene_id, scene_label in enumerate(dataset.scene_labels):
>>> dcase2016_event_based_metric = DCASE2016_EventDetection_EventBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> for file_id, item in enumerate(dataset.test(fold,scene_label=scene_label)):
>>> current_file_results = []
>>> for result_line in results:
>>> if result_line[0] == dataset.absolute_to_relative(item['file']):
>>> current_file_results.append(
>>> {'file': result_line[0],
>>> 'event_onset': float(result_line[1]),
>>> 'event_offset': float(result_line[2]),
>>> 'event_label': result_line[3]
>>> }
>>> )
>>> meta = dataset.file_meta(dataset.absolute_to_relative(item['file']))
>>> dcase2016_event_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)
>>> overall_metrics_per_scene[scene_label]['event_based_metrics'] = dcase2016_event_based_metric.results()
"""
def __init__(self, class_list, time_resolution=1.0, t_collar=0.2):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
time_resolution : float > 0
Time resolution used when converting event into event roll.
(Default value = 1.0)
t_collar : float > 0
Time collar for event onset and offset condition
(Default value = 0.2)
"""
self.time_resolution = time_resolution
self.t_collar = t_collar
self.overall = {
'Nref': 0.0,
'Nsys': 0.0,
'Nsubs': 0.0,
'Ntp': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
}
self.class_wise = {}
for class_label in class_list:
self.class_wise[class_label] = {
'Nref': 0.0,
'Nsys': 0.0,
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
}
EventDetectionMetrics.__init__(self, class_list=class_list)
def __enter__(self):
# Initialize class and return it
return self
def __exit__(self, type, value, traceback):
# Finalize evaluation and return results
return self.results()
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
# Overall metrics
# Total number of detected and reference events
Nsys = len(system_output)
Nref = len(annotated_ground_truth)
sys_correct = numpy.zeros(Nsys, dtype=bool)
ref_correct = numpy.zeros(Nref, dtype=bool)
# Number of correctly transcribed events, onset/offset within a t_collar range
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
label_condition = annotated_ground_truth[j]['event_label'] == system_output[i]['event_label']
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if label_condition and onset_condition and offset_condition:
ref_correct[j] = True
sys_correct[i] = True
break
Ntp = numpy.sum(sys_correct)
sys_leftover = numpy.nonzero(numpy.negative(sys_correct))[0]
ref_leftover = numpy.nonzero(numpy.negative(ref_correct))[0]
# Substitutions
Nsubs = 0
for j in ref_leftover:
for i in sys_leftover:
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if onset_condition and offset_condition:
Nsubs += 1
break
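        # Events that are neither correct nor substitutions are counted as
        # false positives (extra system events) or false negatives (missed
        # reference events).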
Nfp = Nsys - Ntp - Nsubs
Nfn = Nref - Ntp - Nsubs
self.overall['Nref'] += Nref
self.overall['Nsys'] += Nsys
self.overall['Ntp'] += Ntp
self.overall['Nsubs'] += Nsubs
self.overall['Nfp'] += Nfp
self.overall['Nfn'] += Nfn
# Class-wise metrics
for class_id, class_label in enumerate(self.class_list):
Nref = 0.0
Nsys = 0.0
Ntp = 0.0
# Count event frequencies in the ground truth
for i in range(0, len(annotated_ground_truth)):
if annotated_ground_truth[i]['event_label'] == class_label:
Nref += 1
# Count event frequencies in the system output
for i in range(0, len(system_output)):
if system_output[i]['event_label'] == class_label:
Nsys += 1
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == class_label and system_output[i]['event_label'] == class_label:
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if onset_condition and offset_condition:
Ntp += 1
break
Nfp = Nsys - Ntp
Nfn = Nref - Ntp
self.class_wise[class_label]['Nref'] += Nref
self.class_wise[class_label]['Nsys'] += Nsys
self.class_wise[class_label]['Ntp'] += Ntp
self.class_wise[class_label]['Nfp'] += Nfp
self.class_wise[class_label]['Nfn'] += Nfn
def onset_condition(self, annotated_event, system_event, t_collar=0.200):
"""Onset condition, checked does the event pair fulfill condition
Condition:
- event onsets are within t_collar each other
Parameters
----------
annotated_event : dict
Event dict
system_event : dict
Event dict
t_collar : float > 0
            Defines how close event onsets have to be in order to be considered a match. In seconds.
(Default value = 0.2)
Returns
-------
result : bool
Condition result
"""
return math.fabs(annotated_event['event_onset'] - system_event['event_onset']) <= t_collar
def offset_condition(self, annotated_event, system_event, t_collar=0.200, percentage_of_length=0.5):
"""Offset condition, checking does the event pair fulfill condition
Condition:
- event offsets are within t_collar each other
or
- system event offset is within the percentage_of_length*annotated event_length
Parameters
----------
annotated_event : dict
Event dict
system_event : dict
Event dict
t_collar : float > 0
            Defines how close event offsets have to be in order to be considered a match. In seconds.
            (Default value = 0.2)
        percentage_of_length : float [0-1]
            Fraction of the annotated event length that is also accepted as offset tolerance.
            (Default value = 0.5)
Returns
-------
result : bool
Condition result
"""
annotated_length = annotated_event['event_offset'] - annotated_event['event_onset']
return math.fabs(annotated_event['event_offset'] - system_event['event_offset']) <= max(t_collar, percentage_of_length * annotated_length)
def results(self):
"""Get results
Outputs results in dict, format:
{
'overall':
{
'Pre':
'Rec':
'F':
'ER':
'S':
'D':
'I':
}
'class_wise':
{
'office': {
'Pre':
'Rec':
'F':
'ER':
'D':
'I':
'Nref':
'Nsys':
'Ntp':
'Nfn':
'Nfp':
},
}
'class_wise_average':
{
'F':
'ER':
}
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {
'overall': {},
'class_wise': {},
'class_wise_average': {},
}
# Overall metrics
results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
results['overall']['Rec'] = self.overall['Ntp'] / self.overall['Nref']
results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
results['overall']['ER'] = (self.overall['Nfn'] + self.overall['Nfp'] + self.overall['Nsubs']) / self.overall['Nref']
results['overall']['S'] = self.overall['Nsubs'] / self.overall['Nref']
results['overall']['D'] = self.overall['Nfn'] / self.overall['Nref']
results['overall']['I'] = self.overall['Nfp'] / self.overall['Nref']
# Class-wise metrics
class_wise_F = []
class_wise_ER = []
for class_label in self.class_list:
if class_label not in results['class_wise']:
results['class_wise'][class_label] = {}
results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (self.class_wise[class_label]['Nsys'] + self.eps)
results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['F'] = 2 * ((results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn']+self.class_wise[class_label]['Nfp']) / (self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']
class_wise_F.append(results['class_wise'][class_label]['F'])
class_wise_ER.append(results['class_wise'][class_label]['ER'])
# Class-wise average
results['class_wise_average']['F'] = numpy.mean(class_wise_F)
results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)
return results
class DCASE2013_EventDetection_Metrics(EventDetectionMetrics):
"""Lecagy DCASE2013 metrics, converted from the provided Matlab implementation
Supported metrics:
- Frame based
- F-score (F)
- AEER
- Event based
- Onset
- F-Score (F)
- AEER
- Onset-offset
- F-Score (F)
- AEER
- Class based
- Onset
- F-Score (F)
- AEER
- Onset-offset
- F-Score (F)
- AEER
"""
#
def frame_based(self, annotated_ground_truth, system_output, resolution=0.01):
# Convert event list into frame-based representation
system_event_roll = self.list_to_roll(data=system_output, time_resolution=resolution)
annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=resolution)
# Fix durations of both event_rolls to be equal
if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
system_event_roll = numpy.vstack((system_event_roll, padding))
if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
annotated_event_roll = numpy.vstack((annotated_event_roll, padding))
# Compute frame-based metrics
Nref = sum(sum(annotated_event_roll))
Ntot = sum(sum(system_event_roll))
Ntp = sum(sum(system_event_roll + annotated_event_roll > 1))
Nfp = sum(sum(system_event_roll - annotated_event_roll > 0))
Nfn = sum(sum(annotated_event_roll - system_event_roll > 0))
Nsubs = min(Nfp, Nfn)
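        # Frame-level error counts: Nsubs pairs up co-occurring false positives
        # and false negatives; AEER below is (Nfn + Nfp + Nsubs) / Nref.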
eps = numpy.spacing(1)
results = dict()
results['Rec'] = Ntp / (Nref + eps)
results['Pre'] = Ntp / (Ntot + eps)
results['F'] = 2 * ((results['Pre'] * results['Rec']) / (results['Pre'] + results['Rec'] + eps))
results['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)
return results
def event_based(self, annotated_ground_truth, system_output):
# Event-based evaluation for event detection task
# outputFile: the output of the event detection system
# GTFile: the ground truth list of events
# Total number of detected and reference events
Ntot = len(system_output)
Nref = len(annotated_ground_truth)
# Number of correctly transcribed events, onset within a +/-100 ms range
Ncorr = 0
NcorrOff = 0
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (math.fabs(annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
Ncorr += 1
# If offset within a +/-100 ms range or within 50% of ground-truth event's duration
if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(0.1, 0.5 * (annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j]['event_onset'])):
NcorrOff += 1
break # In order to not evaluate duplicates
# Compute onset-only event-based metrics
eps = numpy.spacing(1)
results = {
'onset': {},
'onset-offset': {},
}
Nfp = Ntot - Ncorr
Nfn = Nref - Ncorr
Nsubs = min(Nfp, Nfn)
results['onset']['Rec'] = Ncorr / (Nref + eps)
results['onset']['Pre'] = Ncorr / (Ntot + eps)
results['onset']['F'] = 2 * (
(results['onset']['Pre'] * results['onset']['Rec']) / (
results['onset']['Pre'] + results['onset']['Rec'] + eps))
results['onset']['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)
# Compute onset-offset event-based metrics
NfpOff = Ntot - NcorrOff
NfnOff = Nref - NcorrOff
NsubsOff = min(NfpOff, NfnOff)
results['onset-offset']['Rec'] = NcorrOff / (Nref + eps)
results['onset-offset']['Pre'] = NcorrOff / (Ntot + eps)
results['onset-offset']['F'] = 2 * ((results['onset-offset']['Pre'] * results['onset-offset']['Rec']) / (
results['onset-offset']['Pre'] + results['onset-offset']['Rec'] + eps))
results['onset-offset']['AEER'] = (NfnOff + NfpOff + NsubsOff) / (Nref + eps)
return results
def class_based(self, annotated_ground_truth, system_output):
# Class-wise event-based evaluation for event detection task
# outputFile: the output of the event detection system
# GTFile: the ground truth list of events
# Total number of detected and reference events per class
Ntot = numpy.zeros((len(self.class_list), 1))
for event in system_output:
pos = self.class_list.index(event['event_label'])
Ntot[pos] += 1
Nref = numpy.zeros((len(self.class_list), 1))
for event in annotated_ground_truth:
pos = self.class_list.index(event['event_label'])
Nref[pos] += 1
I = (Nref > 0).nonzero()[0] # index for classes present in ground-truth
# Number of correctly transcribed events per class, onset within a +/-100 ms range
Ncorr = numpy.zeros((len(self.class_list), 1))
NcorrOff = numpy.zeros((len(self.class_list), 1))
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (
math.fabs(
annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
pos = self.class_list.index(system_output[i]['event_label'])
Ncorr[pos] += 1
# If offset within a +/-100 ms range or within 50% of ground-truth event's duration
if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(
0.1, 0.5 * (
annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j][
'event_onset'])):
pos = self.class_list.index(system_output[i]['event_label'])
NcorrOff[pos] += 1
break # In order to not evaluate duplicates
# Compute onset-only class-wise event-based metrics
eps = numpy.spacing(1)
results = {
'onset': {},
'onset-offset': {},
}
Nfp = Ntot - Ncorr
Nfn = Nref - Ncorr
Nsubs = numpy.minimum(Nfp, Nfn)
tempRec = Ncorr[I] / (Nref[I] + eps)
tempPre = Ncorr[I] / (Ntot[I] + eps)
results['onset']['Rec'] = numpy.mean(tempRec)
results['onset']['Pre'] = numpy.mean(tempPre)
tempF = 2 * ((tempPre * tempRec) / (tempPre + tempRec + eps))
results['onset']['F'] = numpy.mean(tempF)
tempAEER = (Nfn[I] + Nfp[I] + Nsubs[I]) / (Nref[I] + eps)
results['onset']['AEER'] = numpy.mean(tempAEER)
# Compute onset-offset class-wise event-based metrics
NfpOff = Ntot - NcorrOff
NfnOff = Nref - NcorrOff
NsubsOff = numpy.minimum(NfpOff, NfnOff)
tempRecOff = NcorrOff[I] / (Nref[I] + eps)
tempPreOff = NcorrOff[I] / (Ntot[I] + eps)
results['onset-offset']['Rec'] = numpy.mean(tempRecOff)
results['onset-offset']['Pre'] = numpy.mean(tempPreOff)
tempFOff = 2 * ((tempPreOff * tempRecOff) / (tempPreOff + tempRecOff + eps))
results['onset-offset']['F'] = numpy.mean(tempFOff)
tempAEEROff = (NfnOff[I] + NfpOff[I] + NsubsOff[I]) / (Nref[I] + eps)
results['onset-offset']['AEER'] = numpy.mean(tempAEEROff)
return results
def main(argv):
# Examples to show usage and required data structures
class_list = ['class1', 'class2', 'class3']
system_output = [
{
'event_label': 'class1',
'event_onset': 0.1,
'event_offset': 1.0
},
{
'event_label': 'class2',
'event_onset': 4.1,
'event_offset': 4.7
},
{
'event_label': 'class3',
'event_onset': 5.5,
'event_offset': 6.7
}
]
annotated_groundtruth = [
{
'event_label': 'class1',
'event_onset': 0.1,
'event_offset': 1.0
},
{
'event_label': 'class2',
'event_onset': 4.2,
'event_offset': 5.4
},
{
'event_label': 'class3',
'event_onset': 5.5,
'event_offset': 6.7
}
]
dcase2013metric = DCASE2013_EventDetection_Metrics(class_list=class_list)
print 'DCASE2013'
print 'Frame-based:', dcase2013metric.frame_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
print 'Event-based:', dcase2013metric.event_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
print 'Class-based:', dcase2013metric.class_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
dcase2016_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=class_list)
print 'DCASE2016'
print dcase2016_metric.evaluate(system_output=system_output, annotated_ground_truth=annotated_groundtruth).results()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| mit |
justincassidy/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 35 | 16763 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
    # Check regression on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
    # Check picklability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LinearRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(LinearRegression())
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
| bsd-3-clause |
wanggang3333/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
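# Quick sanity check (illustrative, not part of the original module):
# comb2(4) == 6 and comb2(1) == 0, so singleton rows or columns of the
# contingency matrix contribute nothing to the pair counts used below.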
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
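# Illustrative sketch (added for clarity, not part of the original module):
# for labels_true = [0, 0, 1, 1] and labels_pred = [0, 0, 1, 2],
# contingency_matrix(labels_true, labels_pred) returns
#     array([[2, 0, 0],
#            [0, 1, 1]])
# i.e. both members of class 0 land in cluster 0, while class 1 is split
# across clusters 1 and 2.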
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1, even when the label values are permuted::
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
    ARI is symmetric, so labelings whose clusters are pure (members come
    from the same classes) but which introduce unnecessary splits are
    penalized in the same way::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
    If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
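# Worked example of the arithmetic above (added for clarity, not part of the
# original module): for labels_true = [0, 0, 1, 2] and labels_pred = [0, 0, 1, 1]
# the contingency matrix is [[2, 0], [0, 1], [0, 1]], so
#     sum_comb = 1, sum_comb_c = 1, sum_comb_k = 2, comb(4, 2) = 6
#     prod_comb = 1 * 2 / 6 = 1/3, mean_comb = (1 + 2) / 2 = 1.5
#     ARI = (1 - 1/3) / (1.5 - 1/3) = 4/7 ~= 0.57
# which matches the value quoted in the docstring examples.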
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
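# Worked example (added for clarity, not part of the original module): for
# labels_true = [0, 0, 1, 2] and labels_pred = [0, 0, 1, 1], the entropies are
# H(C) ~= 1.04 and H(K) = log(2) ~= 0.693, and MI ~= 0.693, so
# homogeneity ~= 0.667, completeness = 1.0 and
# v_measure = 2 * 0.667 * 1.0 / (0.667 + 1.0) = 0.8, matching the docstring
# examples further down.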
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
    Clusters that include samples from different classes do not make for a
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
    If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
    Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
    and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
    If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
    Clusters that include samples from entirely different classes destroy
    the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
    The Mutual Information is a measure of the similarity between two labelings of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
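# Worked example (added for clarity, not part of the original module): for the
# perfectly matched labelings labels_true = labels_pred = [0, 0, 1, 1], the
# contingency matrix is [[2, 0], [0, 2]], so P(i, j) = 0.5 on the diagonal and
# P(i) = P'(j) = 0.5, giving MI = 2 * 0.5 * log(0.5 / 0.25) = log(2) ~= 0.693
# nats, which equals the entropy of either labeling.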
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami: float (upper-bounded by 1.0)
       The AMI returns a value of 1 when the two partitions are identical
       (i.e. perfectly matched). Random partitions (independent labellings)
       have an expected AMI around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_information_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
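# Illustrative relation between the helpers above (added for clarity, not part
# of the original module): for labels_true = labels_pred = [0, 0, 1, 1], both
# entropy() calls return log(2) and mutual_info_score() also returns log(2),
# so normalized_mutual_info_score() gives log(2) / sqrt(log(2) * log(2)) = 1.0,
# the perfect-match case quoted in the docstrings.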
| bsd-3-clause |
printedheart/h2o-3 | h2o-py/h2o/model/binomial.py | 5 | 24203 | """
Binomial Models
"""
from metrics_base import *
class H2OBinomialModel(ModelBase):
"""
Class for Binomial models.
"""
def __init__(self, dest_key, model_json):
"""
Create a new binomial model.
"""
super(H2OBinomialModel, self).__init__(dest_key, model_json,H2OBinomialModelMetrics)
def F1(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the F1 for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the F1 value for the training data.
:param valid: If valid is True, then return the F1 value for the validation data.
:param xval: If xval is True, then return the F1 value for the cross validation data.
:return: The F1 for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("f1", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
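  # Illustrative usage (added for clarity, not part of the original file);
  # `model` stands for an already trained H2OBinomialModel:
  #   model.F1()                        # F1 at the thresholds stored in the training metrics
  #   model.F1(thresholds=[0.3, 0.5])   # F1 evaluated only at the supplied thresholds
  #   model.F1(train=True, valid=True)  # dict keyed by "train" and "valid"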
def F2(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the F2 for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the F2 value for the training data.
:param valid: If valid is True, then return the F2 value for the validation data.
:param xval: If xval is True, then return the F2 value for the cross validation data.
:return: The F2 for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("f2", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def F0point5(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the F0.5 for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the F0point5 value for the training data.
:param valid: If valid is True, then return the F0point5 value for the validation data.
:param xval: If xval is True, then return the F0point5 value for the cross validation data.
:return: The F0point5 for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("f0point5", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def accuracy(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the accuracy for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the accuracy value for the training data.
:param valid: If valid is True, then return the accuracy value for the validation data.
:param xval: If xval is True, then return the accuracy value for the cross validation data.
:return: The accuracy for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("accuracy", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def error(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the error for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the error value for the training data.
:param valid: If valid is True, then return the error value for the validation data.
:param xval: If xval is True, then return the error value for the cross validation data.
:return: The error for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else [[acc[0],1-acc[1]] for acc in v.metric("accuracy", thresholds=thresholds)]
return m.values()[0] if len(m) == 1 else m
def precision(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the precision for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the precision value for the training data.
:param valid: If valid is True, then return the precision value for the validation data.
:param xval: If xval is True, then return the precision value for the cross validation data.
:return: The precision for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("precision", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def tpr(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the True Positive Rate for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the tpr value for the training data.
:param valid: If valid is True, then return the tpr value for the validation data.
:param xval: If xval is True, then return the tpr value for the cross validation data.
:return: The tpr for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("tpr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def tnr(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the True Negative Rate for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the tnr value for the training data.
:param valid: If valid is True, then return the tnr value for the validation data.
:param xval: If xval is True, then return the tnr value for the cross validation data.
    :return: The tnr for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("tnr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def fnr(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the False Negative Rates for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the fnr value for the training data.
:param valid: If valid is True, then return the fnr value for the validation data.
:param xval: If xval is True, then return the fnr value for the cross validation data.
:return: The fnr for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("fnr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def fpr(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the False Positive Rates for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the fpr value for the training data.
:param valid: If valid is True, then return the fpr value for the validation data.
:param xval: If xval is True, then return the fpr value for the cross validation data.
:return: The fpr for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("fpr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def recall(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the Recall (AKA True Positive Rate) for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the recall value for the training data.
:param valid: If valid is True, then return the recall value for the validation data.
:param xval: If xval is True, then return the recall value for the cross validation data.
:return: The recall for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("tpr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def sensitivity(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the sensitivity (AKA True Positive Rate or Recall) for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the sensitivity value for the training data.
:param valid: If valid is True, then return the sensitivity value for the validation data.
:param xval: If xval is True, then return the sensitivity value for the cross validation data.
:return: The sensitivity for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("tpr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def fallout(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the Fallout (AKA False Positive Rate) for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the fallout value for the training data.
:param valid: If valid is True, then return the fallout value for the validation data.
:param xval: If xval is True, then return the fallout value for the cross validation data.
:return: The fallout for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("fpr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def missrate(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the miss rate (AKA False Negative Rate) for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the missrate value for the training data.
:param valid: If valid is True, then return the missrate value for the validation data.
:param xval: If xval is True, then return the missrate value for the cross validation data.
:return: The missrate for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("fnr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def specificity(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the specificity (AKA True Negative Rate) for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the specificity value for the training data.
:param valid: If valid is True, then return the specificity value for the validation data.
:param xval: If xval is True, then return the specificity value for the cross validation data.
:return: The specificity for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("tnr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def mcc(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the mcc for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the mcc value for the training data.
:param valid: If valid is True, then return the mcc value for the validation data.
:param xval: If xval is True, then return the mcc value for the cross validation data.
:return: The mcc for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("absolute_MCC", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def max_per_class_error(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the max per class error for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the max_per_class_error value for the training data.
:param valid: If valid is True, then return the max_per_class_error value for the validation data.
:param xval: If xval is True, then return the max_per_class_error value for the cross validation data.
:return: The max_per_class_error for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else [[mpca[0],1-mpca[1]] for mpca in v.metric("min_per_class_accuracy", thresholds=thresholds)]
return m.values()[0] if len(m) == 1 else m
def metric(self, metric, thresholds=None, train=False, valid=False, xval=False):
"""
Get the metric value for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param metric: name of the metric to retrieve (e.g. "tpr", "fnr", "absolute_MCC"; see confusion_matrix for the accepted names).
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
    :param train: If train is True, then return the metrics for the training data.
    :param valid: If valid is True, then return the metrics for the validation data.
    :param xval: If xval is True, then return the metrics for the cross validation data.
    :return: The metrics for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric(metric,thresholds)
return m.values()[0] if len(m) == 1 else m
def plot(self, type="roc", train=False, valid=False, xval=False, **kwargs):
"""
Produce the desired metric plot
    If all are False (default), then plot the training metrics.
:param type: the type of metric plot (currently, only ROC supported)
:param train: If train is True, then plot for training data.
:param valid: If valid is True, then plot for validation data.
:param xval: If xval is True, then plot for cross validation data.
:param show: if False, the plot is not shown. matplotlib show method is blocking.
:return: None
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
for k,v in zip(tm.keys(),tm.values()):
if v is not None: v.plot(type=type, **kwargs)
def roc(self, train=False, valid=False, xval=False):
"""
Return the coordinates of the ROC curve for a given set of data,
as a two-tuple containing the false positive rates as a list and true positive
rates as a list.
    If all are False (default), then the ROC coordinates for the training data are returned.
If more than one ROC curve is requested, the data is returned as a dictionary
of two-tuples.
:param train: If train is true, then return the ROC coordinates for the training data.
:param valid: If valid is true, then return the ROC coordinates for the validation data.
:param xval: If xval is true, then return the ROC coordinates for the cross validation data.
    :return rocs_coordinates: the coordinates of the roc curve.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()):
if v is not None:
m[k] = (v.fprs, v.tprs)
return m.values()[0] if len(m) == 1 else m
def confusion_matrix(self, metrics=None, thresholds=None, train=False, valid=False, xval=False):
"""
Get the confusion matrix for the specified metrics/thresholds
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
and "xval"
:param metrics: A string (or list of strings) in {"min_per_class_accuracy", "absolute_MCC", "tnr", "fnr", "fpr", "tpr", "precision", "accuracy", "f0point5", "f2", "f1"}
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the confusion matrix value for the training data.
:param valid: If valid is True, then return the confusion matrix value for the validation data.
:param xval: If xval is True, then return the confusion matrix value for the cross validation data.
:return: The confusion matrix for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.confusion_matrix(metrics=metrics, thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def find_threshold_by_max_metric(self,metric,train=False, valid=False, xval=False):
"""
    Find the threshold that maximizes the given metric.
    If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param metric: the metric whose maximizing threshold should be returned (e.g. "f1").
    :param train: If train is True, then return the threshold_by_max_metric value for the training data.
:param valid: If valid is True, then return the threshold_by_max_metric value for the validation data.
:param xval: If xval is True, then return the threshold_by_max_metric value for the cross validation data.
:return: The threshold_by_max_metric for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.find_threshold_by_max_metric(metric)
return m.values()[0] if len(m) == 1 else m
def find_idx_by_threshold(self,threshold,train=False, valid=False, xval=False):
"""
Retrieve the index in this metric's threshold list at which the given threshold is located.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param threshold: the threshold value to locate in this metric's threshold list.
    :param train: If train is True, then return the idx_by_threshold for the training data.
:param valid: If valid is True, then return the idx_by_threshold for the validation data.
:param xval: If xval is True, then return the idx_by_threshold for the cross validation data.
:return: The idx_by_threshold for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.find_idx_by_threshold(threshold)
return m.values()[0] if len(m) == 1 else m
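# Usage sketch (added commentary, not part of the original module): the methods
# above return a single result when exactly one of train/valid/xval is requested,
# and a dict keyed by "train"/"valid"/"xval" otherwise. Assuming `model` is a
# trained binomial model (the name is illustrative):
#
#     model.mcc(train=True)                          # result for the training metrics
#     model.specificity(train=True, valid=True)      # {"train": ..., "valid": ...}
#     model.confusion_matrix(metrics="f1")           # confusion matrix at the max-F1 threshold
#     model.find_threshold_by_max_metric("f1", valid=True)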
| apache-2.0 |
mikebenfield/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 33 | 6157 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_warns_message
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
from numpy.testing import assert_equal
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and scikit-learn do not match in a few places,
# these values are for the scikit-learn version.
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
def test_deprecated_grid_scores(random_state=1):
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
graph_lasso = GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1)
graph_lasso.fit(X)
depr_message = ("Attribute grid_scores was deprecated in version "
"0.19 and will be removed in 0.21. Use "
"'grid_scores_' instead")
assert_warns_message(DeprecationWarning, depr_message,
lambda: graph_lasso.grid_scores)
assert_equal(graph_lasso.grid_scores, graph_lasso.grid_scores_)
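# Reference sketch (added commentary, not an original test): outside the test
# suite, the estimator exercised above is typically used as
#
#     model = GraphLasso(alpha=0.1).fit(X)
#     sparse_precision = model.precision_
#     shrunk_covariance = model.covariance_
#
# where X is an (n_samples, n_features) array and alpha controls the sparsity
# of the estimated precision matrix.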
| bsd-3-clause |
wdurhamh/statsmodels | statsmodels/sandbox/examples/ex_cusum.py | 33 | 3219 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 02 11:41:25 2010
Author: josef-pktd
"""
import numpy as np
from scipy import stats
from numpy.testing import assert_almost_equal
import statsmodels.api as sm
from statsmodels.sandbox.regression.onewaygls import OneWayLS
from statsmodels.stats.diagnostic import recursive_olsresiduals
from statsmodels.sandbox.stats.diagnostic import _recursive_olsresiduals2 as recursive_olsresiduals2
#examples from ex_onewaygls.py
#choose example
#--------------
example = ['null', 'smalldiff', 'mediumdiff', 'largediff'][1]
example_size = [20, 100][1]
example_groups = ['2', '2-2'][1]
#'2-2': 4 groups,
# groups 0 and 1 and groups 2 and 3 have identical parameters in DGP
#generate example
#----------------
#np.random.seed(87654589)
nobs = example_size
x1 = 0.1+np.random.randn(nobs)
y1 = 10 + 15*x1 + 2*np.random.randn(nobs)
x1 = sm.add_constant(x1, prepend=False)
#assert_almost_equal(x1, np.vander(x1[:,0],2), 16)
#res1 = sm.OLS(y1, x1).fit()
#print res1.params
#print np.polyfit(x1[:,0], y1, 1)
#assert_almost_equal(res1.params, np.polyfit(x1[:,0], y1, 1), 14)
#print res1.summary(xname=['x1','const1'])
#regression 2
x2 = 0.1+np.random.randn(nobs)
if example == 'null':
y2 = 10 + 15*x2 + 2*np.random.randn(nobs) # if H0 is true
elif example == 'smalldiff':
y2 = 11 + 16*x2 + 2*np.random.randn(nobs)
elif example == 'mediumdiff':
y2 = 12 + 16*x2 + 2*np.random.randn(nobs)
else:
y2 = 19 + 17*x2 + 2*np.random.randn(nobs)
x2 = sm.add_constant(x2, prepend=False)
# stack
x = np.concatenate((x1,x2),0)
y = np.concatenate((y1,y2))
if example_groups == '2':
groupind = (np.arange(2*nobs)>nobs-1).astype(int)
else:
groupind = np.mod(np.arange(2*nobs),4)
groupind.sort()
#x = np.column_stack((x,x*groupind[:,None]))
res1 = sm.OLS(y, x).fit()
skip = 8
rresid, rparams, rypred, rresid_standardized, rresid_scaled, rcusum, rcusumci = \
recursive_olsresiduals(res1, skip)
print(rcusum)
print(rresid_scaled[skip-1:])
assert_almost_equal(rparams[-1], res1.params)
import matplotlib.pyplot as plt
plt.plot(rcusum)
plt.plot(rcusumci[0])
plt.plot(rcusumci[1])
plt.figure()
plt.plot(rresid)
plt.plot(np.abs(rresid))
print('cusum test reject:')
print(((rcusum[1:]>rcusumci[1])|(rcusum[1:]<rcusumci[0])).any())
rresid2, rparams2, rypred2, rresid_standardized2, rresid_scaled2, rcusum2, rcusumci2 = \
recursive_olsresiduals2(res1, skip)
#assert_almost_equal(rparams[skip+1:], rparams2[skip:-1],13)
assert_almost_equal(rparams[skip:], rparams2[skip:],13)
#np.c_[rparams[skip+1:], rparams2[skip:-1]]
#plt.show()
#################### Example break test
#import statsmodels.sandbox.tools.stattools
from statsmodels.sandbox.stats.diagnostic import breaks_hansen, \
breaks_cusumolsresid#, breaks_cusum
H, crit95, ft, s = breaks_hansen(res1)
print(H)
print(crit95)
supb, pval, crit = breaks_cusumolsresid(res1.resid)
print(supb, pval, crit)
##check whether this works directly: Ploberger/Kramer framing of standard cusum
##no, it's different, there is another denominator
#print breaks_cusumolsresid(rresid[skip:])
#this function is still completely wrong, cut and paste doesn't apply
#print breaks_cusum(rresid[skip:])
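# Added note on the break tests above (interpretation only, no new computation):
# breaks_hansen signals parameter instability roughly when the statistic H
# exceeds the reported critical value crit95, while breaks_cusumolsresid rejects
# stability when pval falls below the chosen significance level (e.g. 0.05),
# i.e. when supb exceeds the corresponding entry of crit.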
| bsd-3-clause |
Alecardv/College-projects | Metodos Numericos 2012/trapecio.py | 1 | 1308 | import function
from matplotlib.pyplot import *
from pylab import *
import numpy as np
import math
class Trapecio:
def __init__(self, fun, xi, xf):
self.fun = function.Function(fun,'x')
self.a,self.b = xi,xf
self.fig, self.ax = subplots()
def relativeError(self):
f = self.fun.getDerivate()
Ea = ((self.b-self.a)**3/12)*((f.evalFunction(self.b) - f.evalFunction(self.a))/(self.b-self.a))
return Ea
def graph(self):
figure()
root = self.method()
print 'AreaAprox = ',root
print 'AreaReal = ',self.fun.getAndEvalIntegral([self.a,self.b])
print 'Error = ',self.relativeError()
Ox = np.arange(self.a-5,self.b+5, 0.02)
Oy = []
for i in Ox:
Oy.append( self.fun.evalFunction(i) )
self.ax.plot(Ox, Oy, color = "blue",lw = 1,label="f(x)")
self.ax.legend(loc=2)
show()
def px(self,x):
return (self.fun.evalFunction(self.b)-self.fun.evalFunction(self.a))/(self.b-self.a)*(x-self.a) + self.fun.evalFunction(self.a)
def method(self):
I = (self.b-self.a)*((self.fun.evalFunction(self.a) + self.fun.evalFunction(self.b))/2)
self.ax.vlines(self.a,0,self.fun.evalFunction(self.a))
self.ax.vlines(self.b,0,self.fun.evalFunction(self.b))
Ox = np.arange(self.a,self.b, 0.02)
Oy = []
for i in Ox:
Oy.append(self.px(i))
self.ax.plot(Ox, Oy,lw = 2)
return I
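# Uso ilustrativo / usage sketch (added; assumes function.Function in this
# project accepts a plain expression in 'x' -- the expression below is illustrative):
#
#     t = Trapecio('x**2 + 1', 0, 2)
#     t.graph()   # prints AreaAprox (trapezoid estimate), AreaReal and the error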
| gpl-3.0 |
voxlol/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
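# Follow-up sketch (added, not part of the original example): the learned
# cluster centers act as a patch dictionary, so new patches can be encoded with
# the fitted estimator (ideally after the same centering/scaling used above):
#
#     new_patches = extract_patches_2d(faces.images[0], patch_size,
#                                      max_patches=50, random_state=rng)
#     codes = kmeans.predict(new_patches.reshape(len(new_patches), -1))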
| bsd-3-clause |
xyguo/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
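# Added sketch of the pattern being benchmarked (illustrative): IncrementalPCA
# can also be updated explicitly chunk by chunk instead of passing the full
# array to fit(), e.g.
#
#     ipca = IncrementalPCA(n_components=10)
#     for chunk in np.array_split(X, 10):
#         ipca.partial_fit(chunk)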
| bsd-3-clause |
henrykironde/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space. Unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
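# Added note (illustrative): among the methods compared above, Isomap and
# LocallyLinearEmbedding expose transform() and so can embed unseen points with
# the already-fitted model, e.g.
#
#     iso = manifold.Isomap(n_neighbors, n_components).fit(X)
#     Y_new = iso.transform(X[:10])
#
# whereas MDS and t-SNE only provide fit_transform().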
| bsd-3-clause |
adamrp/qiime | scripts/categorized_dist_scatterplot.py | 15 | 6299 | #!/usr/bin/env python
# File created on 19 Jan 2011
from __future__ import division
__author__ = "Justin Kuczynski"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Justin Kuczynski"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "justinak@gmail.com"
from qiime.util import make_option
import os
import warnings
warnings.filterwarnings('ignore', 'Not using MPI as mpi4py not found')
from qiime.parse import parse_distmat_to_dict, parse_mapping_file,\
mapping_file_to_dict
from qiime.util import parse_command_line_parameters
import numpy
import matplotlib.pyplot as plt
from qiime.categorized_dist_scatterplot import get_avg_dists, get_sam_ids
script_info = {}
script_info[
'brief_description'] = "Create a categorized distance scatterplot representing average distances between samples, broken down by categories"
script_info[
'script_description'] = "Create a figure representing average distances between samples, broken down by categories. I call it a 'categorized distance scatterplot'. See script usage for more details. The mapping file specifies the relevant data - if you have e.g. 'N/A' values or samples you don't want included, first use filter_samples_from_otu_table.py to remove unwanted samples from the mapping file, and thus the analysis. Note that the resulting plot will include only samples in both the mapping file AND the distance matrix."
script_info['script_usage'] = [(
"Canonical Example:",
"Split samples by country. Within each country compare each child to all adults. Plot the average distance from that child to all adults, vs. the age of that child",
"python categorized_dist_scatterplot.py -m map.txt -d unifrac_distance.txt -c Country -p AgeCategory:Child -s AgeCategory:Adult -a AgeYears -o fig1.png"),
("Example 2:",
"Same as above, but compares Child with all other categories (e.g.: NA, Infant, etc.)",
"python categorized_dist_scatterplot.py -m map.txt -d unifrac_distance.txt -c Country -p AgeCategory:Child -a AgeYears -o fig1.svg")]
script_info[
    'output_description'] = "a figure and the text data for that figure"
script_info['required_options'] = [
make_option('-m', '--map', type='existing_filepath',
help='mapping file'),
make_option('-d', '--distance_matrix', type='existing_filepath',
help='distance matrix'),
make_option('-p', '--primary_state', type='string',
help="Samples matching this state will be plotted. E.g.: AgeCategory:Child . See qiime's filter_samples_from_otu_table.py for more syntax options"),
make_option('-a', '--axis_category', type='string',
help='this will form the horizontal axis of the figure, e.g.: AgeYears . Must be numbers'),
make_option('-o', '--output_path', type='new_dirpath',
                help='output figure, filename extension determines format. E.g.: "fig1.png" or similar. A "fig1.txt" or similar will also be created with the data underlying the figure'),
]
script_info['optional_options'] = [
make_option('-c', '--colorby', type='string',
help='samples will first be separated by this column of the mapping file. They will be colored by this column of the mapping file, and all comparisons will be done only among samples with the same value in this column. e.g.: Country. You may omit -c, and the samples will not be separated'),
make_option('-s', '--secondary_state', type='string',
                help='all samples matching the primary state will be compared to samples matching this secondary state. E.g.: AgeCategory:Adult'),
]
script_info['version'] = __version__
def main():
option_parser, opts, args =\
parse_command_line_parameters(**script_info)
map_data, map_header, map_comments = parse_mapping_file(
open(opts.map, 'U'))
map_dict = mapping_file_to_dict(map_data, map_header)
distdict = parse_distmat_to_dict(open(opts.distance_matrix, 'U'))
if opts.colorby is None:
colorby_cats = [None]
else:
colorby_idx = map_header.index(opts.colorby)
colorby_cats = list(set([map_data[i][colorby_idx] for
i in range(len(map_data))]))
textfilename = os.path.splitext(opts.output_path)[0] + '.txt'
text_fh = open(textfilename, 'w')
text_fh.write(opts.axis_category + '\tdistance\tSampleID' + '\n')
colorby_cats.sort()
plt.figure()
for cat_num, cat in enumerate(colorby_cats):
# collect the primary and secondary samples within this category
state1_samids, state2_samids = get_sam_ids(map_data, map_header,
opts.colorby, cat, opts.primary_state, opts.secondary_state)
state1_samids =\
list(set(state1_samids).intersection(set(distdict.keys())))
state2_samids =\
list(set(state2_samids).intersection(set(distdict.keys())))
if state1_samids == [] or state2_samids == [] or \
(len(state1_samids) == 1 and state1_samids == state2_samids):
raise RuntimeError("one category of samples didn't have any valid" +
" distances. try eliminating samples from -p or -s, or changing" +
" your mapping file with filter_samples_from_otu_table.py")
# go through dmtx
state1_avg_dists = get_avg_dists(
state1_samids,
state2_samids,
distdict)
# plot
xvals = [float(map_dict[sam][opts.axis_category]) for
sam in state1_samids]
try:
color = plt.cm.jet(cat_num / (len(colorby_cats) - 1))
except ZeroDivisionError: # only one cat
color = 'b'
plt.scatter(xvals, state1_avg_dists, edgecolors=color, alpha=.5,
facecolors='none')
plt.xlabel(opts.axis_category)
plt.ylabel('average distance')
lines = [str(xvals[i]) + '\t' + str(state1_avg_dists[i]) +
'\t' + state1_samids[i] + '\n' for i in range(len(xvals))]
text_fh.writelines(lines)
if opts.colorby is not None:
plt.legend(colorby_cats)
plt.savefig(opts.output_path)
if __name__ == "__main__":
main()
| gpl-2.0 |
dssg/wikienergy | disaggregator/build/pandas/pandas/tests/test_frame.py | 1 | 552242 | # -*- coding: utf-8 -*-
from __future__ import print_function
# pylint: disable-msg=W0612,E1101
from copy import deepcopy
from datetime import datetime, timedelta, time
import sys
import operator
import re
import csv
import nose
import functools
import itertools
from itertools import product
from distutils.version import LooseVersion
from pandas.compat import(
map, zip, range, long, lrange, lmap, lzip,
OrderedDict, u, StringIO
)
from pandas import compat
from numpy import random, nan
from numpy.random import randn
import numpy as np
import numpy.ma as ma
from numpy.testing import assert_array_equal
import numpy.ma.mrecords as mrecords
import pandas.core.nanops as nanops
import pandas.core.common as com
import pandas.core.format as fmt
import pandas.core.datetools as datetools
from pandas import (DataFrame, Index, Series, notnull, isnull,
MultiIndex, DatetimeIndex, Timestamp, date_range,
read_csv, timedelta_range, Timedelta,
option_context)
import pandas as pd
from pandas.parser import CParserError
from pandas.util.misc import is_little_endian
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp,
assertRaises,
makeCustomDataframe as mkdf,
ensure_clean)
from pandas.core.indexing import IndexingError
from pandas.core.common import PandasError
import pandas.util.testing as tm
import pandas.lib as lib
from numpy.testing.decorators import slow
#---------------------------------------------------------------------
# DataFrame test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
MIXED_FLOAT_DTYPES = ['float16','float32','float64']
MIXED_INT_DTYPES = ['uint8','uint16','uint32','uint64','int8','int16',
'int32','int64']
def _check_mixed_float(df, dtype = None):
# float16 are most likely to be upcasted to float32
dtypes = dict(A = 'float32', B = 'float32', C = 'float16', D = 'float64')
if isinstance(dtype, compat.string_types):
dtypes = dict([ (k,dtype) for k, v in dtypes.items() ])
elif isinstance(dtype, dict):
dtypes.update(dtype)
if dtypes.get('A'):
assert(df.dtypes['A'] == dtypes['A'])
if dtypes.get('B'):
assert(df.dtypes['B'] == dtypes['B'])
if dtypes.get('C'):
assert(df.dtypes['C'] == dtypes['C'])
if dtypes.get('D'):
assert(df.dtypes['D'] == dtypes['D'])
def _check_mixed_int(df, dtype = None):
dtypes = dict(A = 'int32', B = 'uint64', C = 'uint8', D = 'int64')
if isinstance(dtype, compat.string_types):
dtypes = dict([ (k,dtype) for k, v in dtypes.items() ])
elif isinstance(dtype, dict):
dtypes.update(dtype)
if dtypes.get('A'):
assert(df.dtypes['A'] == dtypes['A'])
if dtypes.get('B'):
assert(df.dtypes['B'] == dtypes['B'])
if dtypes.get('C'):
assert(df.dtypes['C'] == dtypes['C'])
if dtypes.get('D'):
assert(df.dtypes['D'] == dtypes['D'])
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
# slicing
sl = self.frame[:20]
self.assertEqual(20, len(sl.index))
# column access
for _, series in compat.iteritems(sl):
self.assertEqual(20, len(series.index))
self.assertTrue(tm.equalContents(series.index, sl.index))
for key, _ in compat.iteritems(self.frame._series):
self.assertIsNotNone(self.frame[key])
self.assertNotIn('random', self.frame)
with assertRaisesRegexp(KeyError, 'random'):
self.frame['random']
df = self.frame.copy()
df['$10'] = randn(len(df))
ad = randn(len(df))
df['@awesome_domain'] = ad
self.assertRaises(KeyError, df.__getitem__, 'df["$10"]')
res = df['@awesome_domain']
assert_array_equal(ad, res.values)
def test_getitem_dupe_cols(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
try:
df[['baf']]
except KeyError:
pass
else:
self.fail("Dataframe failed to raise KeyError")
def test_get(self):
b = self.frame.get('B')
assert_series_equal(b, self.frame['B'])
self.assertIsNone(self.frame.get('foo'))
assert_series_equal(self.frame.get('foo', self.frame['B']),
self.frame['B'])
# None
# GH 5652
for df in [DataFrame(), DataFrame(columns=list('AB')), DataFrame(columns=list('AB'),index=range(3)) ]:
result = df.get(None)
self.assertIsNone(result)
def test_getitem_iterator(self):
idx = iter(['A', 'B', 'C'])
result = self.frame.ix[:, idx]
expected = self.frame.ix[:, ['A', 'B', 'C']]
assert_frame_equal(result, expected)
def test_getitem_list(self):
self.frame.columns.name = 'foo'
result = self.frame[['B', 'A']]
result2 = self.frame[Index(['B', 'A'])]
expected = self.frame.ix[:, ['B', 'A']]
expected.columns.name = 'foo'
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
self.assertEqual(result.columns.name, 'foo')
with assertRaisesRegexp(KeyError, 'not in index'):
self.frame[['B', 'A', 'food']]
with assertRaisesRegexp(KeyError, 'not in index'):
self.frame[Index(['B', 'A', 'foo'])]
# tuples
df = DataFrame(randn(8, 3),
columns=Index([('foo', 'bar'), ('baz', 'qux'),
('peek', 'aboo')], name=['sth', 'sth2']))
result = df[[('foo', 'bar'), ('baz', 'qux')]]
expected = df.ix[:, :2]
assert_frame_equal(result, expected)
self.assertEqual(result.columns.names, ['sth', 'sth2'])
def test_setitem_list(self):
self.frame['E'] = 'foo'
data = self.frame[['A', 'B']]
self.frame[['B', 'A']] = data
assert_series_equal(self.frame['B'], data['A'])
assert_series_equal(self.frame['A'], data['B'])
with assertRaisesRegexp(ValueError, 'Columns must be same length as key'):
data[['A']] = self.frame[['A', 'B']]
with assertRaisesRegexp(ValueError, 'Length of values does not match '
'length of index'):
data['A'] = range(len(data.index) - 1)
df = DataFrame(0, lrange(3), ['tt1', 'tt2'], dtype=np.int_)
df.ix[1, ['tt1', 'tt2']] = [1, 2]
result = df.ix[1, ['tt1', 'tt2']]
expected = Series([1, 2], df.columns, dtype=np.int_)
assert_series_equal(result, expected)
df['tt1'] = df['tt2'] = '0'
df.ix[1, ['tt1', 'tt2']] = ['1', '2']
result = df.ix[1, ['tt1', 'tt2']]
expected = Series(['1', '2'], df.columns)
assert_series_equal(result, expected)
def test_setitem_list_not_dataframe(self):
data = np.random.randn(len(self.frame), 2)
self.frame[['A', 'B']] = data
assert_almost_equal(self.frame[['A', 'B']].values, data)
def test_setitem_list_of_tuples(self):
tuples = lzip(self.frame['A'], self.frame['B'])
self.frame['tuples'] = tuples
result = self.frame['tuples']
expected = Series(tuples, index=self.frame.index)
assert_series_equal(result, expected)
def test_setitem_mulit_index(self):
# GH7655, test that assigning to a sub-frame of a frame
# with multi-index columns aligns both rows and columns
it = ['jim', 'joe', 'jolie'], ['first', 'last'], \
['left', 'center', 'right']
cols = MultiIndex.from_product(it)
index = pd.date_range('20141006',periods=20)
vals = np.random.randint(1, 1000, (len(index), len(cols)))
df = pd.DataFrame(vals, columns=cols, index=index)
i, j = df.index.values.copy(), it[-1][:]
np.random.shuffle(i)
df['jim'] = df['jolie'].loc[i, ::-1]
assert_frame_equal(df['jim'], df['jolie'])
np.random.shuffle(j)
df[('joe', 'first')] = df[('jolie', 'last')].loc[i, j]
assert_frame_equal(df[('joe', 'first')], df[('jolie', 'last')])
np.random.shuffle(j)
df[('joe', 'last')] = df[('jolie', 'first')].loc[i, j]
assert_frame_equal(df[('joe', 'last')], df[('jolie', 'first')])
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list('abcdefg')
X_orig = DataFrame(np.arange(10*len(columns)).reshape(-1,len(columns)), columns=columns, index=range(10))
Z = 100*X_orig.iloc[:,1:-1].copy()
block1 = list('bedcf')
subs = list('bcdef')
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(np.random.randint(0,5,size=10).reshape(-1,5))
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
assert_series_equal(s,s2)
assert_series_equal(s_orig+1,s)
self.assertIs(s,s2)
self.assertIs(s._data,s2._data)
df = df_orig.copy()
df2 = df
df += 1
assert_frame_equal(df,df2)
assert_frame_equal(df_orig+1,df)
self.assertIs(df,df2)
self.assertIs(df._data,df2._data)
# dtype change
s = s_orig.copy()
s2 = s
s += 1.5
assert_series_equal(s,s2)
assert_series_equal(s_orig+1.5,s)
df = df_orig.copy()
df2 = df
df += 1.5
assert_frame_equal(df,df2)
assert_frame_equal(df_orig+1.5,df)
self.assertIs(df,df2)
self.assertIs(df._data,df2._data)
# mixed dtype
arr = np.random.randint(0,10,size=5)
df_orig = DataFrame({'A' : arr.copy(), 'B' : 'foo'})
df = df_orig.copy()
df2 = df
df['A'] += 1
expected = DataFrame({'A' : arr.copy()+1, 'B' : 'foo'})
assert_frame_equal(df,expected)
assert_frame_equal(df2,expected)
self.assertIs(df._data,df2._data)
df = df_orig.copy()
df2 = df
df['A'] += 1.5
expected = DataFrame({'A' : arr.copy()+1.5, 'B' : 'foo'})
assert_frame_equal(df,expected)
assert_frame_equal(df2,expected)
self.assertIs(df._data,df2._data)
def test_getitem_boolean(self):
# boolean indexing
d = self.tsframe.index[10]
indexer = self.tsframe.index > d
indexer_obj = indexer.astype(object)
subindex = self.tsframe.index[indexer]
subframe = self.tsframe[indexer]
self.assert_numpy_array_equal(subindex, subframe.index)
with assertRaisesRegexp(ValueError, 'Item wrong length'):
self.tsframe[indexer[:-1]]
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
with tm.assertRaisesRegexp(ValueError, 'boolean values only'):
self.tsframe[self.tsframe]
# test that Series work
indexer_obj = Series(indexer_obj, self.tsframe.index)
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning)
indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
warnings.filterwarnings(action='default', category=UserWarning)
# test df[df > 0]
for df in [ self.tsframe, self.mixed_frame, self.mixed_float, self.mixed_int ]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(dict([ (c,np.where(data[c] > 0, data[c], np.nan)) for c in data.columns ]),
index=data.index, columns=data.columns)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns = df.columns)
assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
self.assertEqual(bif[c].dtype, df[c].dtype)
def test_getitem_boolean_casting(self):
# don't upcast if we don't need to
df = self.tsframe.copy()
df['E'] = 1
df['E'] = df['E'].astype('int32')
df['E1'] = df['E'].copy()
df['F'] = 1
df['F'] = df['F'].astype('int64')
df['F1'] = df['F'].copy()
casted = df[df>0]
result = casted.get_dtype_counts()
expected = Series({'float64': 4, 'int32' : 2, 'int64' : 2})
assert_series_equal(result, expected)
# int block splitting
df.ix[1:3,['E1','F1']] = 0
casted = df[df>0]
result = casted.get_dtype_counts()
expected = Series({'float64': 6, 'int32' : 1, 'int64' : 1})
assert_series_equal(result, expected)
# where dtype conversions
# GH 3733
df = DataFrame(data = np.random.randn(100, 50))
df = df.where(df > 0) # create nans
bools = df > 0
mask = isnull(df)
expected = bools.astype(float).mask(mask)
result = bools.mask(mask)
assert_frame_equal(result,expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.ix[df.index[lst]]
assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = randn(5, 5)
df = DataFrame(arr.copy(), columns = ['A','B','C','D','E'])
df[df < 0] += 1
arr[arr < 0] += 1
assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=['A'],
index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(np.random.randn(4, 3),
index=[1, 10, 'C', 'E'], columns=[1, 2, 3])
result = df.ix[:-1]
expected = df.ix[df.index[:-1]]
assert_frame_equal(result, expected)
result = df.ix[[1, 10]]
expected = df.ix[Index([1, 10], dtype=object)]
assert_frame_equal(result, expected)
def test_getitem_setitem_ix_negative_integers(self):
result = self.frame.ix[:, -1]
assert_series_equal(result, self.frame['D'])
result = self.frame.ix[:, [-1]]
assert_frame_equal(result, self.frame[['D']])
result = self.frame.ix[:, [-1, -2]]
assert_frame_equal(result, self.frame[['D', 'C']])
self.frame.ix[:, [-1]] = 0
self.assertTrue((self.frame['D'] == 0).all())
df = DataFrame(np.random.randn(8, 4))
self.assertTrue(isnull(df.ix[:, [-1]].values).all())
# #1942
a = DataFrame(randn(20, 2), index=[chr(x + 65) for x in range(20)])
a.ix[-1] = a.ix[-2]
assert_series_equal(a.ix[-1], a.ix[-2])
def test_getattr(self):
tm.assert_series_equal(self.frame.A, self.frame['A'])
self.assertRaises(AttributeError, getattr, self.frame,
'NONEXISTENT_NAME')
def test_setattr_column(self):
df = DataFrame({'foobar': 1}, index=lrange(10))
df.foobar = 5
self.assertTrue((df.foobar == 5).all())
def test_setitem(self):
# not sure what else to do here
series = self.frame['A'][::2]
self.frame['col5'] = series
self.assertIn('col5', self.frame)
tm.assert_dict_equal(series, self.frame['col5'],
compare_keys=False)
series = self.frame['A']
self.frame['col6'] = series
tm.assert_dict_equal(series, self.frame['col6'],
compare_keys=False)
with tm.assertRaises(KeyError):
self.frame[randn(len(self.frame) + 1)] = 1
# set ndarray
arr = randn(len(self.frame))
self.frame['col9'] = arr
self.assertTrue((self.frame['col9'] == arr).all())
self.frame['col7'] = 5
assert((self.frame['col7'] == 5).all())
self.frame['col0'] = 3.14
assert((self.frame['col0'] == 3.14).all())
self.frame['col8'] = 'foo'
assert((self.frame['col8'] == 'foo').all())
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = self.frame[:2]
def f():
smaller['col10'] = ['1', '2']
self.assertRaises(com.SettingWithCopyError, f)
self.assertEqual(smaller['col10'].dtype, np.object_)
self.assertTrue((smaller['col10'] == ['1', '2']).all())
# with a dtype
for dtype in ['int32','int64','float32','float64']:
self.frame[dtype] = np.array(arr,dtype=dtype)
self.assertEqual(self.frame[dtype].dtype.name, dtype)
# dtype changing GH4204
df = DataFrame([[0,0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan,np.nan]])
assert_frame_equal(df,expected)
df = DataFrame([[0,0]])
df.loc[0] = np.nan
assert_frame_equal(df,expected)
def test_setitem_tuple(self):
self.frame['A', 'B'] = self.frame['A']
assert_series_equal(self.frame['A', 'B'], self.frame['A'])
def test_setitem_always_copy(self):
s = self.frame['A'].copy()
self.frame['E'] = s
self.frame['E'][5:10] = nan
self.assertTrue(notnull(s[5:10]).all())
def test_setitem_boolean(self):
df = self.frame.copy()
values = self.frame.values
df[df['A'] > 0] = 4
values[values[:, 0] > 0] = 4
assert_almost_equal(df.values, values)
# test that column reindexing works
series = df['A'] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'Must pass DataFrame with boolean '
'values only'):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = nan
expected.values[mask.values] = nan
assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
assert_frame_equal(df, expected)
def test_setitem_cast(self):
self.frame['D'] = self.frame['D'].astype('i8')
self.assertEqual(self.frame['D'].dtype, np.int64)
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
self.frame['B'] = 0
self.assertEqual(self.frame['B'].dtype, np.int64)
# cast if pass array of course
self.frame['B'] = np.arange(len(self.frame))
self.assertTrue(issubclass(self.frame['B'].dtype.type, np.integer))
self.frame['foo'] = 'bar'
self.frame['foo'] = 0
self.assertEqual(self.frame['foo'].dtype, np.int64)
self.frame['foo'] = 'bar'
self.frame['foo'] = 2.5
self.assertEqual(self.frame['foo'].dtype, np.float64)
self.frame['something'] = 0
self.assertEqual(self.frame['something'].dtype, np.int64)
self.frame['something'] = 2
self.assertEqual(self.frame['something'].dtype, np.int64)
self.frame['something'] = 2.5
self.assertEqual(self.frame['something'].dtype, np.float64)
# GH 7704
# dtype conversion on setting
df = DataFrame(np.random.rand(30, 3), columns=tuple('ABC'))
df['event'] = np.nan
df.loc[10,'event'] = 'foo'
result = df.get_dtype_counts().order()
expected = Series({'float64' : 3, 'object' : 1 }).order()
assert_series_equal(result, expected)
def test_setitem_boolean_column(self):
expected = self.frame.copy()
mask = self.frame['A'] > 0
self.frame.ix[mask, 'B'] = 0
expected.values[mask.values, 1] = 0
assert_frame_equal(self.frame, expected)
def test_setitem_corner(self):
# corner case
df = DataFrame({'B': [1., 2., 3.],
'C': ['a', 'b', 'c']},
index=np.arange(3))
del df['B']
df['B'] = [1., 2., 3.]
self.assertIn('B', df)
self.assertEqual(len(df.columns), 2)
df['A'] = 'beginning'
df['E'] = 'foo'
df['D'] = 'bar'
df[datetime.now()] = 'date'
df[datetime.now()] = 5.
# what to do when empty frame with index
dm = DataFrame(index=self.frame.index)
dm['A'] = 'foo'
dm['B'] = 'bar'
self.assertEqual(len(dm.columns), 2)
self.assertEqual(dm.values.dtype, np.object_)
# upcast
dm['C'] = 1
self.assertEqual(dm['C'].dtype, np.int64)
dm['E'] = 1.
self.assertEqual(dm['E'].dtype, np.float64)
# set existing column
dm['A'] = 'bar'
self.assertEqual('bar', dm['A'][0])
dm = DataFrame(index=np.arange(3))
dm['A'] = 1
dm['foo'] = 'bar'
del dm['foo']
dm['foo'] = 'bar'
self.assertEqual(dm['foo'].dtype, np.object_)
dm['coercable'] = ['1', '2', '3']
self.assertEqual(dm['coercable'].dtype, np.object_)
def test_setitem_corner2(self):
data = {"title": ['foobar', 'bar', 'foobar'] + ['foobar'] * 17,
"cruft": np.random.random(20)}
df = DataFrame(data)
ix = df[df['title'] == 'bar'].index
df.ix[ix, ['title']] = 'foobar'
df.ix[ix, ['cruft']] = 0
assert(df.ix[1, 'title'] == 'foobar')
assert(df.ix[1, 'cruft'] == 0)
def test_setitem_ambig(self):
# difficulties with mixed-type data
from decimal import Decimal
# created as float type
dm = DataFrame(index=lrange(3), columns=lrange(3))
coercable_series = Series([Decimal(1) for _ in range(3)],
index=lrange(3))
uncoercable_series = Series(['foo', 'bzr', 'baz'], index=lrange(3))
dm[0] = np.ones(3)
self.assertEqual(len(dm.columns), 3)
# self.assertIsNone(dm.objects)
dm[1] = coercable_series
self.assertEqual(len(dm.columns), 3)
# self.assertIsNone(dm.objects)
dm[2] = uncoercable_series
self.assertEqual(len(dm.columns), 3)
# self.assertIsNotNone(dm.objects)
self.assertEqual(dm[2].dtype, np.object_)
def test_setitem_clear_caches(self):
# GH #304
df = DataFrame({'x': [1.1, 2.1, 3.1, 4.1], 'y': [5.1, 6.1, 7.1, 8.1]},
index=[0, 1, 2, 3])
df.insert(2, 'z', np.nan)
# cache it
foo = df['z']
df.ix[2:, 'z'] = 42
expected = Series([np.nan, np.nan, 42, 42], index=df.index)
self.assertIsNot(df['z'], foo)
assert_series_equal(df['z'], expected)
def test_setitem_None(self):
# GH #766
self.frame[None] = self.frame['A']
assert_series_equal(self.frame.iloc[:,-1], self.frame['A'])
assert_series_equal(self.frame.loc[:,None], self.frame['A'])
assert_series_equal(self.frame[None], self.frame['A'])
repr(self.frame)
def test_delitem_corner(self):
f = self.frame.copy()
del f['D']
self.assertEqual(len(f.columns), 3)
self.assertRaises(KeyError, f.__delitem__, 'D')
del f['B']
self.assertEqual(len(f.columns), 2)
def test_getitem_fancy_2d(self):
f = self.frame
ix = f.ix
assert_frame_equal(ix[:, ['B', 'A']], f.reindex(columns=['B', 'A']))
subidx = self.frame.index[[5, 4, 1]]
assert_frame_equal(ix[subidx, ['B', 'A']],
f.reindex(index=subidx, columns=['B', 'A']))
# slicing rows, etc.
assert_frame_equal(ix[5:10], f[5:10])
assert_frame_equal(ix[5:10, :], f[5:10])
assert_frame_equal(ix[:5, ['A', 'B']],
f.reindex(index=f.index[:5], columns=['A', 'B']))
# slice rows with labels, inclusive!
expected = ix[5:11]
result = ix[f.index[5]:f.index[10]]
assert_frame_equal(expected, result)
# slice columns
assert_frame_equal(ix[:, :2], f.reindex(columns=['A', 'B']))
# get view
exp = f.copy()
ix[5:10].values[:] = 5
exp.values[5:10] = 5
assert_frame_equal(f, exp)
self.assertRaises(ValueError, ix.__getitem__, f > 0.5)
def test_slice_floats(self):
index = [52195.504153, 52196.303147, 52198.369883]
df = DataFrame(np.random.rand(3, 2), index=index)
s1 = df.ix[52195.1:52196.5]
self.assertEqual(len(s1), 2)
s1 = df.ix[52195.1:52196.6]
self.assertEqual(len(s1), 2)
s1 = df.ix[52195.1:52198.9]
self.assertEqual(len(s1), 3)
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.ix[:8:2]
df.ix[:8:2] = np.nan
self.assertTrue(isnull(df.ix[:8:2]).values.all())
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=lrange(0, 20, 2))
# this is OK
cp = df.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).values.all())
# so is this
cp = df.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = df.ix[4:10]
result2 = df.ix[3:11]
expected = df.reindex([4, 6, 8, 10])
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, df2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, df2.ix.__setitem__, slice(3, 11), 0)
def test_setitem_fancy_2d(self):
f = self.frame
ix = f.ix
# case 1
frame = self.frame.copy()
expected = frame.copy()
frame.ix[:, ['B', 'A']] = 1
expected['B'] = 1.
expected['A'] = 1.
assert_frame_equal(frame, expected)
# case 2
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = frame.copy()
subidx = self.frame.index[[5, 4, 1]]
values = randn(3, 2)
frame.ix[subidx, ['B', 'A']] = values
frame2.ix[[5, 4, 1], ['B', 'A']] = values
expected['B'].ix[subidx] = values[:, 0]
expected['A'].ix[subidx] = values[:, 1]
assert_frame_equal(frame, expected)
assert_frame_equal(frame2, expected)
# case 3: slicing rows, etc.
frame = self.frame.copy()
expected1 = self.frame.copy()
frame.ix[5:10] = 1.
expected1.values[5:10] = 1.
assert_frame_equal(frame, expected1)
expected2 = self.frame.copy()
arr = randn(5, len(frame.columns))
frame.ix[5:10] = arr
expected2.values[5:10] = arr
assert_frame_equal(frame, expected2)
# case 4
frame = self.frame.copy()
frame.ix[5:10, :] = 1.
assert_frame_equal(frame, expected1)
frame.ix[5:10, :] = arr
assert_frame_equal(frame, expected2)
# case 5
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = self.frame.copy()
values = randn(5, 2)
frame.ix[:5, ['A', 'B']] = values
expected['A'][:5] = values[:, 0]
expected['B'][:5] = values[:, 1]
assert_frame_equal(frame, expected)
frame2.ix[:5, [0, 1]] = values
assert_frame_equal(frame2, expected)
# case 6: slice rows with labels, inclusive!
frame = self.frame.copy()
expected = self.frame.copy()
frame.ix[frame.index[5]:frame.index[10]] = 5.
expected.values[5:11] = 5
assert_frame_equal(frame, expected)
# case 7: slice columns
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = self.frame.copy()
# slice indices
frame.ix[:, 1:3] = 4.
expected.values[:, 1:3] = 4.
assert_frame_equal(frame, expected)
# slice with labels
frame.ix[:, 'B':'C'] = 4.
assert_frame_equal(frame, expected)
# new corner case of boolean slicing / setting
frame = DataFrame(lzip([2, 3, 9, 6, 7], [np.nan] * 5),
columns=['a', 'b'])
lst = [100]
lst.extend([np.nan] * 4)
expected = DataFrame(lzip([100, 3, 9, 6, 7], lst),
columns=['a', 'b'])
frame[frame['a'] == 2] = 100
assert_frame_equal(frame, expected)
def test_fancy_getitem_slice_mixed(self):
sliced = self.mixed_frame.ix[:, -3:]
self.assertEqual(sliced['D'].dtype, np.float64)
# get view with single block
# setting it triggers setting with copy
sliced = self.frame.ix[:, -3:]
def f():
sliced['C'] = 4.
self.assertRaises(com.SettingWithCopyError, f)
self.assertTrue((self.frame['C'] == 4).all())
def test_fancy_setitem_int_labels(self):
# integer index defers to label-based indexing
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
tmp = df.copy()
exp = df.copy()
tmp.ix[[0, 2, 4]] = 5
exp.values[:3] = 5
assert_frame_equal(tmp, exp)
tmp = df.copy()
exp = df.copy()
tmp.ix[6] = 5
exp.values[3] = 5
assert_frame_equal(tmp, exp)
tmp = df.copy()
exp = df.copy()
tmp.ix[:, 2] = 5
# tmp correctly sets the dtype
# so match the exp way
exp[2] = 5
assert_frame_equal(tmp, exp)
def test_fancy_getitem_int_labels(self):
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
result = df.ix[[4, 2, 0], [2, 0]]
expected = df.reindex(index=[4, 2, 0], columns=[2, 0])
assert_frame_equal(result, expected)
result = df.ix[[4, 2, 0]]
expected = df.reindex(index=[4, 2, 0])
assert_frame_equal(result, expected)
result = df.ix[4]
expected = df.xs(4)
assert_series_equal(result, expected)
result = df.ix[:, 3]
expected = df[3]
assert_series_equal(result, expected)
def test_fancy_index_int_labels_exceptions(self):
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
# labels that aren't contained
self.assertRaises(KeyError, df.ix.__setitem__,
([0, 1, 2], [2, 3, 4]), 5)
# try to set indices not contained in frame
self.assertRaises(KeyError,
self.frame.ix.__setitem__,
['foo', 'bar', 'baz'], 1)
self.assertRaises(KeyError,
self.frame.ix.__setitem__,
(slice(None, None), ['E']), 1)
# partial setting now allows this GH2578
#self.assertRaises(KeyError,
# self.frame.ix.__setitem__,
# (slice(None, None), 'E'), 1)
def test_setitem_fancy_mixed_2d(self):
self.mixed_frame.ix[:5, ['C', 'B', 'A']] = 5
result = self.mixed_frame.ix[:5, ['C', 'B', 'A']]
self.assertTrue((result.values == 5).all())
self.mixed_frame.ix[5] = np.nan
self.assertTrue(isnull(self.mixed_frame.ix[5]).all())
self.mixed_frame.ix[5] = self.mixed_frame.ix[6]
assert_series_equal(self.mixed_frame.ix[5], self.mixed_frame.ix[6])
# #1432
df = DataFrame({1: [1., 2., 3.],
2: [3, 4, 5]})
self.assertTrue(df._is_mixed_type)
df.ix[1] = [5, 10]
expected = DataFrame({1: [1., 5., 3.],
2: [3, 10, 5]})
assert_frame_equal(df, expected)
def test_ix_align(self):
b = Series(randn(10))
b.sort()
df_orig = DataFrame(randn(10, 4))
df = df_orig.copy()
df.ix[:, 0] = b
assert_series_equal(df.ix[:, 0].reindex(b.index), b)
dft = df_orig.T
dft.ix[0, :] = b
assert_series_equal(dft.ix[0, :].reindex(b.index), b)
df = df_orig.copy()
df.ix[:5, 0] = b
s = df.ix[:5, 0]
assert_series_equal(s, b.reindex(s.index))
dft = df_orig.T
dft.ix[0, :5] = b
s = dft.ix[0, :5]
assert_series_equal(s, b.reindex(s.index))
df = df_orig.copy()
idx = [0, 1, 3, 5]
df.ix[idx, 0] = b
s = df.ix[idx, 0]
assert_series_equal(s, b.reindex(s.index))
dft = df_orig.T
dft.ix[0, idx] = b
s = dft.ix[0, idx]
assert_series_equal(s, b.reindex(s.index))
def test_ix_frame_align(self):
b = DataFrame(np.random.randn(3, 4))
df_orig = DataFrame(randn(10, 4))
df = df_orig.copy()
df.ix[:3] = b
out = b.ix[:3]
assert_frame_equal(out, b)
b.sort_index(inplace=True)
df = df_orig.copy()
df.ix[[0, 1, 2]] = b
out = df.ix[[0, 1, 2]].reindex(b.index)
assert_frame_equal(out, b)
df = df_orig.copy()
df.ix[:3] = b
out = df.ix[:3]
assert_frame_equal(out, b.reindex(out.index))
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
start, end = df.index[[5, 10]]
result = df.ix[start:end]
result2 = df[start:end]
expected = df[5:11]
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
result = df.copy()
result.ix[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.ix[df.index == 0, :]
xp = df.reindex([0])
assert_frame_equal(rs, xp)
""" #1321
df = DataFrame(np.random.randn(3, 2))
rs = df.ix[df.index==0, df.columns==1]
xp = df.reindex([0], [1])
assert_frame_equal(rs, xp)
"""
def test_ix_multi_take_nonint_index(self):
df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
columns=['a', 'b'])
rs = df.ix[[0], [0]]
xp = df.reindex(['x'], columns=['a'])
assert_frame_equal(rs, xp)
def test_ix_multi_take_multiindex(self):
df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
columns=[['a', 'b'], ['1', '2']])
rs = df.ix[[0], [0]]
xp = df.reindex(['x'], columns=[('a', '1')])
assert_frame_equal(rs, xp)
def test_ix_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
df = DataFrame(np.random.randn(len(idx), 3), idx)
sub = df.ix[:'d']
assert_frame_equal(sub, df)
sub = df.ix['a':'c']
assert_frame_equal(sub, df.ix[0:4])
sub = df.ix['b':'d']
assert_frame_equal(sub, df.ix[2:])
def test_getitem_fancy_1d(self):
f = self.frame
ix = f.ix
# return self if no slicing...for now
self.assertIs(ix[:, :], f)
# low dimensional slice
xs1 = ix[2, ['C', 'B', 'A']]
xs2 = f.xs(f.index[2]).reindex(['C', 'B', 'A'])
assert_series_equal(xs1, xs2)
ts1 = ix[5:10, 2]
ts2 = f[f.columns[2]][5:10]
assert_series_equal(ts1, ts2)
# positional xs
xs1 = ix[0]
xs2 = f.xs(f.index[0])
assert_series_equal(xs1, xs2)
xs1 = ix[f.index[5]]
xs2 = f.xs(f.index[5])
assert_series_equal(xs1, xs2)
# single column
assert_series_equal(ix[:, 'A'], f['A'])
# return view
exp = f.copy()
exp.values[5] = 4
ix[5][:] = 4
assert_frame_equal(exp, f)
exp.values[:, 1] = 6
ix[:, 1][:] = 6
assert_frame_equal(exp, f)
# slice of mixed-frame
xs = self.mixed_frame.ix[5]
exp = self.mixed_frame.xs(self.mixed_frame.index[5])
assert_series_equal(xs, exp)
def test_setitem_fancy_1d(self):
# case 1: set cross-section for indices
frame = self.frame.copy()
expected = self.frame.copy()
frame.ix[2, ['C', 'B', 'A']] = [1., 2., 3.]
expected['C'][2] = 1.
expected['B'][2] = 2.
expected['A'][2] = 3.
assert_frame_equal(frame, expected)
frame2 = self.frame.copy()
frame2.ix[2, [3, 2, 1]] = [1., 2., 3.]
assert_frame_equal(frame, expected)
# case 2, set a section of a column
frame = self.frame.copy()
expected = self.frame.copy()
vals = randn(5)
expected.values[5:10, 2] = vals
frame.ix[5:10, 2] = vals
assert_frame_equal(frame, expected)
frame2 = self.frame.copy()
frame2.ix[5:10, 'B'] = vals
assert_frame_equal(frame, expected)
# case 3: full xs
frame = self.frame.copy()
expected = self.frame.copy()
frame.ix[4] = 5.
expected.values[4] = 5.
assert_frame_equal(frame, expected)
frame.ix[frame.index[4]] = 6.
expected.values[4] = 6.
assert_frame_equal(frame, expected)
# single column
frame = self.frame.copy()
expected = self.frame.copy()
frame.ix[:, 'A'] = 7.
expected['A'] = 7.
assert_frame_equal(frame, expected)
def test_getitem_fancy_scalar(self):
f = self.frame
ix = f.ix
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert_almost_equal(ix[idx, col], ts[idx])
def test_setitem_fancy_scalar(self):
f = self.frame
expected = self.frame.copy()
ix = f.ix
# individual value
for j, col in enumerate(f.columns):
ts = f[col]
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = randn()
expected.values[i, j] = val
ix[idx, col] = val
assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self):
f = self.frame
ix = f.ix
expected = f.reindex(columns=['B', 'D'])
result = ix[:, [False, True, False, True]]
assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=['B', 'D'])
result = ix[5:10, [False, True, False, True]]
assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
assert_frame_equal(result, expected)
result = ix[boolvec, :]
assert_frame_equal(result, expected)
result = ix[boolvec, 2:]
expected = f.reindex(index=f.index[boolvec],
columns=['C', 'D'])
assert_frame_equal(result, expected)
def test_setitem_fancy_boolean(self):
# from 2d, set with booleans
frame = self.frame.copy()
expected = self.frame.copy()
mask = frame['A'] > 0
frame.ix[mask] = 0.
expected.values[mask.values] = 0.
assert_frame_equal(frame, expected)
frame = self.frame.copy()
expected = self.frame.copy()
frame.ix[mask, ['A', 'B']] = 0.
expected.values[mask.values, :2] = 0.
assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self):
result = self.frame.ix[[1, 4, 7]]
expected = self.frame.ix[self.frame.index[[1, 4, 7]]]
assert_frame_equal(result, expected)
result = self.frame.ix[:, [2, 0, 1]]
expected = self.frame.ix[:, self.frame.columns[[2, 0, 1]]]
assert_frame_equal(result, expected)
def test_getitem_setitem_fancy_exceptions(self):
ix = self.frame.ix
with assertRaisesRegexp(IndexingError, 'Too many indexers'):
ix[:, :, :]
with assertRaisesRegexp(IndexingError, 'only tuples of length <= 2 '
'supported'):
ix[:, :, :] = 1
def test_getitem_setitem_boolean_misaligned(self):
# boolean index misaligned labels
mask = self.frame['A'][::-1] > 1
result = self.frame.ix[mask]
expected = self.frame.ix[mask[::-1]]
assert_frame_equal(result, expected)
cp = self.frame.copy()
expected = self.frame.copy()
cp.ix[mask] = 0
expected.ix[mask] = 0
assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.randn(3, 2))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.ix[k1, k2]
expected = df.ix[[0, 2], [1]]
assert_frame_equal(result, expected)
expected = df.copy()
df.ix[np.array([True, False, True]),
np.array([False, True])] = 5
expected.ix[[0, 2], [1]] = 5
assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.random.randn(5, 5), index=index)
result = df.ix[1.5:4]
expected = df.reindex([1.5, 2, 3, 4])
assert_frame_equal(result, expected)
self.assertEqual(len(result), 4)
result = df.ix[4:5]
expected = df.reindex([4, 5])
assert_frame_equal(result, expected)
self.assertEqual(len(result), 2)
# loc_float changes this to work properly
result = df.ix[1:2]
expected = df.iloc[0:2]
assert_frame_equal(result, expected)
df.ix[1:2] = 0
result = df[1:2]
self.assertTrue((result==0).all().all())
# #2727
index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
df = DataFrame(np.random.randn(5, 5), index=index)
# positional slicing only via iloc!
with tm.assert_produces_warning(FutureWarning):
result = df.iloc[1.0:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
assert_frame_equal(result, expected)
self.assertEqual(len(result), 4)
result = df.iloc[4:5]
expected = df.reindex([5.0])
assert_frame_equal(result, expected)
self.assertEqual(len(result), 1)
# GH 4892, float indexers in iloc are deprecated
import warnings
warnings.filterwarnings(action='error', category=FutureWarning)
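# with FutureWarning escalated to an error, the deprecated float indexers below are expected to raise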
cp = df.copy()
def f():
cp.iloc[1.0:5] = 0
self.assertRaises(FutureWarning, f)
def f():
result = cp.iloc[1.0:5] == 0
self.assertRaises(FutureWarning, f)
self.assertTrue(result.values.all())
self.assertTrue((cp.iloc[0:1] == df.iloc[0:1]).values.all())
warnings.filterwarnings(action='ignore', category=FutureWarning)
cp = df.copy()
cp.iloc[4:5] = 0
self.assertTrue((cp.iloc[4:5] == 0).values.all())
self.assertTrue((cp.iloc[0:4] == df.iloc[0:4]).values.all())
# float slicing
result = df.ix[1.0:5]
expected = df
assert_frame_equal(result, expected)
self.assertEqual(len(result), 5)
result = df.ix[1.1:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
assert_frame_equal(result, expected)
self.assertEqual(len(result), 4)
result = df.ix[4.51:5]
expected = df.reindex([5.0])
assert_frame_equal(result, expected)
self.assertEqual(len(result), 1)
result = df.ix[1.0:5.0]
expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
assert_frame_equal(result, expected)
self.assertEqual(len(result), 5)
cp = df.copy()
cp.ix[1.0:5.0] = 0
result = cp.ix[1.0:5.0]
self.assertTrue((result == 0).values.all())
def test_setitem_single_column_mixed(self):
df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
columns=['foo', 'bar', 'baz'])
df['str'] = 'qux'
df.ix[::2, 'str'] = nan
expected = [nan, 'qux', nan, 'qux', nan]
assert_almost_equal(df['str'].values, expected)
def test_setitem_single_column_mixed_datetime(self):
df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
columns=['foo', 'bar', 'baz'])
df['timestamp'] = Timestamp('20010102')
# check our dtypes
result = df.get_dtype_counts()
expected = Series({'float64': 3, 'datetime64[ns]': 1})
assert_series_equal(result, expected)
# set an allowable datetime64 type
from pandas import tslib
df.ix['b', 'timestamp'] = tslib.iNaT
self.assertTrue(com.isnull(df.ix['b', 'timestamp']))
# allow this syntax
df.ix['c', 'timestamp'] = nan
self.assertTrue(com.isnull(df.ix['c', 'timestamp']))
# allow this syntax
df.ix['d', :] = nan
self.assertTrue(com.isnull(df.ix['c', :]).all() == False)
# as of GH 3216 this will now work!
# try to set with a list like item
#self.assertRaises(
# Exception, df.ix.__setitem__, ('d', 'timestamp'), [nan])
def test_setitem_frame(self):
piece = self.frame.ix[:2, ['A', 'B']]
self.frame.ix[-2:, ['A', 'B']] = piece.values
assert_almost_equal(self.frame.ix[-2:, ['A', 'B']].values,
piece.values)
# GH 3216
# already aligned
f = self.mixed_frame.copy()
piece = DataFrame([[ 1, 2], [3, 4]], index=f.index[0:2],columns=['A', 'B'])
key = (slice(None,2), ['A', 'B'])
f.ix[key] = piece
assert_almost_equal(f.ix[0:2, ['A', 'B']].values,
piece.values)
# rows unaligned
f = self.mixed_frame.copy()
piece = DataFrame([[ 1, 2 ], [3, 4], [5, 6], [7, 8]], index=list(f.index[0:2]) + ['foo','bar'],columns=['A', 'B'])
key = (slice(None,2), ['A', 'B'])
f.ix[key] = piece
assert_almost_equal(f.ix[0:2:, ['A', 'B']].values,
piece.values[0:2])
# key is unaligned with values
f = self.mixed_frame.copy()
piece = f.ix[:2, ['A']]
piece.index = f.index[-2:]
key = (slice(-2, None), ['A', 'B'])
f.ix[key] = piece
piece['B'] = np.nan
assert_almost_equal(f.ix[-2:, ['A', 'B']].values,
piece.values)
# ndarray
f = self.mixed_frame.copy()
piece = self.mixed_frame.ix[:2, ['A', 'B']]
key = (slice(-2, None), ['A', 'B'])
f.ix[key] = piece.values
assert_almost_equal(f.ix[-2:, ['A', 'B']].values,
piece.values)
# needs upcasting
df = DataFrame([[1,2,'foo'],[3,4,'bar']],columns=['A','B','C'])
df2 = df.copy()
df2.ix[:,['A','B']] = df.ix[:,['A','B']]+0.5
expected = df.reindex(columns=['A','B'])
expected += 0.5
expected['C'] = df['C']
assert_frame_equal(df2, expected)
def test_setitem_frame_align(self):
piece = self.frame.ix[:2, ['A', 'B']]
piece.index = self.frame.index[-2:]
piece.columns = ['A', 'B']
self.frame.ix[-2:, ['A', 'B']] = piece
assert_almost_equal(self.frame.ix[-2:, ['A', 'B']].values,
piece.values)
def test_setitem_fancy_exceptions(self):
pass
def test_getitem_boolean_missing(self):
pass
def test_setitem_boolean_missing(self):
pass
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(np.random.randn(5, 3),
index=['foo', 'foo', 'bar', 'baz', 'bar'])
result = df.ix['foo']
expected = df[:2]
assert_frame_equal(result, expected)
result = df.ix['bar']
expected = df.ix[[2, 4]]
assert_frame_equal(result, expected)
result = df.ix['baz']
expected = df.ix[3]
assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
# #1201
df = DataFrame(np.random.randn(5, 3),
index=['foo', 'foo', 'bar', 'baz', 'bar'])
result = df.ix[['bar']]
exp = df.ix[[2, 4]]
assert_frame_equal(result, exp)
result = df.ix[df[1] > 0]
exp = df[df[1] > 0]
assert_frame_equal(result, exp)
result = df.ix[df[0] > 0]
exp = df[df[0] > 0]
assert_frame_equal(result, exp)
def test_getitem_setitem_ix_bool_keyerror(self):
# #2199
df = DataFrame({'a': [1, 2, 3]})
self.assertRaises(KeyError, df.ix.__getitem__, False)
self.assertRaises(KeyError, df.ix.__getitem__, True)
self.assertRaises(KeyError, df.ix.__setitem__, False, 0)
self.assertRaises(KeyError, df.ix.__setitem__, True, 0)
def test_getitem_list_duplicates(self):
# #1943
df = DataFrame(np.random.randn(4, 4), columns=list('AABC'))
df.columns.name = 'foo'
result = df[['B', 'C']]
self.assertEqual(result.columns.name, 'foo')
expected = df.ix[:, 2:]
assert_frame_equal(result, expected)
def test_get_value(self):
for idx in self.frame.index:
for col in self.frame.columns:
result = self.frame.get_value(idx, col)
expected = self.frame[col][idx]
assert_almost_equal(result, expected)
def test_iteritems(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
for k, v in compat.iteritems(df):
self.assertEqual(type(v), Series)
def test_lookup(self):
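# lookup(rows, cols) returns one value per (row, col) pair; alt() below rebuilds the same result with repeated get_value calls for comparison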
def alt(df, rows, cols):
result = []
for r, c in zip(rows, cols):
result.append(df.get_value(r, c))
return result
def testit(df):
rows = list(df.index) * len(df.columns)
cols = list(df.columns) * len(df.index)
result = df.lookup(rows, cols)
expected = alt(df, rows, cols)
assert_almost_equal(result, expected)
testit(self.mixed_frame)
testit(self.frame)
df = DataFrame({'label': ['a', 'b', 'a', 'c'],
'mask_a': [True, True, False, True],
'mask_b': [True, False, False, False],
'mask_c': [False, True, False, True]})
df['mask'] = df.lookup(df.index, 'mask_' + df['label'])
exp_mask = alt(df, df.index, 'mask_' + df['label'])
assert_almost_equal(df['mask'], exp_mask)
self.assertEqual(df['mask'].dtype, np.bool_)
with tm.assertRaises(KeyError):
self.frame.lookup(['xyz'], ['A'])
with tm.assertRaises(KeyError):
self.frame.lookup([self.frame.index[0]], ['xyz'])
with tm.assertRaisesRegexp(ValueError, 'same size'):
self.frame.lookup(['a', 'b', 'c'], ['a'])
def test_set_value(self):
for idx in self.frame.index:
for col in self.frame.columns:
self.frame.set_value(idx, col, 1)
assert_almost_equal(self.frame[col][idx], 1)
def test_set_value_resize(self):
res = self.frame.set_value('foobar', 'B', 0)
self.assertIs(res, self.frame)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res.get_value('foobar', 'B'), 0)
self.frame.loc['foobar','qux'] = 0
self.assertEqual(self.frame.get_value('foobar', 'qux'), 0)
res = self.frame.copy()
res3 = res.set_value('foobar', 'baz', 'sam')
self.assertEqual(res3['baz'].dtype, np.object_)
res = self.frame.copy()
res3 = res.set_value('foobar', 'baz', True)
self.assertEqual(res3['baz'].dtype, np.object_)
res = self.frame.copy()
res3 = res.set_value('foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['baz']))
self.assertTrue(isnull(res3['baz'].drop(['foobar'])).all())
self.assertRaises(ValueError, res3.set_value, 'foobar', 'baz', 'sam')
def test_set_value_with_index_dtype_change(self):
df_orig = DataFrame(randn(3, 3), index=lrange(3), columns=list('ABC'))
# this is actually ambiguous as the 2 is interpreted as a positional
# so column is not created
df = df_orig.copy()
df.set_value('C', 2, 1.0)
self.assertEqual(list(df.index), list(df_orig.index) + ['C'])
#self.assertEqual(list(df.columns), list(df_orig.columns) + [2])
df = df_orig.copy()
df.loc['C', 2] = 1.0
self.assertEqual(list(df.index), list(df_orig.index) + ['C'])
#self.assertEqual(list(df.columns), list(df_orig.columns) + [2])
# create both new
df = df_orig.copy()
df.set_value('C', 'D', 1.0)
self.assertEqual(list(df.index), list(df_orig.index) + ['C'])
self.assertEqual(list(df.columns), list(df_orig.columns) + ['D'])
df = df_orig.copy()
df.loc['C', 'D'] = 1.0
self.assertEqual(list(df.index), list(df_orig.index) + ['C'])
self.assertEqual(list(df.columns), list(df_orig.columns) + ['D'])
def test_get_set_value_no_partial_indexing(self):
# partial w/ MultiIndex raise exception
index = MultiIndex.from_tuples([(0, 1), (0, 2), (1, 1), (1, 2)])
df = DataFrame(index=index, columns=lrange(4))
self.assertRaises(KeyError, df.get_value, 0, 1)
# self.assertRaises(KeyError, df.set_value, 0, 1, 0)
def test_single_element_ix_dont_upcast(self):
self.frame['E'] = 1
self.assertTrue(issubclass(self.frame['E'].dtype.type,
(int, np.integer)))
result = self.frame.ix[self.frame.index[5], 'E']
self.assertTrue(com.is_integer(result))
def test_irow(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2))
result = df.irow(1)
exp = df.ix[2]
assert_series_equal(result, exp)
result = df.irow(2)
exp = df.ix[4]
assert_series_equal(result, exp)
# slice
result = df.irow(slice(4, 8))
expected = df.ix[8:14]
assert_frame_equal(result, expected)
# verify slice is view
# setting it makes it raise/warn
def f():
result[2] = 0.
self.assertRaises(com.SettingWithCopyError, f)
exp_col = df[2].copy()
exp_col[4:8] = 0.
assert_series_equal(df[2], exp_col)
# list of integers
result = df.irow([1, 2, 4, 6])
expected = df.reindex(df.index[[1, 2, 4, 6]])
assert_frame_equal(result, expected)
def test_icol(self):
df = DataFrame(np.random.randn(4, 10), columns=lrange(0, 20, 2))
result = df.icol(1)
exp = df.ix[:, 2]
assert_series_equal(result, exp)
result = df.icol(2)
exp = df.ix[:, 4]
assert_series_equal(result, exp)
# slice
result = df.icol(slice(4, 8))
expected = df.ix[:, 8:14]
assert_frame_equal(result, expected)
# verify slice is view
# and that we are setting a copy
def f():
result[8] = 0.
self.assertRaises(com.SettingWithCopyError, f)
self.assertTrue((df[8] == 0).all())
# list of integers
result = df.icol([1, 2, 4, 6])
expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
assert_frame_equal(result, expected)
def test_irow_icol_duplicates(self):
df = DataFrame(np.random.rand(3, 3), columns=list('ABC'),
index=list('aab'))
result = df.irow(0)
result2 = df.ix[0]
tm.assert_isinstance(result, Series)
assert_almost_equal(result.values, df.values[0])
assert_series_equal(result, result2)
result = df.T.icol(0)
result2 = df.T.ix[:, 0]
tm.assert_isinstance(result, Series)
assert_almost_equal(result.values, df.values[0])
assert_series_equal(result, result2)
# multiindex
df = DataFrame(np.random.randn(3, 3), columns=[['i', 'i', 'j'],
['A', 'A', 'B']],
index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
rs = df.irow(0)
xp = df.ix[0]
assert_series_equal(rs, xp)
rs = df.icol(0)
xp = df.T.ix[0]
assert_series_equal(rs, xp)
rs = df.icol([0])
xp = df.ix[:, [0]]
assert_frame_equal(rs, xp)
# #2259
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2])
result = df.icol([0])
expected = df.take([0], axis=1)
assert_frame_equal(result, expected)
def test_icol_sparse_propegate_fill_value(self):
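# icol on a SparseDataFrame should keep the column's fill_value, leaving the number of sp_values unchanged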
from pandas.sparse.api import SparseDataFrame
df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)
self.assertTrue(len(df['A'].sp_values) == len(df.icol(0).sp_values))
def test_iget_value(self):
for i, row in enumerate(self.frame.index):
for j, col in enumerate(self.frame.columns):
result = self.frame.iget_value(i, j)
expected = self.frame.get_value(row, col)
assert_almost_equal(result, expected)
def test_nested_exception(self):
# Ignore the strange way of triggering the problem
# (which may get fixed), it's just a way to trigger
# the issue of reraising an outer exception without
# a named argument
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8,
9]}).set_index(["a", "b"])
l = list(df.index)
l[0] = ["a", "b"]
df.index = l
try:
repr(df)
except Exception as e:
self.assertNotEqual(type(e), UnboundLocalError)
def test_reverse_reindex_ffill_raises(self):
dr = pd.date_range('2013-08-01', periods=6, freq='B')
data = np.random.randn(6,1)
df = pd.DataFrame(data, index=dr, columns=list('A'))
df['A'][3] = np.nan
df_rev = pd.DataFrame(data, index=dr[::-1], columns=list('A'))
# Reverse index is not 'monotonic'
self.assertRaises(ValueError, df_rev.reindex, df.index, method='pad')
self.assertRaises(ValueError, df_rev.reindex, df.index, method='ffill')
self.assertRaises(ValueError, df_rev.reindex, df.index, method='bfill')
def test_reversed_reindex_ffill_raises(self):
dr = pd.date_range('2013-08-01', periods=6, freq='B')
data = np.random.randn(6,1)
df = pd.DataFrame(data, index=dr, columns=list('A'))
df['A'][3] = np.nan
df = pd.DataFrame(data, index=dr, columns=list('A'))
# Reversed reindex is not 'monotonic'
self.assertRaises(ValueError, df.reindex, dr[::-1], method='pad')
self.assertRaises(ValueError, df.reindex, dr[::-1], method='ffill')
self.assertRaises(ValueError, df.reindex, dr[::-1], method='bfill')
def test_reindex_level(self):
from itertools import permutations
icol = ['jim', 'joe', 'jolie']
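# verify_first_level recomputes the expected row order with nonzero(); verify takes an explicit positional indexer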
def verify_first_level(df, level, idx):
f = lambda val: np.nonzero(df[level] == val)[0]
i = np.concatenate(list(map(f, idx)))
left = df.set_index(icol).reindex(idx, level=level)
right = df.iloc[i].set_index(icol)
assert_frame_equal(left, right)
def verify(df, level, idx, indexer):
left = df.set_index(icol).reindex(idx, level=level)
right = df.iloc[indexer].set_index(icol)
assert_frame_equal(left, right)
df = pd.DataFrame({'jim':list('B' * 4 + 'A' * 2 + 'C' * 3),
'joe':list('abcdeabcd')[::-1],
'jolie':[10, 20, 30] * 3,
'joline': np.random.randint(0, 1000, 9)})
target = [['C', 'B', 'A'], ['F', 'C', 'A', 'D'], ['A'], ['D', 'F'],
['A', 'B', 'C'], ['C', 'A', 'B'], ['C', 'B'], ['C', 'A'],
['A', 'B'], ['B', 'A', 'C'], ['A', 'C', 'B']]
for idx in target:
verify_first_level(df, 'jim', idx)
verify(df, 'joe', list('abcde'), [3, 2, 1, 0, 5, 4, 8, 7, 6])
verify(df, 'joe', list('abcd'), [3, 2, 1, 0, 5, 8, 7, 6])
verify(df, 'joe', list('abc'), [3, 2, 1, 8, 7, 6])
verify(df, 'joe', list('eca'), [1, 3, 4, 6, 8])
verify(df, 'joe', list('edc'), [0, 1, 4, 5, 6])
verify(df, 'joe', list('eadbc'), [3, 0, 2, 1, 4, 5, 8, 7, 6])
verify(df, 'joe', list('edwq'), [0, 4, 5])
verify(df, 'joe', list('wq'), [])
df = DataFrame({'jim':['mid'] * 5 + ['btm'] * 8 + ['top'] * 7,
'joe':['3rd'] * 2 + ['1st'] * 3 + ['2nd'] * 3 +
['1st'] * 2 + ['3rd'] * 3 + ['1st'] * 2 +
['3rd'] * 3 + ['2nd'] * 2,
'jolie':np.random.randint(0, 1000, 20),
'joline': np.random.randn(20).round(3) * 10})
for idx in permutations(df['jim'].unique()):
for i in range(3):
verify_first_level(df, 'jim', idx[:i+1])
i = [2,3,4,0,1,8,9,5,6,7,10,11,12,13,14,18,19,15,16,17]
verify(df, 'joe', ['1st', '2nd', '3rd'], i)
i = [0,1,2,3,4,10,11,12,5,6,7,8,9,15,16,17,18,19,13,14]
verify(df, 'joe', ['3rd', '2nd', '1st'], i)
i = [0,1,5,6,7,10,11,12,18,19,15,16,17]
verify(df, 'joe', ['2nd', '3rd'], i)
i = [0,1,2,3,4,10,11,12,8,9,15,16,17,13,14]
verify(df, 'joe', ['3rd', '1st'], i)
def test_getitem_ix_float_duplicates(self):
df = pd.DataFrame(np.random.randn(3, 3),
index=[0.1, 0.2, 0.2], columns=list('abc'))
expect = df.iloc[1:]
tm.assert_frame_equal(df.loc[0.2], expect)
tm.assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:, 0]
tm.assert_series_equal(df.loc[0.2, 'a'], expect)
df.index = [1, 0.2, 0.2]
expect = df.iloc[1:]
tm.assert_frame_equal(df.loc[0.2], expect)
tm.assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:, 0]
tm.assert_series_equal(df.loc[0.2, 'a'], expect)
df = pd.DataFrame(np.random.randn(4, 3),
index=[1, 0.2, 0.2, 1], columns=list('abc'))
expect = df.iloc[1:-1]
tm.assert_frame_equal(df.loc[0.2], expect)
tm.assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:-1, 0]
tm.assert_series_equal(df.loc[0.2, 'a'], expect)
df.index = [0.1, 0.2, 2, 0.2]
expect = df.iloc[[1, -1]]
tm.assert_frame_equal(df.loc[0.2], expect)
tm.assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[[1, -1], 0]
tm.assert_series_equal(df.loc[0.2, 'a'], expect)
def test_setitem_with_sparse_value(self):
# GH8131
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
sp_series = pd.Series([0, 0, 1]).to_sparse(fill_value=0)
df['new_column'] = sp_series
tm.assert_series_equal(df['new_column'], sp_series)
def test_setitem_with_unaligned_sparse_value(self):
df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
sp_series = (pd.Series([0, 0, 1], index=[2, 1, 0])
.to_sparse(fill_value=0))
df['new_column'] = sp_series
tm.assert_series_equal(df['new_column'], pd.Series([1, 0, 0]))
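# module-level fixture frames shared by the test classes below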
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
_intframe = DataFrame(dict((k, v.astype(int))
for k, v in compat.iteritems(_seriesd)))
_tsframe = DataFrame(_tsd)
_mixed_frame = _frame.copy()
_mixed_frame['foo'] = 'bar'
class SafeForSparse(object):
_multiprocess_can_split_ = True
def test_copy_index_name_checking(self):
# don't want to be able to modify the index stored elsewhere after
# making a copy
for attr in ('index', 'columns'):
ind = getattr(self.frame, attr)
ind.name = None
cp = self.frame.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.frame, attr).name)
def test_getitem_pop_assign_name(self):
s = self.frame['A']
self.assertEqual(s.name, 'A')
s = self.frame.pop('A')
self.assertEqual(s.name, 'A')
s = self.frame.ix[:, 'B']
self.assertEqual(s.name, 'B')
s2 = s.ix[:]
self.assertEqual(s2.name, 'B')
def test_get_value(self):
for idx in self.frame.index:
for col in self.frame.columns:
result = self.frame.get_value(idx, col)
expected = self.frame[col][idx]
assert_almost_equal(result, expected)
def test_join_index(self):
# left / right
f = self.frame.reindex(columns=['A', 'B'])[:10]
f2 = self.frame.reindex(columns=['C', 'D'])
joined = f.join(f2)
self.assertTrue(f.index.equals(joined.index))
self.assertEqual(len(joined.columns), 4)
joined = f.join(f2, how='left')
self.assertTrue(joined.index.equals(f.index))
self.assertEqual(len(joined.columns), 4)
joined = f.join(f2, how='right')
self.assertTrue(joined.index.equals(f2.index))
self.assertEqual(len(joined.columns), 4)
# inner
f = self.frame.reindex(columns=['A', 'B'])[:10]
f2 = self.frame.reindex(columns=['C', 'D'])
joined = f.join(f2, how='inner')
self.assertTrue(joined.index.equals(f.index.intersection(f2.index)))
self.assertEqual(len(joined.columns), 4)
# outer
f = self.frame.reindex(columns=['A', 'B'])[:10]
f2 = self.frame.reindex(columns=['C', 'D'])
joined = f.join(f2, how='outer')
self.assertTrue(tm.equalContents(self.frame.index, joined.index))
self.assertEqual(len(joined.columns), 4)
assertRaisesRegexp(ValueError, 'join method', f.join, f2, how='foo')
# corner case - overlapping columns
for how in ('outer', 'left', 'inner'):
with assertRaisesRegexp(ValueError, 'columns overlap but no suffix'):
self.frame.join(self.frame, how=how)
def test_join_index_more(self):
af = self.frame.ix[:, ['A', 'B']]
bf = self.frame.ix[::2, ['C', 'D']]
expected = af.copy()
expected['C'] = self.frame['C'][::2]
expected['D'] = self.frame['D'][::2]
result = af.join(bf)
assert_frame_equal(result, expected)
result = af.join(bf, how='right')
assert_frame_equal(result, expected[::2])
result = bf.join(af, how='right')
assert_frame_equal(result, expected.ix[:, result.columns])
def test_join_index_series(self):
df = self.frame.copy()
s = df.pop(self.frame.columns[-1])
joined = df.join(s)
assert_frame_equal(joined, self.frame, check_names=False) # TODO should this check_names ?
s.name = None
assertRaisesRegexp(ValueError, 'must have a name', df.join, s)
def test_join_overlap(self):
df1 = self.frame.ix[:, ['A', 'B', 'C']]
df2 = self.frame.ix[:, ['B', 'C', 'D']]
joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')
df1_suf = df1.ix[:, ['B', 'C']].add_suffix('_df1')
df2_suf = df2.ix[:, ['B', 'C']].add_suffix('_df2')
no_overlap = self.frame.ix[:, ['A', 'D']]
expected = df1_suf.join(df2_suf).join(no_overlap)
# column order not necessarily sorted
assert_frame_equal(joined, expected.ix[:, joined.columns])
def test_add_prefix_suffix(self):
with_prefix = self.frame.add_prefix('foo#')
expected = ['foo#%s' % c for c in self.frame.columns]
self.assert_numpy_array_equal(with_prefix.columns, expected)
with_suffix = self.frame.add_suffix('#foo')
expected = ['%s#foo' % c for c in self.frame.columns]
self.assert_numpy_array_equal(with_suffix.columns, expected)
class TestDataFrame(tm.TestCase, CheckIndexing,
SafeForSparse):
klass = DataFrame
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
# force these all to int64 to avoid platform testing issues
self.intframe = DataFrame(dict([ (c,s) for c,s in compat.iteritems(_intframe) ]), dtype = np.int64)
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.mixed_float = DataFrame({ 'A': _frame['A'].copy().astype('float32'),
'B': _frame['B'].copy().astype('float32'),
'C': _frame['C'].copy().astype('float16'),
'D': _frame['D'].copy().astype('float64') })
self.mixed_float2 = DataFrame({ 'A': _frame2['A'].copy().astype('float32'),
'B': _frame2['B'].copy().astype('float32'),
'C': _frame2['C'].copy().astype('float16'),
'D': _frame2['D'].copy().astype('float64') })
self.mixed_int = DataFrame({ 'A': _intframe['A'].copy().astype('int32'),
'B': np.ones(len(_intframe['B']),dtype='uint64'),
'C': _intframe['C'].copy().astype('uint8'),
'D': _intframe['D'].copy().astype('int64') })
self.all_mixed = DataFrame({'a': 1., 'b': 2, 'c': 'foo', 'float32' : np.array([1.]*10,dtype='float32'),
'int32' : np.array([1]*10,dtype='int32'),
}, index=np.arange(10))
self.ts1 = tm.makeTimeSeries()
self.ts2 = tm.makeTimeSeries()[5:]
self.ts3 = tm.makeTimeSeries()[-5:]
self.ts4 = tm.makeTimeSeries()[1:-1]
self.ts_dict = {
'col1': self.ts1,
'col2': self.ts2,
'col3': self.ts3,
'col4': self.ts4,
}
self.empty = DataFrame({})
arr = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]])
self.simple = DataFrame(arr, columns=['one', 'two', 'three'],
index=['a', 'b', 'c'])
def test_get_axis(self):
f = self.frame
self.assertEqual(f._get_axis_number(0), 0)
self.assertEqual(f._get_axis_number(1), 1)
self.assertEqual(f._get_axis_number('index'), 0)
self.assertEqual(f._get_axis_number('rows'), 0)
self.assertEqual(f._get_axis_number('columns'), 1)
self.assertEqual(f._get_axis_name(0), 'index')
self.assertEqual(f._get_axis_name(1), 'columns')
self.assertEqual(f._get_axis_name('index'), 'index')
self.assertEqual(f._get_axis_name('rows'), 'index')
self.assertEqual(f._get_axis_name('columns'), 'columns')
self.assertIs(f._get_axis(0), f.index)
self.assertIs(f._get_axis(1), f.columns)
assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, 2)
assertRaisesRegexp(ValueError, 'No axis.*foo', f._get_axis_name, 'foo')
assertRaisesRegexp(ValueError, 'No axis.*None', f._get_axis_name, None)
assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, None)
def test_set_index(self):
idx = Index(np.arange(len(self.mixed_frame)))
# cache it
_ = self.mixed_frame['foo']
self.mixed_frame.index = idx
self.assertIs(self.mixed_frame['foo'].index, idx)
with assertRaisesRegexp(ValueError, 'Length mismatch'):
self.mixed_frame.index = idx[::2]
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame({'A' : [1.1,2.2,3.3], 'B' : [5.0,6.1,7.2]},
index = [2010,2011,2012])
expected = df.ix[2010]
new_index = df.index.astype(np.int32)
df.index = new_index
result = df.ix[2010]
assert_series_equal(result,expected)
def test_set_index2(self):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
'B': ['one', 'two', 'three', 'one', 'two'],
'C': ['a', 'b', 'c', 'd', 'e'],
'D': np.random.randn(5),
'E': np.random.randn(5)})
# new object, single-column
result = df.set_index('C')
result_nodrop = df.set_index('C', drop=False)
index = Index(df['C'], name='C')
expected = df.ix[:, ['A', 'B', 'D', 'E']]
expected.index = index
expected_nodrop = df.copy()
expected_nodrop.index = index
assert_frame_equal(result, expected)
assert_frame_equal(result_nodrop, expected_nodrop)
self.assertEqual(result.index.name, index.name)
# inplace, single
df2 = df.copy()
df2.set_index('C', inplace=True)
assert_frame_equal(df2, expected)
df3 = df.copy()
df3.set_index('C', drop=False, inplace=True)
assert_frame_equal(df3, expected_nodrop)
# create new object, multi-column
result = df.set_index(['A', 'B'])
result_nodrop = df.set_index(['A', 'B'], drop=False)
index = MultiIndex.from_arrays([df['A'], df['B']], names=['A', 'B'])
expected = df.ix[:, ['C', 'D', 'E']]
expected.index = index
expected_nodrop = df.copy()
expected_nodrop.index = index
assert_frame_equal(result, expected)
assert_frame_equal(result_nodrop, expected_nodrop)
self.assertEqual(result.index.names, index.names)
# inplace
df2 = df.copy()
df2.set_index(['A', 'B'], inplace=True)
assert_frame_equal(df2, expected)
df3 = df.copy()
df3.set_index(['A', 'B'], drop=False, inplace=True)
assert_frame_equal(df3, expected_nodrop)
# corner case
with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):
df.set_index('A', verify_integrity=True)
# append
result = df.set_index(['A', 'B'], append=True)
xp = df.reset_index().set_index(['index', 'A', 'B'])
xp.index.names = [None, 'A', 'B']
assert_frame_equal(result, xp)
# append to existing multiindex
rdf = df.set_index(['A'], append=True)
rdf = rdf.set_index(['B', 'C'], append=True)
expected = df.set_index(['A', 'B', 'C'], append=True)
assert_frame_equal(rdf, expected)
# Series
result = df.set_index(df.C)
self.assertEqual(result.index.name, 'C')
def test_set_index_nonuniq(self):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
'B': ['one', 'two', 'three', 'one', 'two'],
'C': ['a', 'b', 'c', 'd', 'e'],
'D': np.random.randn(5),
'E': np.random.randn(5)})
with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):
df.set_index('A', verify_integrity=True, inplace=True)
self.assertIn('A', df)
def test_set_index_bug(self):
# GH1590
df = DataFrame({'val': [0, 1, 2], 'key': ['a', 'b', 'c']})
df2 = df.select(lambda indx: indx >= 1)
rs = df2.set_index('key')
xp = DataFrame({'val': [1, 2]},
Index(['b', 'c'], name='key'))
assert_frame_equal(rs, xp)
def test_set_index_pass_arrays(self):
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
# multiple columns
result = df.set_index(['A', df['B'].values], drop=False)
expected = df.set_index(['A', 'B'], drop=False)
assert_frame_equal(result, expected, check_names=False) # TODO should set_index check_names ?
def test_set_index_cast_datetimeindex(self):
df = DataFrame({'A': [datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)],
'B': np.random.randn(1000)})
idf = df.set_index('A')
tm.assert_isinstance(idf.index, DatetimeIndex)
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
i = pd.DatetimeIndex(pd.tseries.tools.to_datetime(['2013-1-1 13:00','2013-1-2 14:00'], errors="raise")).tz_localize('US/Pacific')
df = DataFrame(np.random.randn(2,1),columns=['A'])
expected = Series(np.array([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')], dtype="object"))
# convert index to series
result = Series(i)
assert_series_equal(result, expected)
# assign to frame
df['B'] = i
result = df['B']
assert_series_equal(result, expected)
# keep the timezone
result = i.to_series(keep_tz=True)
assert_series_equal(result.reset_index(drop=True), expected)
# convert to utc
df['C'] = i.to_series().reset_index(drop=True)
result = df['C']
comp = DatetimeIndex(expected.values).copy()
comp.tz = None
self.assert_numpy_array_equal(result.values, comp.values)
# list of datetimes with a tz
df['D'] = i.to_pydatetime()
result = df['D']
assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame([{'ts':datetime(2014, 4, 1, tzinfo=pytz.utc), 'foo':1}])
expected = df.set_index('ts')
df.index = df['ts']
df.pop('ts')
assert_frame_equal(df, expected)
# GH 3950
# reset_index with single level
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
idx = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz, name='idx')
df = pd.DataFrame({'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx': [datetime(2011, 1, 1), datetime(2011, 1, 2),
datetime(2011, 1, 3), datetime(2011, 1, 4),
datetime(2011, 1, 5)],
'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx', 'a', 'b'])
expected['idx'] = expected['idx'].apply(lambda d: pd.Timestamp(d, tz=tz))
assert_frame_equal(df.reset_index(), expected)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
rs = df.set_index(df.columns[0])
xp = df.ix[:, 1:]
xp.index = df.ix[:, 0].values
xp.index.names = [df.columns[0]]
assert_frame_equal(rs, xp)
def test_set_index_empty_column(self):
# #1971
df = DataFrame([
dict(a=1, p=0),
dict(a=2, m=10),
dict(a=3, m=11, p=20),
dict(a=4, m=12, p=21)
], columns=('a', 'm', 'p', 'x'))
# it works!
result = df.set_index(['a', 'x'])
repr(result)
def test_set_columns(self):
cols = Index(np.arange(len(self.mixed_frame.columns)))
self.mixed_frame.columns = cols
with assertRaisesRegexp(ValueError, 'Length mismatch'):
self.mixed_frame.columns = cols[::2]
def test_keys(self):
getkeys = self.frame.keys
self.assertIs(getkeys(), self.frame.columns)
def test_column_contains_typeerror(self):
try:
self.frame.columns in self.frame
except TypeError:
pass
def test_constructor(self):
df = DataFrame()
self.assertEqual(len(df.index), 0)
df = DataFrame(data={})
self.assertEqual(len(df.index), 0)
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
indexed_frame = DataFrame(data, index=index)
unindexed_frame = DataFrame(data)
self.assertEqual(self.mixed_frame['foo'].dtype, np.object_)
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
self.assertEqual(foo['a'].dtype, object)
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4,2)))
# this is ok
df['foo'] = np.ones((4,2)).tolist()
# this is not ok
self.assertRaises(ValueError, df.__setitem__, tuple(['test']), np.ones((4,2)))
# this is ok
df['foo2'] = np.ones((4,2)).tolist()
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
self.assertEqual(df.values[0, 0], 99)
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
self.assertEqual(df.values[0, 0], 97)
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
self.assertIsNone(df.ix[1, 0])
self.assertEqual(df.ix[0, 1], '2')
def test_constructor_list_frames(self):
# GH 3243
result = DataFrame([DataFrame([])])
self.assertEqual(result.shape, (1,0))
result = DataFrame([DataFrame(dict(A = lrange(5)))])
tm.assert_isinstance(result.iloc[0,0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad = None):
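# build a frame with one column per dtype in the requested family; extra columns can be passed via ad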
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [ np.array(np.random.rand(10), dtype = d) for d in dtypes ]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [ np.array(np.random.randint(10, size=10), dtype = d) for d in dtypes ]
zipper = lzip(dtypes,arrays)
for d,a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update(dict([ (d,a) for d,a in zipper ]))
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes = None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer dtypes coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A = 1, B = 'foo', C = 'bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
# Assigning causes segfault in NumPy < 1.5.1
# rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
self.assert_numpy_array_equal(df.columns, rec.dtype.names)
df2 = DataFrame(rec, index=index)
self.assert_numpy_array_equal(df2.columns, rec.dtype.names)
self.assertTrue(df2.index.equals(index))
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
self.assertEqual(df.values.dtype, np.bool_)
def test_constructor_overflow_int64(self):
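# uint64 values beyond the int64 range cannot be held in an int64 column, so they fall back to object dtype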
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
self.assertEqual(result['a'].dtype, object)
# #2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45), (long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
self.assertEqual(df_crawls['uid'].dtype, object)
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
self.assertEqual(expected, list(df.columns))
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
tm.assert_dict_equal(self.ts1, frame['col1'], compare_keys=False)
tm.assert_dict_equal(self.ts2, frame['col2'], compare_keys=False)
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
self.assertEqual(len(frame), len(self.ts2))
self.assertNotIn('col1', frame)
self.assertTrue(isnull(frame['col3']).all())
# Corner cases
self.assertEqual(len(DataFrame({})), 0)
# mix dict and array, wrong size -- no spec for which error should raise first
with tm.assertRaises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
self.assert_numpy_array_equal(frame.index, ['1', '2'])
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
self.assertIs(frame.index, idx)
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
self.assertIs(frame.index, idx)
self.assertIs(frame.columns, idx)
self.assertEqual(len(frame._series), 3)
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
self.assertTrue(frame.index.equals(Index([])))
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi,columns=mi)
self.assertTrue(pd.isnull(df).values.ravel().all())
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi,columns=mi)
self.assertTrue(pd.isnull(df).values.ravel().all())
def test_constructor_error_msgs(self):
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with assertRaisesRegexp(ValueError, msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = "Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with assertRaisesRegexp(ValueError, msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=date_range('2000-01-01', periods=3))
# higher dim raise exception
with assertRaisesRegexp(ValueError, 'Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
with assertRaisesRegexp(ValueError, "Shape of passed values is \(3, 2\), indices imply \(3, 1\)"):
DataFrame(np.random.rand(2,3), columns=['A', 'B', 'C'], index=[1])
with assertRaisesRegexp(ValueError, "Shape of passed values is \(3, 2\), indices imply \(2, 2\)"):
DataFrame(np.random.rand(2,3), columns=['A', 'B'], index=[1, 2])
with assertRaisesRegexp(ValueError, 'If using all scalar values, you must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a':[1, 2, 3], 'b':[3, 4, 5]})
df2 = DataFrame([df1, df1+10])
df2.dtypes
str(df2)
result = df2.loc[0,0]
assert_frame_equal(result,df1)
result = df2.loc[1,0]
assert_frame_equal(result,df1+10)
def test_insert_error_msmgs(self):
# GH 7432
df = DataFrame({'foo':['a', 'b', 'c'], 'bar':[1,2,3], 'baz':['d','e','f']}).set_index('foo')
s = DataFrame({'foo':['a', 'b', 'c', 'a'], 'fiz':['g','h','i','j']}).set_index('foo')
msg = 'cannot reindex from a duplicate axis'
with assertRaisesRegexp(ValueError, msg):
df['newcol'] = s
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0,2,(4,4)),
columns=['a', 'b', 'c', 'd'])
msg = 'incompatible index of inserted column with frame index'
with assertRaisesRegexp(TypeError, msg):
df['gr'] = df.groupby(['b', 'c']).count()
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame(dict((col, dict(compat.iteritems(val)))
for col, val in compat.iteritems(data)))
assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = [[4., 3., 2., 1.]]
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
assert_almost_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
self.assertEqual(len(frame), 3)
self.assertEqual(frame['B'].dtype, np.float64)
self.assertEqual(frame['A'].dtype, np.float64)
frame = DataFrame(test_data)
self.assertEqual(len(frame), 3)
self.assertEqual(frame['B'].dtype, np.object_)
self.assertEqual(frame['A'].dtype, np.float64)
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
self.assertEqual(len(frame), 20)
self.assertEqual(frame['A'].dtype, np.object_)
self.assertEqual(frame['B'].dtype, np.float64)
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
tm.assert_isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
tm.assert_isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame(dict((k, list(v)) for k, v in compat.iteritems(data)))
assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
check = lambda result, expected: tm.assert_frame_equal(
result, expected, check_dtype=True, check_index_type=True,
check_column_type=True, check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def _check_basic_constructor(self, empty):
"mat: 2d matrix with shpae (3, 2) to input. empty - makes sized objects"
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
self.assertEqual(len(frame.index), 3)
self.assertEqual(len(frame.columns), 1)
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
self.assertEqual(frame.values.dtype, np.int64)
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with assertRaisesRegexp(ValueError, msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with assertRaisesRegexp(ValueError, msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with assertRaisesRegexp(ValueError, 'Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
self.assert_numpy_array_equal(frame.index, lrange(2))
self.assert_numpy_array_equal(frame.columns, lrange(3))
frame = DataFrame(mat, index=[1, 2])
self.assert_numpy_array_equal(frame.columns, lrange(3))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
self.assert_numpy_array_equal(frame.index, lrange(2))
# 0-length axis
frame = DataFrame(empty((0, 3)))
self.assertEqual(len(frame.index), 0)
frame = DataFrame(empty((3, 0)))
self.assertEqual(len(frame.columns), 0)
def test_constructor_ndarray(self):
mat = np.zeros((2, 3), dtype=float)
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
self.assertEqual(len(frame), 2)
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(1.0, frame['A'][1])
self.assertEqual(2.0, frame['C'][2])
# an all-masked frame is all NaN, so element-wise self-comparison is all False
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertTrue(np.all(~np.asarray(frame == frame)))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
self.assertTrue(np.all(~np.asarray(frame == frame)))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
self.assertEqual(frame.values.dtype, np.float64)
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(1, frame['A'][1])
self.assertEqual(2, frame['C'][2])
# masked np.datetime64 stays (use lib.NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
self.assertTrue(isnull(frame).values.all())
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
self.assertEqual(frame.values.dtype, np.int64)
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(1, frame['A'].view('i8')[1])
self.assertEqual(2, frame['C'].view('i8')[2])
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(len(frame.index), 2)
self.assertEqual(len(frame.columns), 3)
self.assertTrue(np.all(~np.asarray(frame == frame)))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
self.assertEqual(frame.values.dtype, object)
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
self.assertEqual(True, frame['A'][1])
self.assertEqual(False, frame['C'][2])
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill masked values so the dict-based expected frame matches the mrecarray result
comb = dict([ (k, v.filled()) if hasattr(v,'filled') else (k, v) for k, v in comb ])
expected = DataFrame(comb,columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result,expected)
# specify columns
expected = DataFrame(comb,columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result,expected)
# specify index
expected = DataFrame(comb,columns=names,index=[1,2])
result = DataFrame(mrecs, index=[1,2])
assert_fr_equal(result,expected)
def test_constructor_corner(self):
df = DataFrame(index=[])
self.assertEqual(df.values.shape, (0, 0))
# empty but with specified dtype
df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=object)
self.assertEqual(df.values.dtype, np.object_)
# does not error, but the requested int dtype is ignored and values end up object
df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=int)
self.assertEqual(df.values.dtype, np.object_)
# #1783 empty dtype object
df = DataFrame({}, columns=['foo', 'bar'])
self.assertEqual(df.values.dtype, np.object_)
df = DataFrame({'b': 1}, index=lrange(10), columns=list('abc'),
dtype=int)
self.assertEqual(df.values.dtype, np.object_)
def test_constructor_scalar_inference(self):
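# scalar values should be broadcast across the index and keep their natural dtypes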
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
self.assertEqual(df['int'].dtype, np.int64)
self.assertEqual(df['bool'].dtype, np.bool_)
self.assertEqual(df['float'].dtype, np.float64)
self.assertEqual(df['complex'].dtype, np.complex128)
self.assertEqual(df['object'].dtype, np.object_)
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
assert_frame_equal(df, exp)
with tm.assertRaisesRegexp(ValueError, 'must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
self.assertEqual(df_casted.values.dtype, np.int64)
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
self.assertEqual(dm.values.ndim, 2)
arr = randn(0)
dm = DataFrame(arr)
self.assertEqual(dm.values.ndim, 2)
self.assertEqual(dm.values.ndim, 2)
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
self.assertEqual(dm.values.shape, (10, 2))
dm = DataFrame(columns=['A', 'B'])
self.assertEqual(dm.values.shape, (0, 2))
dm = DataFrame(index=np.arange(10))
self.assertEqual(dm.values.shape, (10, 0))
# corner, silly
# TODO: Fix this Exception to be better...
with assertRaisesRegexp(PandasError, 'constructor not properly called'):
DataFrame((1, 2, 3))
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with assertRaisesRegexp(ValueError, 'cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
self.assertEqual(len(dm.columns), 2)
self.assertEqual(dm.values.dtype, np.float64)
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
l = [[1, 'a'], [2, 'b']]
df = DataFrame(data=l, columns=["num", "str"])
self.assertTrue(com.is_integer_dtype(df['num']))
self.assertEqual(df['str'].dtype, np.object_)
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({ 0: range(10) })
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Sequence-like
import collections
class DummyContainer(collections.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self):
return self._lst.__len__()
l = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(l, columns=columns)
expected = DataFrame([[1,'a'],[2,'b']],columns=columns)
assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame.from_items([('A', array.array('i', range(10)))])
expected = DataFrame({ 'A' : list(range(10)) })
assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([ list(range(10)), list(range(10)) ])
result = DataFrame([ array.array('i', range(10)), array.array('i',range(10)) ])
assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterator(self):
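# range objects should be materialized into rows just like explicit lists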
expected = DataFrame([ list(range(10)), list(range(10)) ])
result = DataFrame([ range(10), range(10) ])
assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([ list(range(10)), list(range(10)) ])
result = DataFrame([ gen1, gen2 ])
assert_frame_equal(result, expected)
gen = ([ i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({ 0 : range(10), 1 : 'a' })
assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with assertRaisesRegexp(ValueError, 'arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
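# a scalar dict value is broadcast over the supplied index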
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
self.assertTrue(result.index.is_monotonic)
# ordering ambiguous, raise exception
with assertRaisesRegexp(ValueError, 'ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
assert_frame_equal(result, expected)
def test_constructor_tuples(self):
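# tuple values should be kept intact in an object column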
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
assert_frame_equal(rs, xp)
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
self.assertEqual(df.columns[0], 'x')
self.assertTrue(df.index.equals(a.index))
# ndarray like
arr = np.random.randn(10)
s = Series(arr,name='x')
df = DataFrame(s)
expected = DataFrame(dict(x = s))
assert_frame_equal(df,expected)
s = Series(arr,index=range(3,13))
df = DataFrame(s)
expected = DataFrame({ 0 : s })
assert_frame_equal(df,expected)
self.assertRaises(ValueError, DataFrame, s, columns=[1,2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
self.assertEqual(df.columns[0], 'x')
# series with name and w/o
s1 = Series(arr,name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({ 'x' : s1, 'Unnamed 0' : arr },columns=['x','Unnamed 0'])
assert_frame_equal(df,expected)
# a bit non-intuitive here: the Series collapse down to plain arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({ 1 : s1, 0 : arr },columns=[0,1])
assert_frame_equal(df,expected)
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
self.assertEqual(df1.columns[0], 'x')
assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
self.assertEqual(df2.columns[0], 0)
self.assertTrue(df2.index.equals(other_index))
assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
self.assert_numpy_array_equal(result.index, index)
self.assert_numpy_array_equal(result.columns, columns)
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
recons = DataFrame.from_items(items)
assert_frame_equal(recons, self.frame)
# pass some columns
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
assert_frame_equal(recons, self.frame.ix[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
assert_frame_equal(recons, self.mixed_frame)
self.assertEqual(recons['A'].dtype, np.float64)
with tm.assertRaisesRegexp(TypeError,
"Must pass columns with orient='index'"):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = lib.list_to_object_array(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
assert_frame_equal(recons, self.mixed_frame)
tm.assert_isinstance(recons['foo'][0], tuple)
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index', columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
assert_frame_equal(rs, xp)
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
assert_frame_equal(df, self.frame.ix[:, ['A', 'B']])
with tm.assertRaisesRegexp(ValueError, 'does not match index length'):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
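# NaN cannot be represented as int64, so the requested dtype falls back to float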
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
assert_frame_equal(df, expected)
def test_constructor_iterator_failure(self):
with assertRaisesRegexp(TypeError, 'iterator'):
df = DataFrame(iter([1, 2, 3]))
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
assert_frame_equal(df, edf)
idf = DataFrame.from_items(
[('a', [8]), ('a', [5])], columns=['a', 'a'])
assert_frame_equal(idf, edf)
self.assertRaises(ValueError, DataFrame.from_items,
[('a', [8]), ('a', [5]), ('b', [6])],
columns=['b', 'a', 'a'])
def test_column_dups_operations(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result,expected)
result.dtypes
str(result)
# assignment
# GH 3687
arr = np.random.randn(3, 2)
idx = lrange(2)
df = DataFrame(arr, columns=['A', 'A'])
df.columns = idx
expected = DataFrame(arr,columns=idx)
check(df,expected)
idx = date_range('20130101',periods=4,freq='Q-NOV')
df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['a','a','a','a'])
df.columns = idx
expected = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=idx)
check(df,expected)
# insert
df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['foo','bar','foo','hello'])
df['string'] = 'bah'
expected = DataFrame([[1,1,1,5,'bah'],[1,1,2,5,'bah'],[2,1,3,5,'bah']],columns=['foo','bar','foo','hello','string'])
check(df,expected)
with assertRaisesRegexp(ValueError, 'Length of value'):
df.insert(0, 'AnotherColumn', range(len(df.index) - 1))
# insert same dtype
df['foo2'] = 3
expected = DataFrame([[1,1,1,5,'bah',3],[1,1,2,5,'bah',3],[2,1,3,5,'bah',3]],columns=['foo','bar','foo','hello','string','foo2'])
check(df,expected)
# set (non-dup)
df['foo2'] = 4
expected = DataFrame([[1,1,1,5,'bah',4],[1,1,2,5,'bah',4],[2,1,3,5,'bah',4]],columns=['foo','bar','foo','hello','string','foo2'])
check(df,expected)
df['foo2'] = 3
# delete (non dup)
del df['bar']
expected = DataFrame([[1,1,5,'bah',3],[1,2,5,'bah',3],[2,3,5,'bah',3]],columns=['foo','foo','hello','string','foo2'])
check(df,expected)
# try to delete again (it's not consolidated)
del df['hello']
expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])
check(df,expected)
# consolidate
df = df.consolidate()
expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])
check(df,expected)
# insert
df.insert(2,'new_col',5.)
expected = DataFrame([[1,1,5.,'bah',3],[1,2,5.,'bah',3],[2,3,5.,'bah',3]],columns=['foo','foo','new_col','string','foo2'])
check(df,expected)
# insert a dup
assertRaisesRegexp(ValueError, 'cannot insert', df.insert, 2, 'new_col', 4.)
df.insert(2,'new_col',4.,allow_duplicates=True)
expected = DataFrame([[1,1,4.,5.,'bah',3],[1,2,4.,5.,'bah',3],[2,3,4.,5.,'bah',3]],columns=['foo','foo','new_col','new_col','string','foo2'])
check(df,expected)
# delete (dup)
del df['foo']
expected = DataFrame([[4.,5.,'bah',3],[4.,5.,'bah',3],[4.,5.,'bah',3]],columns=['new_col','new_col','string','foo2'])
assert_frame_equal(df,expected)
# dup across dtypes
df = DataFrame([[1,1,1.,5],[1,1,2.,5],[2,1,3.,5]],columns=['foo','bar','foo','hello'])
check(df)
df['foo2'] = 7.
expected = DataFrame([[1,1,1.,5,7.],[1,1,2.,5,7.],[2,1,3.,5,7.]],columns=['foo','bar','foo','hello','foo2'])
check(df,expected)
result = df['foo']
expected = DataFrame([[1,1.],[1,2.],[2,3.]],columns=['foo','foo'])
check(result,expected)
# multiple replacements
df['foo'] = 'string'
expected = DataFrame([['string',1,'string',5,7.],['string',1,'string',5,7.],['string',1,'string',5,7.]],columns=['foo','bar','foo','hello','foo2'])
check(df,expected)
del df['foo']
expected = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','hello','foo2'])
check(df,expected)
# values
df = DataFrame([[1,2.5],[3,4.5]], index=[1,2], columns=['x','x'])
result = df.values
expected = np.array([[1,2.5],[3,4.5]])
self.assertTrue((result == expected).all().all())
# rename, GH 4403
df4 = DataFrame({'TClose': [22.02],
'RT': [0.0454],
'TExg': [0.0422]},
index=MultiIndex.from_tuples([(600809, 20130331)], names=['STK_ID', 'RPT_Date']))
df5 = DataFrame({'STK_ID': [600809] * 3,
'RPT_Date': [20120930,20121231,20130331],
'STK_Name': [u('饡驦'), u('饡驦'), u('饡驦')],
'TClose': [38.05, 41.66, 30.01]},
index=MultiIndex.from_tuples([(600809, 20120930), (600809, 20121231),(600809,20130331)], names=['STK_ID', 'RPT_Date']))
k = pd.merge(df4,df5,how='inner',left_index=True,right_index=True)
result = k.rename(columns={'TClose_x':'TClose', 'TClose_y':'QT_Close'})
str(result)
result.dtypes
expected = DataFrame([[0.0454, 22.02, 0.0422, 20130331, 600809, u('饡驦'), 30.01 ]],
columns=['RT','TClose','TExg','RPT_Date','STK_ID','STK_Name','QT_Close']).set_index(['STK_ID','RPT_Date'],drop=False)
assert_frame_equal(result,expected)
# reindex is invalid!
df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])
self.assertRaises(ValueError, df.reindex, columns=['bar'])
self.assertRaises(ValueError, df.reindex, columns=['bar','foo'])
# drop
df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])
result = df.drop(['a'],axis=1)
expected = DataFrame([[1],[1],[1]],columns=['bar'])
check(result,expected)
result = df.drop('a',axis=1)
check(result,expected)
# describe
df = DataFrame([[1,1,1],[2,2,2],[3,3,3]],columns=['bar','a','a'],dtype='float64')
result = df.describe()
s = df.iloc[:,0].describe()
expected = pd.concat([ s, s, s],keys=df.columns,axis=1)
check(result,expected)
# check column dups with index equal and not equal to df's index
df = DataFrame(np.random.randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'A'])
for index in [df.index, pd.Index(list('edcba'))]:
this_df = df.copy()
expected_ser = pd.Series(index.values, index=this_df.index)
expected_df = DataFrame.from_items([('A', expected_ser),
('B', this_df['B']),
('A', expected_ser)])
this_df['A'] = index
check(this_df, expected_df)
# operations
for op in ['__add__','__mul__','__sub__','__truediv__']:
df = DataFrame(dict(A = np.arange(10), B = np.random.rand(10)))
expected = getattr(df,op)(df)
expected.columns = ['A','A']
df.columns = ['A','A']
result = getattr(df,op)(df)
check(result,expected)
# multiple assignments that change dtypes
# the location indexer is a slice
# GH 6120
df = DataFrame(np.random.randn(5,2), columns=['that', 'that'])
expected = DataFrame(1.0, index=range(5), columns=['that', 'that'])
df['that'] = 1.0
check(df, expected)
df = DataFrame(np.random.rand(5,2), columns=['that', 'that'])
expected = DataFrame(1, index=range(5), columns=['that', 'that'])
df['that'] = 1
check(df, expected)
def test_column_dups2(self):
# drop buggy GH 6240
df = DataFrame({'A' : np.random.randn(5),
'B' : np.random.randn(5),
'C' : np.random.randn(5),
'D' : ['a','b','c','d','e'] })
expected = df.take([0,1,1], axis=1)
df2 = df.take([2,0,1,2,1], axis=1)
result = df2.drop('C',axis=1)
assert_frame_equal(result, expected)
# dropna
df = DataFrame({'A' : np.random.randn(5),
'B' : np.random.randn(5),
'C' : np.random.randn(5),
'D' : ['a','b','c','d','e'] })
df.iloc[2,[0,1,2]] = np.nan
df.iloc[0,0] = np.nan
df.iloc[1,1] = np.nan
df.iloc[:,3] = np.nan
expected = df.dropna(subset=['A','B','C'],how='all')
expected.columns = ['A','A','B','C']
df.columns = ['A','A','B','C']
result = df.dropna(subset=['A','C'],how='all')
assert_frame_equal(result, expected)
def test_column_dups_indexing(self):
def check(result, expected=None):
if expected is not None:
assert_frame_equal(result,expected)
result.dtypes
str(result)
# boolean indexing
# GH 4879
dups = ['A', 'A', 'C', 'D']
df = DataFrame(np.arange(12).reshape(3,4), columns=['A', 'B', 'C', 'D'],dtype='float64')
expected = df[df.C > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')
result = df[df.C > 6]
check(result,expected)
# where
df = DataFrame(np.arange(12).reshape(3,4), columns=['A', 'B', 'C', 'D'],dtype='float64')
expected = df[df > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')
result = df[df > 6]
check(result,expected)
# boolean with the duplicate raises
df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')
self.assertRaises(ValueError, lambda : df[df.A > 6])
# dup aligning operations should work
# GH 5185
df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3])
df2 = DataFrame([1, 2, 3], index=[1, 2, 3])
expected = DataFrame([0,2,0,2,2],index=[1,1,2,2,3])
result = df1.sub(df2)
assert_frame_equal(result,expected)
# equality
df1 = DataFrame([[1,2],[2,np.nan],[3,4],[4,4]],columns=['A','B'])
df2 = DataFrame([[0,1],[2,4],[2,np.nan],[4,5]],columns=['A','A'])
# comparing frames that are not like-labelled raises
self.assertRaises(ValueError, lambda : df1 == df2)
df1r = df1.reindex_like(df2)
result = df1r == df2
expected = DataFrame([[False,True],[True,False],[False,False],[True,False]],columns=['A','A'])
assert_frame_equal(result,expected)
# mixed column selection
# GH 5639
dfbool = DataFrame({'one' : Series([True, True, False], index=['a', 'b', 'c']),
'two' : Series([False, False, True, False], index=['a', 'b', 'c', 'd']),
'three': Series([False, True, True, True], index=['a', 'b', 'c', 'd'])})
expected = pd.concat([dfbool['one'],dfbool['three'],dfbool['one']],axis=1)
result = dfbool[['one', 'three', 'one']]
check(result,expected)
# multi-axis dups
# GH 6121
df = DataFrame(np.arange(25.).reshape(5,5),
index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'C', 'D', 'E'])
z = df[['A', 'C', 'A']].copy()
expected = z.ix[['a', 'c', 'a']]
df = DataFrame(np.arange(25.).reshape(5,5),
index=['a', 'b', 'c', 'd', 'e'],
columns=['A', 'B', 'C', 'D', 'E'])
z = df[['A', 'C', 'A']]
result = z.ix[['a', 'c', 'a']]
check(result,expected)
def test_column_dups_indexing2(self):
# GH 8363
# datetime ops with a non-unique index
df = DataFrame({'A' : np.arange(5,dtype='int64'),
'B' : np.arange(1,6,dtype='int64')},
index=[2,2,3,3,4])
result = df.B-df.A
expected = Series(1,index=[2,2,3,3,4])
assert_series_equal(result,expected)
df = DataFrame({'A' : date_range('20130101',periods=5), 'B' : date_range('20130101 09:00:00', periods=5)},index=[2,2,3,3,4])
result = df.B-df.A
expected = Series(Timedelta('9 hours'),index=[2,2,3,3,4])
assert_series_equal(result,expected)
def test_insert_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=lrange(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col,K).reshape(N,K),index=lrange(N))
assert_frame_equal(df,expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('float64'), df.index,
df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'), df.index,
df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
assert_frame_equal(df, DataFrame(np.array([['a', 'a'],
['a', 'a']],
dtype=object),
index=[1, 2],
columns=['a', 'c']))
self.assertRaises(com.PandasError, DataFrame, 'a', [1, 2])
self.assertRaises(com.PandasError, DataFrame, 'a', columns=['a', 'c'])
with tm.assertRaisesRegexp(TypeError, 'incompatible data and dtype'):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A' : 1, 'B' : 'foo', 'C' : 'bar', 'D' : Timestamp("20010101"), 'E' : datetime(2001,1,2,0,0) },
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname : 2})
result.sort_index()
expected.sort_index()
assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0 ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', floatname : np.array(1.,dtype=floatname),
intname : np.array(1,dtype=intname)}, index=np.arange(10))
result = df.get_dtype_counts()
expected = { objectname : 1 }
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result.sort_index()
expected = Series(expected)
expected.sort_index()
assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', floatname : np.array([1.]*10,dtype=floatname),
intname : np.array([1]*10,dtype=intname)}, index=np.arange(10))
result = df.get_dtype_counts()
result.sort_index()
assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
self.assertEqual(datetime_s.dtype, 'M8[ns]')
df = DataFrame({'datetime_s':datetime_s})
result = df.get_dtype_counts()
expected = Series({ datetime64name : 1 })
result.sort_index()
expected.sort_index()
assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates':dates})
result = df.get_dtype_counts()
expected = Series({ datetime64name : 1, objectname : 1 })
result.sort_index()
expected.sort_index()
assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
self.assertEqual(df.iat[0,0],dt)
assert_series_equal(df.dtypes,Series({'End Date' : np.dtype('object') }))
df = DataFrame([{'End Date': dt}])
self.assertEqual(df.iat[0,0],dt)
assert_series_equal(df.dtypes,Series({'End Date' : np.dtype('object') }))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101',periods=3)
df = DataFrame({ 'value' : dr})
self.assertTrue(df.iat[0,0].tz is None)
dr = date_range('20130101',periods=3,tz='UTC')
df = DataFrame({ 'value' : dr})
self.assertTrue(str(df.iat[0,0].tz) == 'UTC')
dr = date_range('20130101',periods=3,tz='US/Eastern')
df = DataFrame({ 'value' : dr})
self.assertTrue(str(df.iat[0,0].tz) == 'US/Eastern')
# GH 7822
# preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz = 'US/Eastern')
expected = DataFrame( {'a' : i.to_series(keep_tz=True).reset_index(drop=True) })
df = DataFrame()
df['a'] = i
assert_frame_equal(df, expected)
df = DataFrame( {'a' : i } )
assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame( {'a' : i, 'b' : i_no_tz } )
expected = DataFrame( {'a' : i.to_series(keep_tz=True).reset_index(drop=True), 'b': i_no_tz })
assert_frame_equal(df, expected)
def test_constructor_for_list_with_dtypes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64' : 5})
df = DataFrame([np.array(np.arange(5),dtype='int32') for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32' : 5})
# overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a' : [2**31,2**31+1]})
result = df.get_dtype_counts()
expected = Series({'int64' : 1 })
assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1 })
assert_series_equal(result, expected)
df = DataFrame([1.,2.])
result = df.get_dtype_counts()
expected = Series({'float64' : 1 })
assert_series_equal(result, expected)
df = DataFrame({'a' : [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64' : 1})
assert_series_equal(result, expected)
df = DataFrame({'a' : [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64' : 1})
assert_series_equal(result, expected)
df = DataFrame({'a' : 1 }, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
assert_series_equal(result, expected)
df = DataFrame({'a' : 1. }, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'float64': 1 })
assert_series_equal(result, expected)
# with object list
df = DataFrame({'a':[1,2,4,7], 'b':[1.2, 2.3, 5.1, 6.3],
'c':list('abcd'), 'd':[datetime(2000,1,1) for i in range(4)],
'e' : [1.,2,4.,7]})
result = df.get_dtype_counts()
expected = Series({'int64': 1, 'float64' : 2, datetime64name: 1, objectname : 1})
result.sort_index()
expected.sort_index()
assert_series_equal(result, expected)
def test_not_hashable(self):
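# DataFrames are mutable and therefore should not be hashable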
df = pd.DataFrame([1])
self.assertRaises(TypeError, hash, df)
self.assertRaises(TypeError, hash, self.empty)
def test_timedeltas(self):
df = DataFrame(dict(A = Series(date_range('2012-1-1', periods=3, freq='D')),
B = Series([ timedelta(days=i) for i in range(3) ])))
result = df.get_dtype_counts()
expected = Series({'datetime64[ns]': 1, 'timedelta64[ns]' : 1 })
result.sort()
expected.sort()
assert_series_equal(result, expected)
df['C'] = df['A'] + df['B']
expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1 })
result = df.get_dtype_counts()
result.sort()
expected.sort()
assert_series_equal(result, expected)
# mixed int types
df['D'] = 1
expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1, 'int64' : 1 })
result = df.get_dtype_counts()
result.sort()
expected.sort()
assert_series_equal(result, expected)
def test_operators_timedelta64(self):
from datetime import datetime, timedelta
df = DataFrame(dict(A = date_range('2012-1-1', periods=3, freq='D'),
B = date_range('2012-1-2', periods=3, freq='D'),
C = Timestamp('20120101')-timedelta(minutes=5,seconds=5)))
diffs = DataFrame(dict(A = df['A']-df['C'],
B = df['A']-df['B']))
# min
result = diffs.min()
self.assertEqual(result[0], diffs.ix[0,'A'])
self.assertEqual(result[1], diffs.ix[0,'B'])
result = diffs.min(axis=1)
self.assertTrue((result == diffs.ix[0,'B']).all() == True)
# max
result = diffs.max()
self.assertEqual(result[0], diffs.ix[2,'A'])
self.assertEqual(result[1], diffs.ix[2,'B'])
result = diffs.max(axis=1)
self.assertTrue((result == diffs['A']).all() == True)
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A = df['A']-df['C'],
B = df['B']-df['A']))
assert_frame_equal(result,expected)
assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
result = mixed.min()
expected = Series([_coerce_scalar_to_timedelta_type(timedelta(seconds=5*60+5)),
_coerce_scalar_to_timedelta_type(timedelta(days=-1)),
'foo',
1,
1.0,
Timestamp('20130101')],
index=mixed.columns)
assert_series_equal(result,expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.],index=[0, 1, 2])
assert_series_equal(result,expected)
# works when only those columns are selected
result = mixed[['A','B']].min(1)
expected = Series([ timedelta(days=-1) ] * 3)
assert_series_equal(result,expected)
result = mixed[['A','B']].min()
expected = Series([ timedelta(seconds=5*60+5), timedelta(days=-1) ],index=['A','B'])
assert_series_equal(result,expected)
# GH 3106
df = DataFrame({'time' : date_range('20130102',periods=5),
'time2' : date_range('20130105',periods=5) })
df['off1'] = df['time2']-df['time']
self.assertEqual(df['off1'].dtype, 'timedelta64[ns]')
df['off2'] = df['time']-df['time2']
df._consolidate_inplace()
self.assertTrue(df['off1'].dtype == 'timedelta64[ns]')
self.assertTrue(df['off2'].dtype == 'timedelta64[ns]')
def test_datetimelike_setitem_with_inference(self):
# GH 7592
# assignment of timedeltas with NaT
one_hour = timedelta(hours=1)
df = DataFrame(index=date_range('20130101',periods=4))
df['A'] = np.array([1*one_hour]*4, dtype='m8[ns]')
df.loc[:,'B'] = np.array([2*one_hour]*4, dtype='m8[ns]')
df.loc[:3,'C'] = np.array([3*one_hour]*3, dtype='m8[ns]')
df.ix[:,'D'] = np.array([4*one_hour]*4, dtype='m8[ns]')
df.ix[:3,'E'] = np.array([5*one_hour]*3, dtype='m8[ns]')
df['F'] = np.timedelta64('NaT')
df.ix[:-1,'F'] = np.array([6*one_hour]*3, dtype='m8[ns]')
df.ix[-3:,'G'] = date_range('20130101',periods=3)
df['H'] = np.datetime64('NaT')
result = df.dtypes
expected = Series([np.dtype('timedelta64[ns]')]*6+[np.dtype('datetime64[ns]')]*2,index=list('ABCDEFGH'))
assert_series_equal(result,expected)
def test_new_empty_index(self):
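# setting an index name on one empty frame must not leak into another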
df1 = DataFrame(randn(0, 3))
df2 = DataFrame(randn(0, 3))
df1.index.name = 'foo'
self.assertIsNone(df2.index.name)
def test_astype(self):
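# frame-level astype should match casting the underlying values array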
casted = self.frame.astype(int)
expected = DataFrame(self.frame.values.astype(int),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(casted, expected)
casted = self.frame.astype(np.int32)
expected = DataFrame(self.frame.values.astype(np.int32),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(casted, expected)
self.frame['foo'] = '5'
casted = self.frame.astype(int)
expected = DataFrame(self.frame.values.astype(int),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(casted, expected)
# mixed casting
def _check_cast(df, v):
self.assertEqual(list(set([ s.dtype.name for _, s in compat.iteritems(df) ]))[0], v)
mn = self.all_mixed._get_numeric_data().copy()
mn['little_float'] = np.array(12345.,dtype='float16')
mn['big_float'] = np.array(123456789101112.,dtype='float64')
casted = mn.astype('float64')
_check_cast(casted, 'float64')
casted = mn.astype('int64')
_check_cast(casted, 'int64')
casted = self.mixed_float.reindex(columns = ['A','B']).astype('float32')
_check_cast(casted, 'float32')
casted = mn.reindex(columns = ['little_float']).astype('float16')
_check_cast(casted, 'float16')
casted = self.mixed_float.reindex(columns = ['A','B']).astype('float16')
_check_cast(casted, 'float16')
casted = mn.astype('float32')
_check_cast(casted, 'float32')
casted = mn.astype('int32')
_check_cast(casted, 'int32')
# to object
casted = mn.astype('O')
_check_cast(casted, 'object')
def test_astype_with_exclude_string(self):
df = self.frame.copy()
expected = self.frame.astype(int)
df['string'] = 'foo'
casted = df.astype(int, raise_on_error = False)
expected['string'] = 'foo'
assert_frame_equal(casted, expected)
df = self.frame.copy()
expected = self.frame.astype(np.int32)
df['string'] = 'foo'
casted = df.astype(np.int32, raise_on_error = False)
expected['string'] = 'foo'
assert_frame_equal(casted, expected)
def test_astype_with_view(self):
tf = self.mixed_float.reindex(columns = ['A','B','C'])
casted = tf.astype(np.int64)
casted = tf.astype(np.float32)
# this is the only real reason to do it this way
tf = np.round(self.frame).astype(np.int32)
casted = tf.astype(np.float32, copy = False)
tf = self.frame.astype(np.float64)
casted = tf.astype(np.int64, copy = False)
def test_astype_cast_nan_int(self):
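# NaN has no int64 representation, so the cast must raise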
df = DataFrame(data={"Values": [1.0, 2.0, 3.0, np.nan]})
self.assertRaises(ValueError, df.astype, np.int64)
def test_array_interface(self):
result = np.sqrt(self.frame)
tm.assert_isinstance(result, type(self.frame))
self.assertIs(result.index, self.frame.index)
self.assertIs(result.columns, self.frame.columns)
assert_frame_equal(result, self.frame.apply(np.sqrt))
def test_pickle(self):
unpickled = self.round_trip_pickle(self.mixed_frame)
assert_frame_equal(self.mixed_frame, unpickled)
# buglet
self.mixed_frame._data.ndim
# empty
unpickled = self.round_trip_pickle(self.empty)
repr(unpickled)
def test_to_dict(self):
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
recons_data = DataFrame(test_data).to_dict()
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("l")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k][int(k2) - 1])
recons_data = DataFrame(test_data).to_dict("s")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("sp")
expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'],
'data': [[1.0, '1'], [2.0, '2'], [nan, '3']]}
tm.assert_almost_equal(recons_data, expected_split)
recons_data = DataFrame(test_data).to_dict("r")
expected_records = [{'A': 1.0, 'B': '1'},
{'A': 2.0, 'B': '2'},
{'A': nan, 'B': '3'}]
tm.assert_almost_equal(recons_data, expected_records)
def test_to_dict_invalid_orient(self):
df = DataFrame({'A':[0, 1]})
self.assertRaises(ValueError, df.to_dict, orient='invalid')
def test_to_records_dt64(self):
df = DataFrame([["one", "two", "three"],
["four", "five", "six"]],
index=date_range("2012-01-01", "2012-01-02"))
self.assertEqual(df.to_records()['index'][0], df.index[0])
rs = df.to_records(convert_datetime64=False)
self.assertEqual(rs['index'][0], df.index.values[0])
def test_to_records_with_multindex(self):
# GH3189
index = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
data = np.zeros((8, 4))
df = DataFrame(data, index=index)
r = df.to_records(index=True)['level_0']
self.assertTrue('bar' in r)
self.assertTrue('one' not in r)
def test_to_records_with_Mapping_type(self):
import email
from email.parser import Parser
import collections
collections.Mapping.register(email.message.Message)
headers = Parser().parsestr('From: <user@example.com>\n'
'To: <someone_else@example.com>\n'
'Subject: Test message\n'
'\n'
'Body would go here\n')
frame = DataFrame.from_records([headers])
self.assertTrue(all(x in frame for x in ['To', 'Subject', 'From']))
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=('i4,f4,a10'))
arr[:] = [(1, 2., 'Hello'), (2, 3., "World")]
frame = DataFrame.from_records(arr)
index = np.arange(len(arr))[::-1]
indexed_frame = DataFrame.from_records(arr, index=index)
self.assert_numpy_array_equal(indexed_frame.index, index)
# without names, it should go to last ditch
arr2 = np.zeros((2,3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with assertRaisesRegexp(ValueError, msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index='f1')
# what to do?
records = indexed_frame.to_records()
self.assertEqual(len(records.dtype.names), 3)
records = indexed_frame.to_records(index=False)
self.assertEqual(len(records.dtype.names), 2)
self.assertNotIn('index', records.dtype.names)
def test_from_records_nones(self):
tuples = [(1, 2, None, 3),
(1, 2, None, 3),
(None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])
self.assertTrue(np.isnan(df['c'][0]))
def test_from_records_iterator(self):
arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6), (7., 7., 8, 8)],
dtype=[('x', np.float64), ('u', np.float32), ('y', np.int64), ('z', np.int32) ])
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),
'u': np.array([1.0, 3.0], dtype=np.float32),
'y': np.array([2, 4], dtype=np.int64),
'z': np.array([2, 4], dtype=np.int32)})
assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]
df = DataFrame.from_records(iter(arr), columns=['x', 'y'],
nrows=2)
assert_frame_equal(df, xp.reindex(columns=['x','y']), check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield (i, letters[i % len(letters)], i/length)
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in tuple_generator(10)] for j in range(len(columns_names))]
data = {'Integer': columns[0], 'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = tuple_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
def list_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield [i, letters[i % len(letters)], i/length]
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in list_generator(10)] for j in range(len(columns_names))]
data = {'Integer': columns[0], 'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = list_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
tuples = [(1, 2, 3),
(1, 2, 3),
(2, 5, 3)]
columns = ['a', 'b', 'c']
original_columns = list(columns)
df = DataFrame.from_records(tuples, columns=columns, index='a')
self.assertEqual(columns, original_columns)
def test_from_records_decimal(self):
from decimal import Decimal
tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]
df = DataFrame.from_records(tuples, columns=['a'])
self.assertEqual(df['a'].dtype, object)
df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)
self.assertEqual(df['a'].dtype, np.float64)
self.assertTrue(np.isnan(df['a'].values[-1]))
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
expected = DataFrame([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
assert_frame_equal(result, expected)
def test_from_records_set_index_name(self):
def create_dict(order_id):
return {'order_id': order_id, 'quantity': np.random.randint(1, 10),
'price': np.random.randint(1, 10)}
documents = [create_dict(i) for i in range(10)]
# demo missing data
documents.append({'order_id': 10, 'quantity': 5})
result = DataFrame.from_records(documents, index='order_id')
self.assertEqual(result.index.name, 'order_id')
# MultiIndex
result = DataFrame.from_records(documents,
index=['order_id', 'quantity'])
self.assertEqual(result.index.names, ('order_id', 'quantity'))
def test_from_records_misc_brokenness(self):
# #2179
data = {1: ['foo'], 2: ['bar']}
result = DataFrame.from_records(data, columns=['a', 'b'])
exp = DataFrame(data, columns=['a', 'b'])
assert_frame_equal(result, exp)
# overlap in index/index_names
data = {'a': [1, 2, 3], 'b': [4, 5, 6]}
result = DataFrame.from_records(data, index=['a', 'b', 'c'])
exp = DataFrame(data, index=['a', 'b', 'c'])
assert_frame_equal(result, exp)
# GH 2623
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({ 'datetime64[ns]' : 1, 'object' : 1 })
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 1])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({ 'datetime64[ns]' : 1, 'int64' : 1 })
def test_from_records_empty(self):
# 3562
result = DataFrame.from_records([], columns=['a','b','c'])
expected = DataFrame(columns=['a','b','c'])
assert_frame_equal(result, expected)
result = DataFrame.from_records([], columns=['a','b','b'])
expected = DataFrame(columns=['a','b','b'])
assert_frame_equal(result, expected)
def test_from_records_empty_with_nonempty_fields_gh3682(self):
a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(a, index='id')
assert_array_equal(df.index, Index([1], name='id'))
self.assertEqual(df.index.name, 'id')
assert_array_equal(df.columns, Index(['value']))
b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(b, index='id')
assert_array_equal(df.index, Index([], name='id'))
self.assertEqual(df.index.name, 'id')
def test_from_records_with_datetimes(self):
if sys.version < LooseVersion('2.7'):
raise nose.SkipTest("rec arrays don't work properly with py2.6")
# this may fail on certain platforms because of a numpy issue
# related GH6140
if not is_little_endian():
raise nose.SkipTest("known failure of test on non-little endian")
# construction with a null in a recarray
# GH 6140
expected = DataFrame({ 'EXPIRY' : [datetime(2005, 3, 1, 0, 0), None ]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[ns]')]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
except (ValueError):
raise nose.SkipTest("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
assert_frame_equal(result,expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[m]')]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
assert_frame_equal(result,expected)
def test_to_records_floats(self):
df = DataFrame(np.random.rand(10, 10))
df.to_records()
def test_to_records_index_name(self):
df = DataFrame(np.random.randn(3, 3))
df.index.name = 'X'
rs = df.to_records()
self.assertIn('X', rs.dtype.fields)
df = DataFrame(np.random.randn(3, 3))
rs = df.to_records()
self.assertIn('index', rs.dtype.fields)
df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])
df.index.names = ['A', None]
rs = df.to_records()
self.assertIn('level_0', rs.dtype.fields)
def test_join_str_datetime(self):
str_dates = ['20120209', '20120222']
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
A = DataFrame(str_dates, index=lrange(2), columns=['aa'])
C = DataFrame([[1, 2], [3, 4]], index=str_dates, columns=dt_dates)
tst = A.join(C, on='aa')
self.assertEqual(len(tst.columns), 3)
def test_from_records_sequencelike(self):
df = DataFrame({'A' : np.array(np.random.randn(6), dtype = np.float64),
'A1': np.array(np.random.randn(6), dtype = np.float64),
'B' : np.array(np.arange(6), dtype = np.int64),
'C' : ['foo'] * 6,
'D' : np.array([True, False] * 3, dtype=bool),
'E' : np.array(np.random.randn(6), dtype = np.float32),
'E1': np.array(np.random.randn(6), dtype = np.float32),
'F' : np.array(np.arange(6), dtype = np.int32) })
# it is actually tricky to create the record-like arrays and keep the dtypes intact
blocks = df.blocks
tuples = []
columns = []
dtypes = []
for dtype, b in compat.iteritems(blocks):
columns.extend(b.columns)
dtypes.extend([ (c,np.dtype(dtype).descr[0][1]) for c in b.columns ])
for i in range(len(df.index)):
tup = []
for _, b in compat.iteritems(blocks):
tup.extend(b.irow(i).values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = DataFrame.from_records(tuples, columns=columns).reindex(columns=df.columns)
# created recarray and with to_records recarray (have dtype info)
result2 = DataFrame.from_records(recarray, columns=columns).reindex(columns=df.columns)
result3 = DataFrame.from_records(recarray2, columns=columns).reindex(columns=df.columns)
# list of tuples (no dtype info)
result4 = DataFrame.from_records(lists, columns=columns).reindex(columns=df.columns)
assert_frame_equal(result, df, check_dtype=False)
assert_frame_equal(result2, df)
assert_frame_equal(result3, df)
assert_frame_equal(result4, df, check_dtype=False)
# tuples are in the order of the columns
result = DataFrame.from_records(tuples)
self.assert_numpy_array_equal(result.columns, lrange(8))
# test exclude parameter & we are casting the results here (as we don't have dtype info to recover)
columns_to_test = [ columns.index('C'), columns.index('E1') ]
exclude = list(set(range(8))-set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [ columns[i] for i in sorted(columns_to_test) ]
assert_series_equal(result['C'], df['C'])
assert_series_equal(result['E1'], df['E1'].astype('float64'))
# empty case
result = DataFrame.from_records([], columns=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 0)
self.assert_numpy_array_equal(result.columns, ['foo', 'bar', 'baz'])
result = DataFrame.from_records([])
self.assertEqual(len(result), 0)
self.assertEqual(len(result.columns), 0)
def test_from_records_dictlike(self):
# test the dict methods
df = DataFrame({'A' : np.array(np.random.randn(6), dtype = np.float64),
'A1': np.array(np.random.randn(6), dtype = np.float64),
'B' : np.array(np.arange(6), dtype = np.int64),
'C' : ['foo'] * 6,
'D' : np.array([True, False] * 3, dtype=bool),
'E' : np.array(np.random.randn(6), dtype = np.float32),
'E1': np.array(np.random.randn(6), dtype = np.float32),
'F' : np.array(np.arange(6), dtype = np.int32) })
# columns is in a different order here than the actual items iterated from the dict
columns = []
for dtype, b in compat.iteritems(df.blocks):
columns.extend(b.columns)
asdict = dict((x, y) for x, y in compat.iteritems(df))
asdict2 = dict((x, y.values) for x, y in compat.iteritems(df))
# dict of series & dict of ndarrays (have dtype info)
results = []
results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns))
for r in results:
assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
data = np.random.randn(10)
df1 = DataFrame.from_records(df, index=data)
assert(df1.index.equals(Index(data)))
def test_from_records_bad_index_column(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
# should pass
df1 = DataFrame.from_records(df, index=['C'])
assert(df1.index.equals(Index(df.C)))
df1 = DataFrame.from_records(df, index='C')
assert(df1.index.equals(Index(df.C)))
# should fail
self.assertRaises(ValueError, DataFrame.from_records, df, index=[2])
self.assertRaises(KeyError, DataFrame.from_records, df, index=2)
def test_from_records_non_tuple(self):
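# any sequence-like record should produce the same frame as plain tuples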
class Record(object):
def __init__(self, *args):
self.args = args
def __getitem__(self, i):
return self.args[i]
def __iter__(self):
return iter(self.args)
recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
tups = lmap(tuple, recs)
result = DataFrame.from_records(recs)
expected = DataFrame.from_records(tups)
assert_frame_equal(result, expected)
def test_from_records_len0_with_columns(self):
# #2633
result = DataFrame.from_records([], index='foo',
columns=['foo', 'bar'])
self.assertTrue(np.array_equal(result.columns, ['bar']))
self.assertEqual(len(result), 0)
self.assertEqual(result.index.name, 'foo')
def test_get_agg_axis(self):
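# the agg axis for axis=0 is the columns, for axis=1 the index; anything else raises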
cols = self.frame._get_agg_axis(0)
self.assertIs(cols, self.frame.columns)
idx = self.frame._get_agg_axis(1)
self.assertIs(idx, self.frame.index)
self.assertRaises(ValueError, self.frame._get_agg_axis, 2)
def test_nonzero(self):
self.assertTrue(self.empty.empty)
self.assertFalse(self.frame.empty)
self.assertFalse(self.mixed_frame.empty)
# corner case
df = DataFrame({'A': [1., 2., 3.],
'B': ['a', 'b', 'c']},
index=np.arange(3))
del df['A']
self.assertFalse(df.empty)
def test_repr_empty(self):
buf = StringIO()
# empty
foo = repr(self.empty)
# empty with index
frame = DataFrame(index=np.arange(1000))
foo = repr(frame)
def test_repr_mixed(self):
buf = StringIO()
# mixed
foo = repr(self.mixed_frame)
self.mixed_frame.info(verbose=False, buf=buf)
@slow
def test_repr_mixed_big(self):
# big mixed
biggie = DataFrame({'A': randn(200),
'B': tm.makeStringIndex(200)},
index=lrange(200))
biggie.loc[:20,'A'] = nan
biggie.loc[:20,'B'] = nan
foo = repr(biggie)
def test_repr(self):
buf = StringIO()
# small one
foo = repr(self.frame)
self.frame.info(verbose=False, buf=buf)
# even smaller
self.frame.reindex(columns=['A']).info(verbose=False, buf=buf)
self.frame.reindex(columns=['A', 'B']).info(verbose=False, buf=buf)
# exhausting cases in DataFrame.info
# columns but no index
no_index = DataFrame(columns=[0, 1, 3])
foo = repr(no_index)
# no columns or index
self.empty.info(buf=buf)
df = DataFrame(["a\n\r\tb"], columns=["a\n\r\td"], index=["a\n\r\tf"])
self.assertFalse("\t" in repr(df))
self.assertFalse("\r" in repr(df))
self.assertFalse("a\n" in repr(df))
def test_repr_dimensions(self):
df = DataFrame([[1, 2,], [3, 4]])
with option_context('display.show_dimensions', True):
self.assertTrue("2 rows x 2 columns" in repr(df))
with option_context('display.show_dimensions', False):
self.assertFalse("2 rows x 2 columns" in repr(df))
with option_context('display.show_dimensions', 'truncate'):
self.assertFalse("2 rows x 2 columns" in repr(df))
@slow
def test_repr_big(self):
buf = StringIO()
# big one
biggie = DataFrame(np.zeros((200, 4)), columns=lrange(4),
index=lrange(200))
foo = repr(biggie)
def test_repr_unsortable(self):
# columns are not sortable
import warnings
warn_filters = warnings.filters
warnings.filterwarnings('ignore',
category=FutureWarning,
module=".*format")
unsortable = DataFrame({'foo': [1] * 50,
datetime.today(): [1] * 50,
'bar': ['bar'] * 50,
datetime.today(
) + timedelta(1): ['bar'] * 50},
index=np.arange(50))
foo = repr(unsortable)
fmt.set_option('display.precision', 3, 'display.column_space', 10)
repr(self.frame)
fmt.set_option('display.max_rows', 10, 'display.max_columns', 2)
repr(self.frame)
fmt.set_option('display.max_rows', 1000, 'display.max_columns', 1000)
repr(self.frame)
self.reset_display_options()
warnings.filters = warn_filters
def test_repr_unicode(self):
uval = u('\u03c3\u03c3\u03c3\u03c3')
bval = uval.encode('utf-8')
df = DataFrame({'A': [uval, uval]})
result = repr(df)
ex_top = ' A'
self.assertEqual(result.split('\n')[0].rstrip(), ex_top)
df = DataFrame({'A': [uval, uval]})
result = repr(df)
self.assertEqual(result.split('\n')[0].rstrip(), ex_top)
def test_unicode_string_with_unicode(self):
df = DataFrame({'A': [u("\u05d0")]})
if compat.PY3:
str(df)
else:
compat.text_type(df)
def test_bytestring_with_unicode(self):
df = DataFrame({'A': [u("\u05d0")]})
if compat.PY3:
bytes(df)
else:
str(df)
def test_very_wide_info_repr(self):
df = DataFrame(np.random.randn(10, 20),
columns=tm.rands_array(10, 20))
repr(df)
def test_repr_column_name_unicode_truncation_bug(self):
# #1906
df = DataFrame({'Id': [7117434],
'StringCol': ('Is it possible to modify drop plot code'
' so that the output graph is displayed '
'in iphone simulator, Is it possible to '
'modify drop plot code so that the '
'output graph is \xe2\x80\xa8displayed '
'in iphone simulator.Now we are adding '
'the CSV file externally. I want to Call'
' the File through the code..')})
result = repr(df)
self.assertIn('StringCol', result)
def test_head_tail(self):
assert_frame_equal(self.frame.head(), self.frame[:5])
assert_frame_equal(self.frame.tail(), self.frame[-5:])
assert_frame_equal(self.frame.head(0), self.frame)
assert_frame_equal(self.frame.tail(0), self.frame)
assert_frame_equal(self.frame.head(-1), self.frame[:-1])
assert_frame_equal(self.frame.tail(-1), self.frame[1:])
assert_frame_equal(self.frame.head(1), self.frame[:1])
assert_frame_equal(self.frame.tail(1), self.frame[-1:])
# with a float index
df = self.frame.copy()
df.index = np.arange(len(self.frame)) + 0.1
assert_frame_equal(df.head(), df.iloc[:5])
assert_frame_equal(df.tail(), df.iloc[-5:])
assert_frame_equal(df.head(0), df)
assert_frame_equal(df.tail(0), df)
assert_frame_equal(df.head(-1), df.iloc[:-1])
assert_frame_equal(df.tail(-1), df.iloc[1:])
#test empty dataframe
empty_df = DataFrame()
assert_frame_equal(empty_df.tail(), empty_df)
assert_frame_equal(empty_df.head(), empty_df)
def test_insert(self):
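# DataFrame.insert places a new column at the given position and raises if the
# label already exists, e.g. df.insert(0, 'foo', df['a']) makes the columns
# ['foo', 'c', 'b', 'a']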
df = DataFrame(np.random.randn(5, 3), index=np.arange(5),
columns=['c', 'b', 'a'])
df.insert(0, 'foo', df['a'])
self.assert_numpy_array_equal(df.columns, ['foo', 'c', 'b', 'a'])
assert_almost_equal(df['a'], df['foo'])
df.insert(2, 'bar', df['c'])
self.assert_numpy_array_equal(df.columns, ['foo', 'c', 'bar', 'b', 'a'])
assert_almost_equal(df['c'], df['bar'])
# diff dtype
# new item
df['x'] = df['a'].astype('float32')
result = Series(dict(float64 = 5, float32 = 1))
self.assertTrue((df.get_dtype_counts() == result).all())
# replacing current (in different block)
df['a'] = df['a'].astype('float32')
result = Series(dict(float64 = 4, float32 = 2))
self.assertTrue((df.get_dtype_counts() == result).all())
df['y'] = df['a'].astype('int32')
result = Series(dict(float64 = 4, float32 = 2, int32 = 1))
self.assertTrue((df.get_dtype_counts() == result).all())
with assertRaisesRegexp(ValueError, 'already exists'):
df.insert(1, 'a', df['b'])
self.assertRaises(ValueError, df.insert, 1, 'c', df['b'])
df.columns.name = 'some_name'
# preserve columns name field
df.insert(0, 'baz', df['c'])
self.assertEqual(df.columns.name, 'some_name')
def test_delitem(self):
del self.frame['A']
self.assertNotIn('A', self.frame)
def test_pop(self):
self.frame.columns.name = 'baz'
A = self.frame.pop('A')
self.assertNotIn('A', self.frame)
self.frame['foo'] = 'bar'
foo = self.frame.pop('foo')
self.assertNotIn('foo', self.frame)
# TODO self.assertEqual(self.frame.columns.name, 'baz')
def test_pop_non_unique_cols(self):
df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})
df.columns = ["a", "b", "a"]
res = df.pop("a")
self.assertEqual(type(res), DataFrame)
self.assertEqual(len(res), 2)
self.assertEqual(len(df.columns), 1)
self.assertTrue("b" in df.columns)
self.assertFalse("a" in df.columns)
self.assertEqual(len(df.index), 2)
def test_iter(self):
self.assertTrue(tm.equalContents(list(self.frame), self.frame.columns))
def test_iterrows(self):
for i, (k, v) in enumerate(self.frame.iterrows()):
exp = self.frame.xs(self.frame.index[i])
assert_series_equal(v, exp)
for i, (k, v) in enumerate(self.mixed_frame.iterrows()):
exp = self.mixed_frame.xs(self.mixed_frame.index[i])
assert_series_equal(v, exp)
def test_itertuples(self):
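# itertuples yields one tuple per row with the index value first; integer
# columns should come back as numpy integers, and duplicate column labels
# should still round-trip, e.g. (0, 1, 1) for row 0 of df[['a', 'a']]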
for i, tup in enumerate(self.frame.itertuples()):
s = Series(tup[1:])
s.name = tup[0]
expected = self.frame.ix[i, :].reset_index(drop=True)
assert_series_equal(s, expected)
df = DataFrame({'floats': np.random.randn(5),
'ints': lrange(5)}, columns=['floats', 'ints'])
for tup in df.itertuples(index=False):
tm.assert_isinstance(tup[1], np.integer)
df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
dfaa = df[['a', 'a']]
self.assertEqual(list(dfaa.itertuples()), [(0, 1, 1), (1, 2, 2), (2, 3, 3)])
def test_len(self):
self.assertEqual(len(self.frame), len(self.frame.index))
def test_operators(self):
garbage = random.random(4)
colSeries = Series(garbage, index=np.array(self.frame.columns))
idSum = self.frame + self.frame
seriesSum = self.frame + colSeries
for col, series in compat.iteritems(idSum):
for idx, val in compat.iteritems(series):
origVal = self.frame[col][idx] * 2
if not np.isnan(val):
self.assertEqual(val, origVal)
else:
self.assertTrue(np.isnan(origVal))
for col, series in compat.iteritems(seriesSum):
for idx, val in compat.iteritems(series):
origVal = self.frame[col][idx] + colSeries[col]
if not np.isnan(val):
self.assertEqual(val, origVal)
else:
self.assertTrue(np.isnan(origVal))
added = self.frame2 + self.frame2
expected = self.frame2 * 2
assert_frame_equal(added, expected)
df = DataFrame({'a': ['a', None, 'b']})
assert_frame_equal(df + df, DataFrame({'a': ['aa', np.nan, 'bb']}))
def test_ops_np_scalar(self):
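# arithmetic against 0-d numpy scalars (np.array(x)) should match plain
# scalar arithmetic for /, *, + and - in either operand order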
vals, xs = np.random.rand(5, 3), [nan, 7, -23, 2.718, -3.14, np.inf]
f = lambda x: DataFrame(x, index=list('ABCDE'),
columns=['jim', 'joe', 'jolie'])
df = f(vals)
for x in xs:
assert_frame_equal(df / np.array(x), f(vals / x))
assert_frame_equal(np.array(x) * df, f(vals * x))
assert_frame_equal(df + np.array(x), f(vals + x))
assert_frame_equal(np.array(x) - df, f(x - vals))
def test_operators_boolean(self):
# GH 5808
# empty frames, non-mixed dtype
result = DataFrame(index=[1]) & DataFrame(index=[1])
assert_frame_equal(result,DataFrame(index=[1]))
result = DataFrame(index=[1]) | DataFrame(index=[1])
assert_frame_equal(result,DataFrame(index=[1]))
result = DataFrame(index=[1]) & DataFrame(index=[1,2])
assert_frame_equal(result,DataFrame(index=[1,2]))
result = DataFrame(index=[1],columns=['A']) & DataFrame(index=[1],columns=['A'])
assert_frame_equal(result,DataFrame(index=[1],columns=['A']))
result = DataFrame(True,index=[1],columns=['A']) & DataFrame(True,index=[1],columns=['A'])
assert_frame_equal(result,DataFrame(True,index=[1],columns=['A']))
result = DataFrame(True,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
assert_frame_equal(result,DataFrame(True,index=[1],columns=['A']))
# boolean ops
result = DataFrame(1,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
assert_frame_equal(result,DataFrame(1,index=[1],columns=['A']))
def f():
DataFrame(1.0,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
self.assertRaises(TypeError, f)
def f():
DataFrame('foo',index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])
self.assertRaises(TypeError, f)
def test_operators_none_as_na(self):
df = DataFrame({"col1": [2, 5.0, 123, None],
"col2": [1, 2, 3, 4]}, dtype=object)
ops = [operator.add, operator.sub, operator.mul, operator.truediv]
# since filling converts dtypes from object, changed expected to be object
for op in ops:
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isnull(expected)] = None
assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isnull(expected)] = None
assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
assert_frame_equal(result, expected, check_dtype=False)
def test_comparison_invalid(self):
def check(df,df2):
for (x, y) in [(df,df2),(df2,df)]:
self.assertRaises(TypeError, lambda : x == y)
self.assertRaises(TypeError, lambda : x != y)
self.assertRaises(TypeError, lambda : x >= y)
self.assertRaises(TypeError, lambda : x > y)
self.assertRaises(TypeError, lambda : x < y)
self.assertRaises(TypeError, lambda : x <= y)
# GH4968
# invalid date/int comparisons
df = DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])
df['dates'] = date_range('20010101', periods=len(df))
df2 = df.copy()
df2['dates'] = df['a']
check(df,df2)
df = DataFrame(np.random.randint(10, size=(10, 2)), columns=['a', 'b'])
df2 = DataFrame({'a': date_range('20010101', periods=len(df)), 'b': date_range('20100101', periods=len(df))})
check(df,df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
df = DataFrame({'dates1': date_range('20010101', periods=10),
'dates2': date_range('20010102', periods=10),
'intcol': np.random.randint(1000000000, size=10),
'floatcol': np.random.randn(10),
'stringcol': list(tm.rands(10))})
df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(df, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), df)
tm.assert_frame_equal(result, expected)
# nats
expected = left_f(df, Timestamp('nat'))
result = right_f(Timestamp('nat'), df)
tm.assert_frame_equal(result, expected)
def test_modulo(self):
# GH3590, modulo as ints
p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })
### this is technically wrong as the integer portion is coerced to float ###
expected = DataFrame({ 'first' : Series([0,0,0,0],dtype='float64'), 'second' : Series([np.nan,np.nan,np.nan,0]) })
result = p % p
assert_frame_equal(result,expected)
# numpy has a slightly different (wrong) treatment
result2 = DataFrame(p.values % p.values,index=p.index,columns=p.columns,dtype='float64')
result2.iloc[0:3,1] = np.nan
assert_frame_equal(result2,expected)
result = p % 0
expected = DataFrame(np.nan,index=p.index,columns=p.columns)
assert_frame_equal(result,expected)
# numpy has a slightly different (wrong) treatment
result2 = DataFrame(p.values.astype('float64') % 0,index=p.index,columns=p.columns)
assert_frame_equal(result2,expected)
# not commutative with series
p = DataFrame(np.random.randn(10, 5))
s = p[0]
res = s % p
res2 = p % s
self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))
def test_div(self):
# integer div, but deal with the 0's
p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })
result = p / p
### this is technically wrong as the integer portion is coerced to float ###
expected = DataFrame({ 'first' : Series([1,1,1,1],dtype='float64'), 'second' : Series([np.inf,np.inf,np.inf,1]) })
assert_frame_equal(result,expected)
result2 = DataFrame(p.values.astype('float64')/p.values,index=p.index,columns=p.columns).fillna(np.inf)
assert_frame_equal(result2,expected)
result = p / 0
expected = DataFrame(np.inf,index=p.index,columns=p.columns)
assert_frame_equal(result,expected)
# numpy has a slightly different (wrong) treatment
result2 = DataFrame(p.values.astype('float64')/0,index=p.index,columns=p.columns).fillna(np.inf)
assert_frame_equal(result2,expected)
p = DataFrame(np.random.randn(10, 5))
s = p[0]
res = s / p
res2 = p / s
self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))
def test_logical_operators(self):
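# &, | and ^ between boolean frames (and ~ on a single frame) should match
# the element-wise numpy result and return bool dtype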
def _check_bin_op(op):
result = op(df1, df2)
expected = DataFrame(op(df1.values, df2.values), index=df1.index,
columns=df1.columns)
self.assertEqual(result.values.dtype, np.bool_)
assert_frame_equal(result, expected)
def _check_unary_op(op):
result = op(df1)
expected = DataFrame(op(df1.values), index=df1.index,
columns=df1.columns)
self.assertEqual(result.values.dtype, np.bool_)
assert_frame_equal(result, expected)
df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
'b': {'a': False, 'b': True, 'c': False,
'd': False, 'e': False},
'c': {'a': False, 'b': False, 'c': True,
'd': False, 'e': False},
'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}}
df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
'b': {'a': False, 'b': True, 'c': False,
'd': False, 'e': False},
'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
'd': {'a': False, 'b': False, 'c': False,
'd': True, 'e': False},
'e': {'a': False, 'b': False, 'c': False,
'd': False, 'e': True}}
df1 = DataFrame(df1)
df2 = DataFrame(df2)
_check_bin_op(operator.and_)
_check_bin_op(operator.or_)
_check_bin_op(operator.xor)
# operator.neg is deprecated in numpy >= 1.9
_check_unary_op(operator.inv)
def test_logical_typeerror(self):
if not compat.PY3:
self.assertRaises(TypeError, self.frame.__eq__, 'foo')
self.assertRaises(TypeError, self.frame.__lt__, 'foo')
self.assertRaises(TypeError, self.frame.__gt__, 'foo')
self.assertRaises(TypeError, self.frame.__ne__, 'foo')
else:
raise nose.SkipTest('test_logical_typeerror not tested on PY3')
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
self.assertEqual(d['a'].dtype, np.object_)
self.assertFalse(d['a'][1])
def test_constructor_with_nas(self):
# GH 5016
# NaNs in the indices
def check(df):
for i in range(len(df.columns)):
df.iloc[:,i]
# allow single nans to succeed
indexer = np.arange(len(df.columns))[isnull(df.columns)]
if len(indexer) == 1:
assert_series_equal(df.iloc[:,indexer[0]],df.loc[:,np.nan])
# multiple nans should fail
else:
def f():
df.loc[:,np.nan]
self.assertRaises(ValueError, f)
df = DataFrame([[1,2,3],[4,5,6]], index=[1,np.nan])
check(df)
df = DataFrame([[1,2,3],[4,5,6]], columns=[1.1,2.2,np.nan])
check(df)
df = DataFrame([[0,1,2,3],[4,5,6,7]], columns=[np.nan,1.1,2.2,np.nan])
check(df)
df = DataFrame([[0.0,1,2,3.0],[4,5,6,7]], columns=[np.nan,1.1,2.2,np.nan])
check(df)
def test_logical_with_nas(self):
d = DataFrame({'a': [np.nan, False], 'b': [True, True]})
# GH4947
# bool comparisons should return bool
result = d['a'] | d['b']
expected = Series([False, True])
assert_series_equal(result, expected)
# GH4604, automatic casting here
result = d['a'].fillna(False) | d['b']
expected = Series([True, True])
assert_series_equal(result, expected)
result = d['a'].fillna(False,downcast=False) | d['b']
expected = Series([True, True])
assert_series_equal(result, expected)
def test_neg(self):
# what to do?
assert_frame_equal(-self.frame, -1 * self.frame)
def test_invert(self):
assert_frame_equal(-(self.frame < 0), ~(self.frame < 0))
def test_first_last_valid(self):
N = len(self.frame.index)
mat = randn(N)
mat[:5] = nan
mat[-5:] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
index = frame.first_valid_index()
self.assertEqual(index, frame.index[5])
index = frame.last_valid_index()
self.assertEqual(index, frame.index[-6])
def test_arith_flex_frame(self):
ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']
if not compat.PY3:
aliases = {}
else:
aliases = {'div': 'truediv'}
for op in ops:
try:
alias = aliases.get(op, op)
f = getattr(operator, alias)
result = getattr(self.frame, op)(2 * self.frame)
exp = f(self.frame, 2 * self.frame)
assert_frame_equal(result, exp)
# vs mix float
result = getattr(self.mixed_float, op)(2 * self.mixed_float)
exp = f(self.mixed_float, 2 * self.mixed_float)
assert_frame_equal(result, exp)
_check_mixed_float(result, dtype = dict(C = None))
# vs mix int
if op in ['add','sub','mul']:
result = getattr(self.mixed_int, op)(2 + self.mixed_int)
exp = f(self.mixed_int, 2 + self.mixed_int)
# overflow in the uint
dtype = None
if op in ['sub']:
dtype = dict(B = 'object', C = None)
elif op in ['add','mul']:
dtype = dict(C = None)
assert_frame_equal(result, exp)
_check_mixed_int(result, dtype = dtype)
# rops
r_f = lambda x, y: f(y, x)
result = getattr(self.frame, 'r' + op)(2 * self.frame)
exp = r_f(self.frame, 2 * self.frame)
assert_frame_equal(result, exp)
# vs mix float
result = getattr(self.mixed_float, op)(2 * self.mixed_float)
exp = f(self.mixed_float, 2 * self.mixed_float)
assert_frame_equal(result, exp)
_check_mixed_float(result, dtype = dict(C = None))
result = getattr(self.intframe, op)(2 * self.intframe)
exp = f(self.intframe, 2 * self.intframe)
assert_frame_equal(result, exp)
# vs mix int
if op in ['add','sub','mul']:
result = getattr(self.mixed_int, op)(2 + self.mixed_int)
exp = f(self.mixed_int, 2 + self.mixed_int)
# overflow in the uint
dtype = None
if op in ['sub']:
dtype = dict(B = 'object', C = None)
elif op in ['add','mul']:
dtype = dict(C = None)
assert_frame_equal(result, exp)
_check_mixed_int(result, dtype = dtype)
except:
com.pprint_thing("Failing operation %r" % op)
raise
# ndim >= 3
ndim_5 = np.ones(self.frame.shape + (3, 4, 5))
with assertRaisesRegexp(ValueError, 'shape'):
f(self.frame, ndim_5)
with assertRaisesRegexp(ValueError, 'shape'):
getattr(self.frame, op)(ndim_5)
# res_add = self.frame.add(self.frame)
# res_sub = self.frame.sub(self.frame)
# res_mul = self.frame.mul(self.frame)
# res_div = self.frame.div(2 * self.frame)
# assert_frame_equal(res_add, self.frame + self.frame)
# assert_frame_equal(res_sub, self.frame - self.frame)
# assert_frame_equal(res_mul, self.frame * self.frame)
# assert_frame_equal(res_div, self.frame / (2 * self.frame))
const_add = self.frame.add(1)
assert_frame_equal(const_add, self.frame + 1)
# corner cases
result = self.frame.add(self.frame[:0])
assert_frame_equal(result, self.frame * np.nan)
result = self.frame[:0].add(self.frame)
assert_frame_equal(result, self.frame * np.nan)
with assertRaisesRegexp(NotImplementedError, 'fill_value'):
self.frame.add(self.frame.irow(0), fill_value=3)
with assertRaisesRegexp(NotImplementedError, 'fill_value'):
self.frame.add(self.frame.irow(0), axis='index', fill_value=3)
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index=MultiIndex.from_product([list('abc'),
['one','two','three'],
[1,2,3]],
names=['first','second','third'])
df = DataFrame(np.arange(27*3).reshape(27,3),
index=index,
columns=['value1','value2','value3']).sortlevel()
idx = pd.IndexSlice
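# build the expected result by hand: apply the op level-by-level using
# pd.IndexSlice cross-sections, then re-sort to match the original ordering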
for op in ['add','sub','mul','div','truediv']:
opa = getattr(operator,op,None)
if opa is None:
continue
x = Series([ 1.0, 10.0, 100.0], [1,2,3])
result = getattr(df,op)(x,level='third',axis=0)
expected = pd.concat([ opa(df.loc[idx[:,:,i],:],v) for i, v in x.iteritems() ]).sortlevel()
assert_frame_equal(result, expected)
x = Series([ 1.0, 10.0], ['two','three'])
result = getattr(df,op)(x,level='second',axis=0)
expected = pd.concat([ opa(df.loc[idx[:,i],:],v) for i, v in x.iteritems() ]).reindex_like(df).sortlevel()
assert_frame_equal(result, expected)
def test_arith_mixed(self):
left = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 2, 3]})
result = left + left
expected = DataFrame({'A': ['aa', 'bb', 'cc'],
'B': [2, 4, 6]})
assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
self._test_op(df, operator.add)
self._test_op(df, operator.sub)
self._test_op(df, operator.mul)
self._test_op(df, operator.truediv)
self._test_op(df, operator.floordiv)
self._test_op(df, operator.pow)
self._test_op(df, lambda x, y: y + x)
self._test_op(df, lambda x, y: y - x)
self._test_op(df, lambda x, y: y * x)
self._test_op(df, lambda x, y: y / x)
self._test_op(df, lambda x, y: y ** x)
self._test_op(df, lambda x, y: x + y)
self._test_op(df, lambda x, y: x - y)
self._test_op(df, lambda x, y: x * y)
self._test_op(df, lambda x, y: x / y)
self._test_op(df, lambda x, y: x ** y)
@staticmethod
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
assert_series_equal(result[col], op(df[col], 1))
def test_bool_flex_frame(self):
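# flexible comparison methods (eq/ne/gt/lt/ge/le) against another frame,
# an ndarray, a scalar and a Series (column- or index-aligned)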
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = DataFrame(data)
other = DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.ix[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
assert_frame_equal(rs, xp)
# DataFrame
self.assertTrue(df.eq(df).values.all())
self.assertFalse(df.ne(df).values.any())
for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
assert_frame_equal(f(other.values), o(df, other.values))
# scalar
assert_frame_equal(f(0), o(df, 0))
# NAs
assert_frame_equal(f(np.nan), o(df, np.nan))
with assertRaisesRegexp(ValueError, 'shape'):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
assert_frame_equal(col_eq, df == Series(col_ser))
assert_frame_equal(col_eq, -col_ne)
assert_frame_equal(idx_eq, -idx_ne)
assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
assert_frame_equal(col_eq, df.eq(list(col_ser)))
assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))
assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
assert_frame_equal(col_gt, df > Series(col_ser))
assert_frame_equal(col_gt, -col_le)
assert_frame_equal(idx_gt, -idx_le)
assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
assert_frame_equal(col_ge, df >= Series(col_ser))
assert_frame_equal(col_ge, -col_lt)
assert_frame_equal(idx_ge, -idx_lt)
assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = Series(np.random.randn(5))
col_ser = Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.ix[0, 0] = np.nan
rs = df.eq(df)
self.assertFalse(rs.ix[0, 0])
rs = df.ne(df)
self.assertTrue(rs.ix[0, 0])
rs = df.gt(df)
self.assertFalse(rs.ix[0, 0])
rs = df.lt(df)
self.assertFalse(rs.ix[0, 0])
rs = df.ge(df)
self.assertFalse(rs.ix[0, 0])
rs = df.le(df)
self.assertFalse(rs.ix[0, 0])
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = DataFrame({'a': arr})
df2 = DataFrame({'a': arr2})
rs = df.gt(df2)
self.assertFalse(rs.values.any())
rs = df.ne(df2)
self.assertTrue(rs.values.all())
arr3 = np.array([2j, np.nan, None])
df3 = DataFrame({'a': arr3})
rs = df3.gt(2j)
self.assertFalse(rs.values.any())
# corner, dtype=object
df1 = DataFrame({'col': ['foo', np.nan, 'bar']})
df2 = DataFrame({'col': ['foo', datetime.now(), 'bar']})
result = df1.ne(df2)
exp = DataFrame({'col': [False, True, False]})
assert_frame_equal(result, exp)
def test_arith_flex_series(self):
df = self.simple
row = df.xs('a')
col = df['two']
# after arithmetic refactor, add truediv here
ops = ['add', 'sub', 'mul', 'mod']
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
assert_frame_equal(f(row), op(df, row))
assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
assert_frame_equal(df.div(row), df / row)
assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH7325
df = DataFrame(np.arange(3*2).reshape((3,2)),dtype='int64')
expected = DataFrame([[np.inf,np.inf],[1.0,1.5],[1.0,1.25]])
result = df.div(df[0],axis='index')
assert_frame_equal(result,expected)
df = DataFrame(np.arange(3*2).reshape((3,2)),dtype='float64')
expected = DataFrame([[np.nan,np.inf],[1.0,1.5],[1.0,1.25]])
result = df.div(df[0],axis='index')
assert_frame_equal(result,expected)
def test_arith_non_pandas_object(self):
df = self.simple
val1 = df.xs('a').values
added = DataFrame(df.values + val1, index=df.index, columns=df.columns)
assert_frame_equal(df + val1, added)
added = DataFrame((df.values.T + val1).T,
index=df.index, columns=df.columns)
assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df['two'])
added = DataFrame(df.values + val2, index=df.index, columns=df.columns)
assert_frame_equal(df + val2, added)
added = DataFrame((df.values.T + val2).T, index=df.index,
columns=df.columns)
assert_frame_equal(df.add(val2, axis='index'), added)
val3 = np.random.rand(*df.shape)
added = DataFrame(df.values + val3, index=df.index, columns=df.columns)
assert_frame_equal(df.add(val3), added)
def test_combineFrame(self):
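# frame + frame aligns on both index and columns; labels present in only
# one operand produce NaN in the result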
frame_copy = self.frame.reindex(self.frame.index[::2])
del frame_copy['D']
frame_copy['C'][:5] = nan
added = self.frame + frame_copy
tm.assert_dict_equal(added['A'].valid(),
self.frame['A'] * 2,
compare_keys=False)
self.assertTrue(np.isnan(added['C'].reindex(frame_copy.index)[:5]).all())
# assert(False)
self.assertTrue(np.isnan(added['D']).all())
self_added = self.frame + self.frame
self.assertTrue(self_added.index.equals(self.frame.index))
added_rev = frame_copy + self.frame
self.assertTrue(np.isnan(added['D']).all())
# corner cases
# empty
plus_empty = self.frame + self.empty
self.assertTrue(np.isnan(plus_empty.values).all())
empty_plus = self.empty + self.frame
self.assertTrue(np.isnan(empty_plus.values).all())
empty_empty = self.empty + self.empty
self.assertTrue(empty_empty.empty)
# out of order
reverse = self.frame.reindex(columns=self.frame.columns[::-1])
assert_frame_equal(reverse + self.frame, self.frame * 2)
# mix vs float64, upcast
added = self.frame + self.mixed_float
_check_mixed_float(added, dtype = 'float64')
added = self.mixed_float + self.frame
_check_mixed_float(added, dtype = 'float64')
# mix vs mix
added = self.mixed_float + self.mixed_float2
_check_mixed_float(added, dtype = dict(C = None))
added = self.mixed_float2 + self.mixed_float
_check_mixed_float(added, dtype = dict(C = None))
# with int
added = self.frame + self.mixed_int
_check_mixed_float(added, dtype = 'float64')
def test_combineSeries(self):
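# frame + series broadcasts the series across rows, matching on column
# labels; extra labels in the series become all-NaN columns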
# Series
series = self.frame.xs(self.frame.index[0])
added = self.frame + series
for key, s in compat.iteritems(added):
assert_series_equal(s, self.frame[key] + series[key])
larger_series = series.to_dict()
larger_series['E'] = 1
larger_series = Series(larger_series)
larger_added = self.frame + larger_series
for key, s in compat.iteritems(self.frame):
assert_series_equal(larger_added[key], s + series[key])
self.assertIn('E', larger_added)
self.assertTrue(np.isnan(larger_added['E']).all())
# vs mix (upcast) as needed
added = self.mixed_float + series
_check_mixed_float(added, dtype = 'float64')
added = self.mixed_float + series.astype('float32')
_check_mixed_float(added, dtype = dict(C = None))
added = self.mixed_float + series.astype('float16')
_check_mixed_float(added, dtype = dict(C = None))
#### these raise with numexpr, as we are adding an int64 to a uint64 ... weird
# vs int
#added = self.mixed_int + (100*series).astype('int64')
#_check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C = 'int64', D = 'int64'))
#added = self.mixed_int + (100*series).astype('int32')
#_check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C = 'int32', D = 'int64'))
# TimeSeries
buf = StringIO()
tmp = sys.stderr
sys.stderr = buf
try:
ts = self.tsframe['A']
added = self.tsframe + ts
for key, col in compat.iteritems(self.tsframe):
assert_series_equal(added[key], col + ts)
smaller_frame = self.tsframe[:-5]
smaller_added = smaller_frame + ts
self.assertTrue(smaller_added.index.equals(self.tsframe.index))
smaller_ts = ts[:-5]
smaller_added2 = self.tsframe + smaller_ts
assert_frame_equal(smaller_added, smaller_added2)
# length 0
result = self.tsframe + ts[:0]
# Frame is length 0
result = self.tsframe[:0] + ts
self.assertEqual(len(result), 0)
# empty but with non-empty index
frame = self.tsframe[:1].reindex(columns=[])
result = frame * ts
self.assertEqual(len(result), len(ts))
finally:
sys.stderr = tmp
def test_combineFunc(self):
result = self.frame * 2
self.assert_numpy_array_equal(result.values, self.frame.values * 2)
# vs mix
result = self.mixed_float * 2
for c, s in compat.iteritems(result):
self.assert_numpy_array_equal(s.values, self.mixed_float[c].values * 2)
_check_mixed_float(result, dtype = dict(C = None))
result = self.empty * 2
self.assertIs(result.index, self.empty.index)
self.assertEqual(len(result.columns), 0)
def test_comparisons(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = self.simple.xs('a')
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
self.assert_numpy_array_equal(result.values,
func(df1.values, df2.values))
with assertRaisesRegexp(ValueError, 'Wrong number of dimensions'):
func(df1, ndim_5)
result2 = func(self.simple, row)
self.assert_numpy_array_equal(result2.values,
func(self.simple.values, row.values))
result3 = func(self.frame, 0)
self.assert_numpy_array_equal(result3.values,
func(self.frame.values, 0))
with assertRaisesRegexp(ValueError, 'Can only compare '
'identically-labeled DataFrame'):
func(self.simple, self.simple[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_string_comparison(self):
df = DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
assert_frame_equal(df[mask_a], df.ix[1:1, :])
assert_frame_equal(df[-mask_a], df.ix[0:0, :])
mask_b = df.b == "foo"
assert_frame_equal(df[mask_b], df.ix[0:0, :])
assert_frame_equal(df[-mask_b], df.ix[1:1, :])
def test_float_none_comparison(self):
df = DataFrame(np.random.randn(8, 3), index=lrange(8),
columns=['A', 'B', 'C'])
self.assertRaises(TypeError, df.__eq__, None)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3,2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2,2])
b_c = b_r.T
l = (2,2,2)
tup = tuple(l)
# gt
expected = DataFrame([[False,False],[False,True],[True,True]])
result = df>b
assert_frame_equal(result,expected)
result = df.values>b
assert_array_equal(result,expected.values)
result = df>l
assert_frame_equal(result,expected)
result = df>tup
assert_frame_equal(result,expected)
result = df>b_r
assert_frame_equal(result,expected)
result = df.values>b_r
assert_array_equal(result,expected.values)
self.assertRaises(ValueError, df.__gt__, b_c)
self.assertRaises(ValueError, df.values.__gt__, b_c)
# ==
expected = DataFrame([[False,False],[True,False],[False,False]])
result = df == b
assert_frame_equal(result,expected)
result = df==l
assert_frame_equal(result,expected)
result = df==tup
assert_frame_equal(result,expected)
result = df == b_r
assert_frame_equal(result,expected)
result = df.values == b_r
assert_array_equal(result,expected.values)
self.assertRaises(ValueError, lambda : df == b_c)
self.assertFalse((df.values == b_c))
# with alignment
df = DataFrame(np.arange(6).reshape((3,2)),columns=list('AB'),index=list('abc'))
expected.index=df.index
expected.columns=df.columns
result = df==l
assert_frame_equal(result,expected)
result = df==tup
assert_frame_equal(result,expected)
# not shape compatible
self.assertRaises(ValueError, lambda : df == (2,2))
self.assertRaises(ValueError, lambda : df == [2,2])
def test_to_csv_deprecated_options(self):
pname = '__tmp_to_csv_deprecated_options__'
with ensure_clean(pname) as path:
self.tsframe[1:3] = np.nan
self.tsframe.to_csv(path, nanRep='foo')
recons = read_csv(path,index_col=0,parse_dates=[0],na_values=['foo'])
assert_frame_equal(self.tsframe, recons)
with tm.assert_produces_warning(FutureWarning):
self.frame.to_csv(path, cols=['A', 'B'])
with tm.assert_produces_warning(False):
self.frame.to_csv(path, columns=['A', 'B'])
def test_to_csv_from_csv(self):
pname = '__tmp_to_csv_from_csv__'
with ensure_clean(pname) as path:
self.frame['A'][:5] = nan
self.frame.to_csv(path)
self.frame.to_csv(path, columns=['A', 'B'])
self.frame.to_csv(path, header=False)
self.frame.to_csv(path, index=False)
# test roundtrip
self.tsframe.to_csv(path)
recons = DataFrame.from_csv(path)
assert_frame_equal(self.tsframe, recons)
self.tsframe.to_csv(path, index_label='index')
recons = DataFrame.from_csv(path, index_col=None)
assert(len(recons.columns) == len(self.tsframe.columns) + 1)
# no index
self.tsframe.to_csv(path, index=False)
recons = DataFrame.from_csv(path, index_col=None)
assert_almost_equal(self.tsframe.values, recons.values)
# corner case
dm = DataFrame({'s1': Series(lrange(3), lrange(3)),
's2': Series(lrange(2), lrange(2))})
dm.to_csv(path)
recons = DataFrame.from_csv(path)
assert_frame_equal(dm, recons)
with ensure_clean(pname) as path:
# duplicate index
df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'],
columns=['x', 'y', 'z'])
df.to_csv(path)
result = DataFrame.from_csv(path)
assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([('A', 1, 2), ('A', 1, 2), ('B', 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx,
columns=['x', 'y', 'z'])
df.to_csv(path)
result = DataFrame.from_csv(path, index_col=[0, 1, 2],
parse_dates=False)
assert_frame_equal(result, df, check_names=False) # TODO from_csv names index ['Unnamed: 1', 'Unnamed: 2'] should it ?
# column aliases
col_aliases = Index(['AA', 'X', 'Y', 'Z'])
self.frame2.to_csv(path, header=col_aliases)
rs = DataFrame.from_csv(path)
xp = self.frame2.copy()
xp.columns = col_aliases
assert_frame_equal(xp, rs)
self.assertRaises(ValueError, self.frame2.to_csv, path,
header=['AA', 'X'])
with ensure_clean(pname) as path:
import pandas as pd
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path,mode='a',header=False)
xp = pd.concat([df1,df2])
rs = pd.read_csv(path,index_col=0)
rs.columns = lmap(int,rs.columns)
xp.columns = lmap(int,xp.columns)
assert_frame_equal(xp,rs)
def test_to_csv_cols_reordering(self):
# GH3454
import pandas as pd
def _check_df(df,cols=None):
with ensure_clean() as path:
df.to_csv(path,columns = cols,engine='python')
rs_p = pd.read_csv(path,index_col=0)
df.to_csv(path,columns = cols,chunksize=chunksize)
rs_c = pd.read_csv(path,index_col=0)
if cols:
df = df[cols]
assert (rs_c.columns==rs_p.columns).all()
assert_frame_equal(df,rs_c,check_names=False)
chunksize=5
N = int(chunksize*2.5)
df= mkdf(N, 3)
cs = df.columns
cols = [cs[2],cs[0]]
_check_df(df,cols)
def test_to_csv_legacy_raises_on_dupe_cols(self):
df= mkdf(10, 3)
df.columns = ['a','a','b']
with ensure_clean() as path:
self.assertRaises(NotImplementedError,df.to_csv,path,engine='python')
def test_to_csv_new_dupe_cols(self):
import pandas as pd
def _check_df(df,cols=None):
with ensure_clean() as path:
df.to_csv(path,columns = cols,chunksize=chunksize)
rs_c = pd.read_csv(path,index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df,Series):
assert_series_equal(obj_df,obj_rs)
else:
assert_frame_equal(obj_df,obj_rs,check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
assert_frame_equal(df,rs_c,check_names=False)
chunksize=5
N = int(chunksize*2.5)
# dupe cols
df= mkdf(N, 3)
df.columns = ['a','a','b']
_check_df(df,None)
# dupe cols with selection
cols = ['b','a']
_check_df(df,cols)
@slow
def test_to_csv_moar(self):
path = '__tmp_to_csv_moar__'
def _do_test(df,path,r_dtype=None,c_dtype=None,rnlvl=None,cnlvl=None,
dupe_col=False):
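# round-trip helper: write df via to_csv (chunked) and read it back, then
# coerce index/column dtypes so the two frames can be compared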
kwargs = dict(parse_dates=False)
if cnlvl:
if rnlvl is not None:
kwargs['index_col'] = lrange(rnlvl)
kwargs['header'] = lrange(cnlvl)
with ensure_clean(path) as path:
df.to_csv(path,encoding='utf8',chunksize=chunksize,tupleize_cols=False)
recons = DataFrame.from_csv(path,tupleize_cols=False,**kwargs)
else:
kwargs['header'] = 0
with ensure_clean(path) as path:
df.to_csv(path,encoding='utf8',chunksize=chunksize)
recons = DataFrame.from_csv(path,**kwargs)
def _to_uni(x):
if not isinstance(x, compat.text_type):
return x.decode('utf8')
return x
if dupe_col:
# read_csv disambiguates duplicate columns by labeling them
# dupe.1, dupe.2, etc.; monkey-patch the original columns back
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.icol(i).values for i in range(rnlvl-1)]
ix=MultiIndex.from_arrays([list(recons.index)]+delta_lvl)
recons.index = ix
recons = recons.iloc[:,rnlvl-1:]
type_map = dict(i='i',f='f',s='O',u='O',dt='O',p='O')
if r_dtype:
if r_dtype == 'u': # unicode
r_dtype='O'
recons.index = np.array(lmap(_to_uni,recons.index),
dtype=r_dtype)
df.index = np.array(lmap(_to_uni,df.index),dtype=r_dtype)
elif r_dtype == 'dt': # unicode
r_dtype='O'
recons.index = np.array(lmap(Timestamp,recons.index),
dtype=r_dtype)
df.index = np.array(lmap(Timestamp,df.index),dtype=r_dtype)
elif r_dtype == 'p':
r_dtype='O'
recons.index = np.array(list(map(Timestamp,
recons.index.to_datetime())),
dtype=r_dtype)
df.index = np.array(list(map(Timestamp,
df.index.to_datetime())),
dtype=r_dtype)
else:
r_dtype= type_map.get(r_dtype)
recons.index = np.array(recons.index,dtype=r_dtype )
df.index = np.array(df.index,dtype=r_dtype )
if c_dtype:
if c_dtype == 'u':
c_dtype='O'
recons.columns = np.array(lmap(_to_uni,recons.columns),
dtype=c_dtype)
df.columns = np.array(lmap(_to_uni,df.columns),dtype=c_dtype )
elif c_dtype == 'dt':
c_dtype='O'
recons.columns = np.array(lmap(Timestamp,recons.columns),
dtype=c_dtype )
df.columns = np.array(lmap(Timestamp,df.columns),dtype=c_dtype)
elif c_dtype == 'p':
c_dtype='O'
recons.columns = np.array(lmap(Timestamp,recons.columns.to_datetime()),
dtype=c_dtype)
df.columns = np.array(lmap(Timestamp,df.columns.to_datetime()),dtype=c_dtype )
else:
c_dtype= type_map.get(c_dtype)
recons.columns = np.array(recons.columns,dtype=c_dtype )
df.columns = np.array(df.columns,dtype=c_dtype )
assert_frame_equal(df,recons,check_names=False,check_less_precise=True)
N = 100
chunksize=1000
# GH3437
from pandas import NaT
def make_dtnat_arr(n,nnat=None):
if nnat is None:
nnat= int(n*0.1) # 10%
s=list(date_range('2000',freq='5min',periods=n))
if nnat:
for i in np.random.randint(0,len(s),nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
# N=35000
s1=make_dtnat_arr(chunksize+5)
s2=make_dtnat_arr(chunksize+5,0)
path = '1.csv'
# s3 = make_dtnat_arr(chunksize+5, 0)
with ensure_clean('.csv') as pth:
df=DataFrame(dict(a=s1,b=s2))
df.to_csv(pth,chunksize=chunksize)
recons = DataFrame.from_csv(pth).convert_objects('coerce')
assert_frame_equal(df, recons,check_names=False,check_less_precise=True)
for ncols in [4]:
base = int((chunksize// ncols or 1) or 1)
for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
base-1,base,base+1]:
_do_test(mkdf(nrows, ncols,r_idx_type='dt',
c_idx_type='s'),path, 'dt','s')
for r_idx_type,c_idx_type in [('i','i'),('s','s'),('u','dt'),('p','p')]:
for ncols in [1,2,3,4]:
base = int((chunksize// ncols or 1) or 1)
for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
base-1,base,base+1]:
_do_test(mkdf(nrows, ncols,r_idx_type=r_idx_type,
c_idx_type=c_idx_type),path,r_idx_type,c_idx_type)
for ncols in [1,2,3,4]:
base = int((chunksize// ncols or 1) or 1)
for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
base-1,base,base+1]:
_do_test(mkdf(nrows, ncols),path)
for nrows in [10,N-2,N-1,N,N+1,N+2]:
df = mkdf(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe","dupe"]
cols[-2:] = ["dupe","dupe"]
ix = list(df.index)
ix[:2] = ["rdupe","rdupe"]
ix[-2:] = ["rdupe","rdupe"]
df.index=ix
df.columns=cols
_do_test(df,path,dupe_col=True)
_do_test(DataFrame(index=lrange(10)),path)
_do_test(mkdf(chunksize//2+1, 2,r_idx_nlevels=2),path,rnlvl=2)
for ncols in [2,3,4]:
base = int(chunksize//ncols)
for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,
base-1,base,base+1]:
_do_test(mkdf(nrows, ncols,r_idx_nlevels=2),path,rnlvl=2)
_do_test(mkdf(nrows, ncols,c_idx_nlevels=2),path,cnlvl=2)
_do_test(mkdf(nrows, ncols,r_idx_nlevels=2,c_idx_nlevels=2),
path,rnlvl=2,cnlvl=2)
def test_to_csv_from_csv_w_some_infs(self):
# test roundtrip with inf, -inf, nan, as full columns and mix
self.frame['G'] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < .5]
self.frame['H'] = self.frame.index.map(f)
with ensure_clean() as path:
self.frame.to_csv(path)
recons = DataFrame.from_csv(path)
assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name
assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)
def test_to_csv_from_csv_w_all_infs(self):
# test roundtrip with inf, -inf, nan, as full columns and mix
self.frame['E'] = np.inf
self.frame['F'] = -np.inf
with ensure_clean() as path:
self.frame.to_csv(path)
recons = DataFrame.from_csv(path)
assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name
assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
pname = '__tmp_to_csv_no_index__'
with ensure_clean(pname) as path:
df = DataFrame({'c1':[1,2,3], 'c2':[4,5,6]})
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df,result)
df['c3'] = Series([7,8,9],dtype='int64')
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df,result)
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
pname = '__tmp_to_csv_headers__'
from_df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
to_df = DataFrame([[1, 2], [3, 4]], columns=['X', 'Y'])
with ensure_clean(pname) as path:
from_df.to_csv(path, header=['X', 'Y'])
recons = DataFrame.from_csv(path)
assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=['X', 'Y'])
recons = DataFrame.from_csv(path)
recons.reset_index(inplace=True)
assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self):
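# round-trip frames with a MultiIndex on the rows and/or the columns
# through to_csv / from_csv / read_csv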
pname = '__tmp_to_csv_multiindex__'
frame = self.frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=['first', 'second'])
frame.index = new_index
with ensure_clean(pname) as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=['A', 'B'])
# round trip
frame.to_csv(path)
df = DataFrame.from_csv(path, index_col=[0, 1], parse_dates=False)
assert_frame_equal(frame, df, check_names=False) # TODO to_csv drops column name
self.assertEqual(frame.index.names, df.index.names)
self.frame.index = old_index # needed if setUP becomes a classmethod
# try multiindex with dates
tsframe = self.tsframe
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=['time', 'foo'])
recons = DataFrame.from_csv(path, index_col=[0, 1])
assert_frame_equal(tsframe, recons, check_names=False) # TODO to_csv drops column name
# do not load index
tsframe.to_csv(path)
recons = DataFrame.from_csv(path, index_col=None)
np.testing.assert_equal(len(recons.columns), len(tsframe.columns) + 2)
# no index
tsframe.to_csv(path, index=False)
recons = DataFrame.from_csv(path, index_col=None)
assert_almost_equal(recons.values, self.tsframe.values)
self.tsframe.index = old_index # needed if setUP becomes classmethod
with ensure_clean(pname) as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ['first','second']
return DataFrame(np.random.randint(0,10,size=(3,3)),
columns=MultiIndex.from_tuples([('bah', 'foo'),
('bah', 'bar'),
('ban', 'baz')],
names=names),
dtype='int64')
# column & index are multi-index
df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
df.to_csv(path,tupleize_cols=False)
result = read_csv(path,header=[0,1,2,3],index_col=[0,1],tupleize_cols=False)
assert_frame_equal(df,result)
# column is mi
df = mkdf(5,3,r_idx_nlevels=1,c_idx_nlevels=4)
df.to_csv(path,tupleize_cols=False)
result = read_csv(path,header=[0,1,2,3],index_col=0,tupleize_cols=False)
assert_frame_equal(df,result)
# dup column names?
df = mkdf(5,3,r_idx_nlevels=3,c_idx_nlevels=4)
df.to_csv(path,tupleize_cols=False)
result = read_csv(path,header=[0,1,2,3],index_col=[0,1,2],tupleize_cols=False)
assert_frame_equal(df,result)
# writing with no index
df = _make_frame()
df.to_csv(path,tupleize_cols=False,index=False)
result = read_csv(path,header=[0,1],tupleize_cols=False)
assert_frame_equal(df,result)
# we lose the names here
df = _make_frame(True)
df.to_csv(path,tupleize_cols=False,index=False)
result = read_csv(path,header=[0,1],tupleize_cols=False)
self.assertTrue(all([ x is None for x in result.columns.names ]))
result.columns.names = df.columns.names
assert_frame_equal(df,result)
# tupleize_cols=True and index=False
df = _make_frame(True)
df.to_csv(path,tupleize_cols=True,index=False)
result = read_csv(path,header=0,tupleize_cols=True,index_col=None)
result.columns = df.columns
assert_frame_equal(df,result)
# whatsnew example
df = _make_frame()
df.to_csv(path,tupleize_cols=False)
result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)
assert_frame_equal(df,result)
df = _make_frame(True)
df.to_csv(path,tupleize_cols=False)
result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)
assert_frame_equal(df,result)
# column & index are multi-index (compatibility)
df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
df.to_csv(path,tupleize_cols=True)
result = read_csv(path,header=0,index_col=[0,1],tupleize_cols=True)
result.columns = df.columns
assert_frame_equal(df,result)
# invalid options
df = _make_frame(True)
df.to_csv(path,tupleize_cols=False)
# catch invalid headers
with assertRaisesRegexp(CParserError, 'Passed header=\[0,1,2\] are too many rows for this multi_index of columns'):
read_csv(path,tupleize_cols=False,header=lrange(3),index_col=0)
with assertRaisesRegexp(CParserError, 'Passed header=\[0,1,2,3,4,5,6\], len of 7, but only 6 lines in file'):
read_csv(path,tupleize_cols=False,header=lrange(7),index_col=0)
for i in [4,5,6]:
with tm.assertRaises(CParserError):
read_csv(path, tupleize_cols=False, header=lrange(i), index_col=0)
# write with cols
with assertRaisesRegexp(TypeError, 'cannot specify cols with a MultiIndex'):
df.to_csv(path, tupleize_cols=False, columns=['foo', 'bar'])
with ensure_clean(pname) as path:
# empty
tsframe[:0].to_csv(path)
recons = DataFrame.from_csv(path)
exp = tsframe[:0]
exp.index = []
self.assertTrue(recons.columns.equals(exp.columns))
self.assertEqual(len(recons), 0)
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
with ensure_clean('__tmp_to_csv_float32_nanrep__.csv') as path:
df.to_csv(path, na_rep=999)
with open(path) as f:
lines = f.readlines()
self.assertEqual(lines[1].split(',')[2], '999')
def test_to_csv_withcommas(self):
# Commas inside fields should be correctly escaped when saving as CSV.
df = DataFrame({'A': [1, 2, 3], 'B': ['5,6', '7,8', '9,0']})
with ensure_clean('__tmp_to_csv_withcommas__.csv') as path:
df.to_csv(path)
df2 = DataFrame.from_csv(path)
assert_frame_equal(df2, df)
def test_to_csv_mixed(self):
def create_cols(name):
return [ "%s%03d" % (name,i) for i in range(5) ]
df_float = DataFrame(np.random.randn(100, 5),dtype='float64',columns=create_cols('float'))
df_int = DataFrame(np.random.randn(100, 5),dtype='int64',columns=create_cols('int'))
df_bool = DataFrame(True,index=df_float.index,columns=create_cols('bool'))
df_object = DataFrame('foo',index=df_float.index,columns=create_cols('object'))
df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=create_cols('date'))
# add in some nans
df_float.ix[30:50,1:3] = np.nan
#### this is a bug in read_csv right now ####
#df_dt.ix[30:50,1:3] = np.nan
df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)
# dtype
dtypes = dict()
for n,dtype in [('float',np.float64),('int',np.int64),('bool',np.bool),('object',np.object)]:
for c in create_cols(n):
dtypes[c] = dtype
with ensure_clean() as filename:
df.to_csv(filename)
rs = read_csv(filename, index_col=0, dtype=dtypes, parse_dates=create_cols('date'))
assert_frame_equal(rs, df)
def test_to_csv_dups_cols(self):
df = DataFrame(np.random.randn(1000, 30),columns=lrange(15)+lrange(15),dtype='float64')
with ensure_clean() as filename:
df.to_csv(filename) # single dtype, fine
result = read_csv(filename,index_col=0)
result.columns = df.columns
assert_frame_equal(result,df)
df_float = DataFrame(np.random.randn(1000, 3),dtype='float64')
df_int = DataFrame(np.random.randn(1000, 3),dtype='int64')
df_bool = DataFrame(True,index=df_float.index,columns=lrange(3))
df_object = DataFrame('foo',index=df_float.index,columns=lrange(3))
df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=lrange(3))
df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1, ignore_index=True)
cols = []
for i in range(5):
cols.extend([0,1,2])
df.columns = cols
from pandas import to_datetime
with ensure_clean() as filename:
df.to_csv(filename)
result = read_csv(filename,index_col=0)
# date cols
for i in ['0.4','1.4','2.4']:
result[i] = to_datetime(result[i])
result.columns = df.columns
assert_frame_equal(result,df)
# GH3457
from pandas.util.testing import makeCustomDataframe as mkdf
N=10
df= mkdf(N, 3)
df.columns = ['a','a','b']
with ensure_clean() as filename:
df.to_csv(filename)
# read_csv will rename the duplicate columns
result = read_csv(filename,index_col=0)
result = result.rename(columns={ 'a.1' : 'a' })
assert_frame_equal(result,df)
def test_to_csv_chunking(self):
aa=DataFrame({'A':lrange(100000)})
aa['B'] = aa.A + 1.0
aa['C'] = aa.A + 2.0
aa['D'] = aa.A + 3.0
for chunksize in [10000,50000,100000]:
with ensure_clean() as filename:
aa.to_csv(filename,chunksize=chunksize)
rs = read_csv(filename,index_col=0)
assert_frame_equal(rs, aa)
def test_to_csv_wide_frame_formatting(self):
# Issue #8621
df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
with ensure_clean() as filename:
df.to_csv(filename, header=False, index=False)
rs = read_csv(filename, header=None)
assert_frame_equal(rs, df)
def test_to_csv_bug(self):
f1 = StringIO('a,1.0\nb,2.0')
df = DataFrame.from_csv(f1, header=None)
newdf = DataFrame({'t': df[df.columns[0]]})
with ensure_clean() as path:
newdf.to_csv(path)
recons = read_csv(path, index_col=0)
assert_frame_equal(recons, newdf, check_names=False) # don't check_names as t != 1
def test_to_csv_unicode(self):
df = DataFrame({u('c/\u03c3'): [1, 2, 3]})
with ensure_clean() as path:
df.to_csv(path, encoding='UTF-8')
df2 = read_csv(path, index_col=0, encoding='UTF-8')
assert_frame_equal(df, df2)
df.to_csv(path, encoding='UTF-8', index=False)
df2 = read_csv(path, index_col=None, encoding='UTF-8')
assert_frame_equal(df, df2)
def test_to_csv_unicode_index_col(self):
buf = StringIO('')
df = DataFrame(
[[u("\u05d0"), "d2", "d3", "d4"], ["a1", "a2", "a3", "a4"]],
columns=[u("\u05d0"),
u("\u05d1"), u("\u05d2"), u("\u05d3")],
index=[u("\u05d0"), u("\u05d1")])
df.to_csv(buf, encoding='UTF-8')
buf.seek(0)
df2 = read_csv(buf, index_col=0, encoding='UTF-8')
assert_frame_equal(df, df2)
def test_to_csv_stringio(self):
buf = StringIO()
self.frame.to_csv(buf)
buf.seek(0)
recons = read_csv(buf, index_col=0)
assert_frame_equal(recons, self.frame, check_names=False) # TODO to_csv drops column name
def test_to_csv_float_format(self):
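# float_format is applied to every float value on write,
# e.g. '%.2f' turns 0.123456 into 0.12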
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, float_format='%.2f')
rs = read_csv(filename, index_col=0)
xp = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
assert_frame_equal(rs, xp)
def test_to_csv_quoting(self):
df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})
buf = StringIO()
df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC)
result = buf.getvalue()
expected = ('"A","B"\n'
'1,"foo"\n'
'2,"bar"\n'
'3,"baz"\n')
self.assertEqual(result, expected)
# quoting of Windows line terminators when an encoding is specified
# #3503
text = 'a,b,c\n1,"test \r\n",3\n'
df = pd.read_csv(StringIO(text))
buf = StringIO()
df.to_csv(buf, encoding='utf-8', index=False)
self.assertEqual(buf.getvalue(), text)
def test_to_csv_unicodewriter_quoting(self):
df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})
buf = StringIO()
df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC,
encoding='utf-8')
result = buf.getvalue()
expected = ('"A","B"\n'
'1,"foo"\n'
'2,"bar"\n'
'3,"baz"\n')
self.assertEqual(result, expected)
def test_to_csv_quote_none(self):
# GH4328
df = DataFrame({'A': ['hello', '{"hello"}']})
for encoding in (None, 'utf-8'):
buf = StringIO()
df.to_csv(buf, quoting=csv.QUOTE_NONE,
encoding=encoding, index=False)
result = buf.getvalue()
expected = 'A\nhello\n{"hello"}\n'
self.assertEqual(result, expected)
def test_to_csv_index_no_leading_comma(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['one', 'two', 'three'])
buf = StringIO()
df.to_csv(buf, index_label=False)
expected = ('A,B\n'
'one,1,4\n'
'two,2,5\n'
'three,3,6\n')
self.assertEqual(buf.getvalue(), expected)
def test_to_csv_line_terminators(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['one', 'two', 'three'])
buf = StringIO()
df.to_csv(buf, line_terminator='\r\n')
expected = (',A,B\r\n'
'one,1,4\r\n'
'two,2,5\r\n'
'three,3,6\r\n')
self.assertEqual(buf.getvalue(), expected)
buf = StringIO()
df.to_csv(buf) # The default line terminator remains \n
expected = (',A,B\n'
'one,1,4\n'
'two,2,5\n'
'three,3,6\n')
self.assertEqual(buf.getvalue(), expected)
def test_to_csv_from_csv_categorical(self):
# writing a categorical to CSV should produce the same output as the
# equivalent "normal" Series/DataFrame
s = Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
s2 = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
res = StringIO()
s.to_csv(res)
exp = StringIO()
s2.to_csv(exp)
self.assertEqual(res.getvalue(), exp.getvalue())
df = DataFrame({"s":s})
df2 = DataFrame({"s":s2})
res = StringIO()
df.to_csv(res)
exp = StringIO()
df2.to_csv(exp)
self.assertEqual(res.getvalue(), exp.getvalue())
def test_to_csv_path_is_none(self):
# GH 8215
# Make sure we return string for consistency with
# Series.to_csv()
csv_str = self.frame.to_csv(path=None)
self.assertIsInstance(csv_str, str)
recons = pd.read_csv(StringIO(csv_str), index_col=0)
assert_frame_equal(self.frame, recons)
def test_info(self):
io = StringIO()
self.frame.info(buf=io)
self.tsframe.info(buf=io)
frame = DataFrame(np.random.randn(5, 3))
import sys
sys.stdout = StringIO()
frame.info()
frame.info(verbose=False)
sys.stdout = sys.__stdout__
def test_info_wide(self):
from pandas import set_option, reset_option
io = StringIO()
df = DataFrame(np.random.randn(5, 101))
df.info(buf=io)
io = StringIO()
df.info(buf=io, max_cols=101)
rs = io.getvalue()
self.assertTrue(len(rs.splitlines()) > 100)
xp = rs
set_option('display.max_info_columns', 101)
io = StringIO()
df.info(buf=io)
self.assertEqual(rs, xp)
reset_option('display.max_info_columns')
def test_info_duplicate_columns(self):
io = StringIO()
# it works!
frame = DataFrame(np.random.randn(1500, 4),
columns=['a', 'a', 'b', 'b'])
frame.info(buf=io)
def test_info_shows_column_dtypes(self):
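# info() should report the non-null count and dtype for each column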
dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',
'complex128', 'object', 'bool']
data = {}
n = 10
for i, dtype in enumerate(dtypes):
data[i] = np.random.randint(2, size=n).astype(dtype)
df = DataFrame(data)
buf = StringIO()
df.info(buf=buf)
res = buf.getvalue()
for i, dtype in enumerate(dtypes):
name = '%d %d non-null %s' % (i, n, dtype)
assert name in res
def test_info_max_cols(self):
df = DataFrame(np.random.randn(10, 5))
for len_, verbose in [(5, None), (5, False), (10, True)]:
# verbose=None -> follow the setting, False -> summarized, True -> full output
with option_context('max_info_columns', 4):
buf = StringIO()
df.info(buf=buf, verbose=verbose)
res = buf.getvalue()
self.assertEqual(len(res.strip().split('\n')), len_)
for len_, verbose in [(10, None), (5, False), (10, True)]:
# max_cols not exceeded
with option_context('max_info_columns', 5):
buf = StringIO()
df.info(buf=buf, verbose=verbose)
res = buf.getvalue()
self.assertEqual(len(res.strip().split('\n')), len_)
for len_, max_cols in [(10, 5), (5, 4)]:
# setting truncates
with option_context('max_info_columns', 4):
buf = StringIO()
df.info(buf=buf, max_cols=max_cols)
res = buf.getvalue()
self.assertEqual(len(res.strip().split('\n')), len_)
# setting wouldn't truncate
with option_context('max_info_columns', 5):
buf = StringIO()
df.info(buf=buf, max_cols=max_cols)
res = buf.getvalue()
self.assertEqual(len(res.strip().split('\n')), len_)
def test_info_memory_usage(self):
# Ensure memory usage is displayed, when requested, on the last line
dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',
'complex128', 'object', 'bool']
data = {}
n = 10
for i, dtype in enumerate(dtypes):
data[i] = np.random.randint(2, size=n).astype(dtype)
df = DataFrame(data)
buf = StringIO()
# display memory usage case
df.info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
self.assertTrue("memory usage: " in res[-1])
# do not display memory usage case
df.info(buf=buf, memory_usage=False)
res = buf.getvalue().splitlines()
self.assertTrue("memory usage: " not in res[-1])
df.info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
# memory usage is a lower bound, so print it as XYZ+ MB
self.assertTrue(re.match(r"memory usage: [^+]+\+", res[-1]))
df.iloc[:, :5].info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
# excluded column with object dtype, so estimate is accurate
self.assertFalse(re.match(r"memory usage: [^+]+\+", res[-1]))
df_with_object_index = pd.DataFrame({'a': [1]}, index=['foo'])
df_with_object_index.info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
self.assertTrue(re.match(r"memory usage: [^+]+\+", res[-1]))
# Test a DataFrame with duplicate columns
dtypes = ['int64', 'int64', 'int64', 'float64']
data = {}
n = 100
for i, dtype in enumerate(dtypes):
data[i] = np.random.randint(2, size=n).astype(dtype)
df = DataFrame(data)
df.columns = dtypes
# Ensure df size is as expected
df_size = df.memory_usage().sum()
exp_size = len(dtypes) * n * 8 # cols * rows * bytes
self.assertEqual(df_size, exp_size)
# Ensure number of cols in memory_usage is the same as df
size_df = np.size(df.columns.values) # index=False; default
self.assertEqual(size_df, np.size(df.memory_usage()))
# test for validity
DataFrame(1,index=['a'],columns=['A']).memory_usage(index=True)
DataFrame(1,index=['a'],columns=['A']).index.nbytes
DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.nbytes
DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.values.nbytes
DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).memory_usage(index=True)
DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.nbytes
DataFrame(1,index=pd.MultiIndex.from_product([['a'],range(1000)]),columns=['A']).index.values.nbytes
def test_dtypes(self):
self.mixed_frame['bool'] = self.mixed_frame['A'] > 0
result = self.mixed_frame.dtypes
expected = Series(dict((k, v.dtype)
for k, v in compat.iteritems(self.mixed_frame)),
index=result.index)
assert_series_equal(result, expected)
# compat, GH 8722
with option_context('use_inf_as_null',True):
df = DataFrame([[1]])
result = df.dtypes
assert_series_equal(result,Series({0:np.dtype('int64')}))
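# convert_objects(convert_numeric=True) coerces numeric-looking strings; unparseable
# entries become NaN, upcasting the column to float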
def test_convert_objects(self):
oops = self.mixed_frame.T.T
converted = oops.convert_objects()
assert_frame_equal(converted, self.mixed_frame)
self.assertEqual(converted['A'].dtype, np.float64)
# force numeric conversion
self.mixed_frame['H'] = '1.'
self.mixed_frame['I'] = '1'
# add in some items that will be nan
l = len(self.mixed_frame)
self.mixed_frame['J'] = '1.'
self.mixed_frame['K'] = '1'
self.mixed_frame.ix[0:5,['J','K']] = 'garbled'
converted = self.mixed_frame.convert_objects(convert_numeric=True)
self.assertEqual(converted['H'].dtype, 'float64')
self.assertEqual(converted['I'].dtype, 'int64')
self.assertEqual(converted['J'].dtype, 'float64')
self.assertEqual(converted['K'].dtype, 'float64')
self.assertEqual(len(converted['J'].dropna()), l-5)
self.assertEqual(len(converted['K'].dropna()), l-5)
# via astype
converted = self.mixed_frame.copy()
converted['H'] = converted['H'].astype('float64')
converted['I'] = converted['I'].astype('int64')
self.assertEqual(converted['H'].dtype, 'float64')
self.assertEqual(converted['I'].dtype, 'int64')
# via astype, but errors
converted = self.mixed_frame.copy()
with assertRaisesRegexp(ValueError, 'invalid literal'):
converted['H'].astype('int32')
# mixed in a single column
df = DataFrame(dict(s = Series([1, 'na', 3 ,4])))
result = df.convert_objects(convert_numeric=True)
expected = DataFrame(dict(s = Series([1, np.nan, 3 ,4])))
assert_frame_equal(result, expected)
def test_convert_objects_no_conversion(self):
mixed1 = DataFrame(
{'a': [1, 2, 3], 'b': [4.0, 5, 6], 'c': ['x', 'y', 'z']})
mixed2 = mixed1.convert_objects()
assert_frame_equal(mixed1, mixed2)
def test_append_series_dict(self):
df = DataFrame(np.random.randn(5, 4),
columns=['foo', 'bar', 'baz', 'qux'])
series = df.ix[4]
with assertRaisesRegexp(ValueError, 'Indexes have overlapping values'):
df.append(series, verify_integrity=True)
series.name = None
with assertRaisesRegexp(TypeError, 'Can only append a Series if '
'ignore_index=True'):
df.append(series, verify_integrity=True)
result = df.append(series[::-1], ignore_index=True)
expected = df.append(DataFrame({0: series[::-1]}, index=df.columns).T,
ignore_index=True)
assert_frame_equal(result, expected)
# dict
result = df.append(series.to_dict(), ignore_index=True)
assert_frame_equal(result, expected)
result = df.append(series[::-1][:3], ignore_index=True)
expected = df.append(DataFrame({0: series[::-1][:3]}).T,
ignore_index=True)
assert_frame_equal(result, expected.ix[:, result.columns])
# can append when name set
row = df.ix[4]
row.name = 5
result = df.append(row)
expected = df.append(df[-1:], ignore_index=True)
assert_frame_equal(result, expected)
def test_append_list_of_series_dicts(self):
df = DataFrame(np.random.randn(5, 4),
columns=['foo', 'bar', 'baz', 'qux'])
dicts = [x.to_dict() for idx, x in df.iterrows()]
result = df.append(dicts, ignore_index=True)
expected = df.append(df, ignore_index=True)
assert_frame_equal(result, expected)
# different columns
dicts = [{'foo': 1, 'bar': 2, 'baz': 3, 'peekaboo': 4},
{'foo': 5, 'bar': 6, 'baz': 7, 'peekaboo': 8}]
result = df.append(dicts, ignore_index=True)
expected = df.append(DataFrame(dicts), ignore_index=True)
assert_frame_equal(result, expected)
def test_append_empty_dataframe(self):
# Empty df append empty df
df1 = DataFrame([])
df2 = DataFrame([])
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
# Non-empty df append empty df
df1 = DataFrame(np.random.randn(5, 2))
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
# Empty df with columns append empty df
df1 = DataFrame(columns=['bar', 'foo'])
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
# Non-Empty df with columns append empty df
df1 = DataFrame(np.random.randn(5, 2), columns=['bar', 'foo'])
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
def test_append_dtypes(self):
# GH 5754
# row appends of different dtypes (so need to do by-item)
# can sometimes infer the correct type
df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(5))
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
assert_frame_equal(result, expected)
df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
df2 = DataFrame({ 'bar' : 'foo' }, index=lrange(1,2))
result = df1.append(df2)
expected = DataFrame({ 'bar' : [ Timestamp('20130101'), 'foo' ]})
assert_frame_equal(result, expected)
df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2))
result = df1.append(df2)
expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })
assert_frame_equal(result, expected)
df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2), dtype=object)
result = df1.append(df2)
expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })
assert_frame_equal(result, expected)
df1 = DataFrame({ 'bar' : np.nan }, index=lrange(1))
df2 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1,2))
result = df1.append(df2)
expected = DataFrame({ 'bar' : Series([ np.nan, Timestamp('20130101')] ,dtype='M8[ns]') })
assert_frame_equal(result, expected)
df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))
df2 = DataFrame({ 'bar' : 1 }, index=lrange(1,2), dtype=object)
result = df1.append(df2)
expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), 1 ]) })
assert_frame_equal(result, expected)
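# asfreq by offset object and by rule string should agree; method='pad' forward-fills
# when upsampling to business-day frequency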
def test_asfreq(self):
offset_monthly = self.tsframe.asfreq(datetools.bmonthEnd)
rule_monthly = self.tsframe.asfreq('BM')
assert_almost_equal(offset_monthly['A'], rule_monthly['A'])
filled = rule_monthly.asfreq('B', method='pad')
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq('B', method='pad')
# test does not blow up on length-0 DataFrame
zero_length = self.tsframe.reindex([])
result = zero_length.asfreq('BM')
self.assertIsNot(result, zero_length)
def test_asfreq_datetimeindex(self):
df = DataFrame({'A': [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2),
datetime(2011, 11, 3)])
df = df.asfreq('B')
tm.assert_isinstance(df.index, DatetimeIndex)
ts = df['A'].asfreq('B')
tm.assert_isinstance(ts.index, DatetimeIndex)
def test_at_time_between_time_datetimeindex(self):
index = date_range("2012-01-01", "2012-01-05", freq='30min')
df = DataFrame(randn(len(index), 5), index=index)
akey = time(12, 0, 0)
bkey = slice(time(13, 0, 0), time(14, 0, 0))
ainds = [24, 72, 120, 168]
binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172]
result = df.at_time(akey)
expected = df.ix[akey]
expected2 = df.ix[ainds]
assert_frame_equal(result, expected)
assert_frame_equal(result, expected2)
self.assertEqual(len(result), 4)
result = df.between_time(bkey.start, bkey.stop)
expected = df.ix[bkey]
expected2 = df.ix[binds]
assert_frame_equal(result, expected)
assert_frame_equal(result, expected2)
self.assertEqual(len(result), 12)
result = df.copy()
result.ix[akey] = 0
result = result.ix[akey]
expected = df.ix[akey].copy()
expected.ix[:] = 0
assert_frame_equal(result, expected)
result = df.copy()
result.ix[akey] = 0
result.ix[akey] = df.ix[ainds]
assert_frame_equal(result, df)
result = df.copy()
result.ix[bkey] = 0
result = result.ix[bkey]
expected = df.ix[bkey].copy()
expected.ix[:] = 0
assert_frame_equal(result, expected)
result = df.copy()
result.ix[bkey] = 0
result.ix[bkey] = df.ix[binds]
assert_frame_equal(result, df)
def test_as_matrix(self):
frame = self.frame
mat = frame.as_matrix()
frameCols = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frameCols[j]
if np.isnan(value):
self.assertTrue(np.isnan(frame[col][i]))
else:
self.assertEqual(value, frame[col][i])
# mixed type
mat = self.mixed_frame.as_matrix(['foo', 'A'])
self.assertEqual(mat[0, 0], 'bar')
df = DataFrame({'real': [1, 2, 3], 'complex': [1j, 2j, 3j]})
mat = df.as_matrix()
self.assertEqual(mat[0, 0], 1j)
# single block corner case
mat = self.frame.as_matrix(['A', 'B'])
expected = self.frame.reindex(columns=['A', 'B']).values
assert_almost_equal(mat, expected)
def test_as_matrix_duplicates(self):
df = DataFrame([[1, 2, 'a', 'b'],
[1, 2, 'a', 'b']],
columns=['one', 'one', 'two', 'two'])
result = df.values
expected = np.array([[1, 2, 'a', 'b'], [1, 2, 'a', 'b']],
dtype=object)
self.assertTrue(np.array_equal(result, expected))
def test_ftypes(self):
frame = self.mixed_float
expected = Series(dict(A = 'float32:dense', B = 'float32:dense', C = 'float16:dense', D = 'float64:dense'))
expected.sort()
result = frame.ftypes
result.sort()
assert_series_equal(result,expected)
def test_values(self):
self.frame.values[:, 0] = 5.
self.assertTrue((self.frame.values[:, 0] == 5).all())
def test_deepcopy(self):
cp = deepcopy(self.frame)
series = cp['A']
series[:] = 10
for idx, value in compat.iteritems(series):
self.assertNotEqual(self.frame['A'][idx], value)
def test_copy(self):
cop = self.frame.copy()
cop['E'] = cop['A']
self.assertNotIn('E', self.frame)
# copy objects
copy = self.mixed_frame.copy()
self.assertIsNot(copy._data, self.mixed_frame._data)
def _check_method(self, method='pearson', check_minp=False):
if not check_minp:
correls = self.frame.corr(method=method)
exp = self.frame['A'].corr(self.frame['C'], method=method)
assert_almost_equal(correls['A']['C'], exp)
else:
result = self.frame.corr(min_periods=len(self.frame) - 8)
expected = self.frame.corr()
expected.ix['A', 'B'] = expected.ix['B', 'A'] = nan
assert_frame_equal(result, expected)
def test_corr_pearson(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('pearson')
def test_corr_kendall(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('kendall')
def test_corr_spearman(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('spearman')
def test_corr_non_numeric(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
# exclude non-numeric types
result = self.mixed_frame.corr()
expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].corr()
assert_frame_equal(result, expected)
def test_corr_nooverlap(self):
tm._skip_if_no_scipy()
# nothing in common
for meth in ['pearson', 'kendall', 'spearman']:
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1]})
rs = df.corr(meth)
self.assertTrue(isnull(rs.ix['A', 'B']))
self.assertTrue(isnull(rs.ix['B', 'A']))
self.assertEqual(rs.ix['A', 'A'], 1)
self.assertEqual(rs.ix['B', 'B'], 1)
def test_corr_constant(self):
tm._skip_if_no_scipy()
# constant --> all NA
for meth in ['pearson', 'spearman']:
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
self.assertTrue(isnull(rs.values).all())
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
# it works!
df3.cov()
df3.corr()
def test_cov(self):
# min_periods no NAs (corner case)
expected = self.frame.cov()
result = self.frame.cov(min_periods=len(self.frame))
assert_frame_equal(expected, result)
result = self.frame.cov(min_periods=len(self.frame) + 1)
self.assertTrue(isnull(result.values).all())
# with NAs
frame = self.frame.copy()
frame['A'][:5] = nan
frame['B'][5:10] = nan
result = frame.cov(min_periods=len(self.frame) - 8)
expected = frame.cov()
expected.ix['A', 'B'] = np.nan
expected.ix['B', 'A'] = np.nan
assert_frame_equal(result, expected)
# regular
self.frame['A'][:5] = nan
self.frame['B'][:10] = nan
cov = self.frame.cov()
assert_almost_equal(cov['A']['C'],
self.frame['A'].cov(self.frame['C']))
# exclude non-numeric types
result = self.mixed_frame.cov()
expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].cov()
assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0,1.0,10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1,1)),
index=df.columns,columns=df.columns)
assert_frame_equal(result, expected)
df.ix[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1,1)),
index=df.columns,columns=df.columns)
assert_frame_equal(result, expected)
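# corrwith computes pairwise correlations against another frame, aligning on labels;
# drop=True removes labels with no overlap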
def test_corrwith(self):
a = self.tsframe
noise = Series(randn(len(a)), index=a.index)
b = self.tsframe + noise
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
self.assertNotIn('B', dropped)
dropped = a.corrwith(b, axis=1, drop=True)
self.assertNotIn(a.index[-1], dropped.index)
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(randn(5, 4), index=index, columns=columns)
df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
assert_almost_equal(correls[row], df1.ix[row].corr(df2.ix[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.ix[:, cols].corrwith(df2.ix[:, cols])
assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.ix[:, cols].corrwith(df2.ix[:, cols], axis=1)
assert_series_equal(result, expected)
def test_corrwith_series(self):
result = self.tsframe.corrwith(self.tsframe['A'])
expected = self.tsframe.apply(self.tsframe['A'].corr)
assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000)**2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'],df2['a'])[0][1]
assert_almost_equal(c1, c2)
self.assertTrue(c1 < 1)
def test_drop_names(self):
df = DataFrame([[1, 2, 3],[3, 4, 5],[5, 6, 7]], index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
df.index.name, df.columns.name = 'first', 'second'
df_dropped_b = df.drop('b')
df_dropped_e = df.drop('e', axis=1)
df_inplace_b, df_inplace_e = df.copy(), df.copy()
df_inplace_b.drop('b', inplace=True)
df_inplace_e.drop('e', axis=1, inplace=True)
for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
self.assertEqual(obj.index.name, 'first')
self.assertEqual(obj.columns.name, 'second')
self.assertEqual(list(df.columns), ['d', 'e', 'f'])
def test_dropEmptyRows(self):
N = len(self.frame.index)
mat = randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
original = Series(mat, index=self.frame.index)
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna(how='all')
# check that original was preserved
assert_series_equal(frame['foo'], original)
inplace_frame1.dropna(how='all', inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame1['foo'], expected)
smaller_frame = frame.dropna(how='all', subset=['foo'])
inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
N = len(self.frame.index)
mat = randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
frame['bar'] = 5
original = Series(mat, index=self.frame.index)
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
assert_series_equal(frame['foo'], original)
inp_frame1.dropna(inplace=True)
self.assert_numpy_array_equal(smaller_frame['foo'], mat[5:])
self.assert_numpy_array_equal(inp_frame1['foo'], mat[5:])
samesize_frame = frame.dropna(subset=['bar'])
assert_series_equal(frame['foo'], original)
self.assertTrue((frame['bar'] == 5).all())
inp_frame2.dropna(subset=['bar'], inplace=True)
self.assertTrue(samesize_frame.index.equals(self.frame.index))
self.assertTrue(inp_frame2.index.equals(self.frame.index))
def test_dropna(self):
df = DataFrame(np.random.randn(6, 4))
df[2][:2] = nan
dropped = df.dropna(axis=1)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
# threshold
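# thresh=N keeps labels with at least N non-NA values; column 2 has only 4 non-NA
# values here, so thresh=5 drops it while thresh=4 keeps it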
dropped = df.dropna(axis=1, thresh=5)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, thresh=5, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0, thresh=4)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, thresh=4, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=1, thresh=4)
assert_frame_equal(dropped, df)
dropped = df.dropna(axis=1, thresh=3)
assert_frame_equal(dropped, df)
# subset
dropped = df.dropna(axis=0, subset=[0, 1, 3])
inp = df.copy()
inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)
assert_frame_equal(dropped, df)
assert_frame_equal(inp, df)
# all
dropped = df.dropna(axis=1, how='all')
assert_frame_equal(dropped, df)
df[2] = nan
dropped = df.dropna(axis=1, how='all')
expected = df.ix[:, [0, 1, 3]]
assert_frame_equal(dropped, expected)
# bad input
self.assertRaises(ValueError, df.dropna, axis=3)
def test_drop_and_dropna_caching(self):
# test that the cacher updates
original = Series([1, 2, np.nan])
expected = Series([1, 2], dtype=original.dtype)
df = pd.DataFrame({'A': original.values.copy()})
df2 = df.copy()
df['A'].dropna()
assert_series_equal(df['A'], original)
df['A'].dropna(inplace=True)
assert_series_equal(df['A'], expected)
df2['A'].drop([1])
assert_series_equal(df2['A'], original)
df2['A'].drop([1], inplace=True)
assert_series_equal(df2['A'], original.drop([1]))
def test_dropna_corner(self):
# bad input
self.assertRaises(ValueError, self.frame.dropna, how='foo')
self.assertRaises(TypeError, self.frame.dropna, how=None)
# non-existent column - 8303
self.assertRaises(KeyError, self.frame.dropna, subset=['A','X'])
def test_dropna_multiple_axes(self):
df = DataFrame([[1, np.nan, 2, 3],
[4, np.nan, 5, 6],
[np.nan, np.nan, np.nan, np.nan],
[7, np.nan, 8, 9]])
cp = df.copy()
result = df.dropna(how='all', axis=[0, 1])
result2 = df.dropna(how='all', axis=(0, 1))
expected = df.dropna(how='all').dropna(how='all', axis=1)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(df, cp)
inp = df.copy()
inp.dropna(how='all', axis=(0, 1), inplace=True)
assert_frame_equal(inp, expected)
def test_drop_duplicates(self):
df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates('AAA')
expected = df[:2]
assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', take_last=True)
expected = df.ix[[6, 7]]
assert_frame_equal(result, expected)
# multi column
expected = df.ix[[0, 1, 2, 3]]
result = df.drop_duplicates(np.array(['AAA', 'B']))
assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'])
assert_frame_equal(result, expected)
result = df.drop_duplicates(('AAA', 'B'), take_last=True)
expected = df.ix[[0, 5, 6, 7]]
assert_frame_equal(result, expected)
# consider everything
df2 = df.ix[:, ['AAA', 'B', 'C']]
result = df2.drop_duplicates()
# equivalent to ['AAA', 'B'] here because column C mirrors column B
expected = df2.drop_duplicates(['AAA', 'B'])
assert_frame_equal(result, expected)
result = df2.drop_duplicates(take_last=True)
expected = df2.drop_duplicates(['AAA', 'B'], take_last=True)
assert_frame_equal(result, expected)
def test_drop_duplicates_deprecated_warning(self):
df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
expected = df[:2]
# subset keyword does not warn; the deprecated cols keyword raises FutureWarning
with tm.assert_produces_warning(False):
result = df.drop_duplicates(subset='AAA')
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = df.drop_duplicates(cols='AAA')
assert_frame_equal(result, expected)
# Does not allow both subset and cols
self.assertRaises(TypeError, df.drop_duplicates,
kwargs={'cols': 'AAA', 'subset': 'B'})
# Does not allow unknown kwargs
self.assertRaises(TypeError, df.drop_duplicates,
kwargs={'subset': 'AAA', 'bad_arg': True})
def test_drop_duplicates_tuple(self):
df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates(('AA', 'AB'))
expected = df[:2]
assert_frame_equal(result, expected)
result = df.drop_duplicates(('AA', 'AB'), take_last=True)
expected = df.ix[[6, 7]]
assert_frame_equal(result, expected)
# multi column
expected = df.ix[[0, 1, 2, 3]]
result = df.drop_duplicates((('AA', 'AB'), 'B'))
assert_frame_equal(result, expected)
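# None and NaN should be treated as ordinary keys by drop_duplicates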
def test_drop_duplicates_NA(self):
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': lrange(8)})
# single column
result = df.drop_duplicates('A')
expected = df.ix[[0, 2, 3]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('A', take_last=True)
expected = df.ix[[1, 6, 7]]
assert_frame_equal(result, expected)
# multi column
result = df.drop_duplicates(['A', 'B'])
expected = df.ix[[0, 2, 3, 6]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['A', 'B'], take_last=True)
expected = df.ix[[1, 5, 6, 7]]
assert_frame_equal(result, expected)
# nan
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': lrange(8)})
# single column
result = df.drop_duplicates('C')
expected = df[:2]
assert_frame_equal(result, expected)
result = df.drop_duplicates('C', take_last=True)
expected = df.ix[[3, 7]]
assert_frame_equal(result, expected)
# multi column
result = df.drop_duplicates(['C', 'B'])
expected = df.ix[[0, 1, 2, 4]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['C', 'B'], take_last=True)
expected = df.ix[[1, 3, 6, 7]]
assert_frame_equal(result, expected)
def test_drop_duplicates_inplace(self):
orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
df = orig.copy()
df.drop_duplicates('A', inplace=True)
expected = orig[:2]
result = df
assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates('A', take_last=True, inplace=True)
expected = orig.ix[[6, 7]]
result = df
assert_frame_equal(result, expected)
# multi column
df = orig.copy()
df.drop_duplicates(['A', 'B'], inplace=True)
expected = orig.ix[[0, 1, 2, 3]]
result = df
assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates(['A', 'B'], take_last=True, inplace=True)
expected = orig.ix[[0, 5, 6, 7]]
result = df
assert_frame_equal(result, expected)
# consider everything
orig2 = orig.ix[:, ['A', 'B', 'C']].copy()
df2 = orig2.copy()
df2.drop_duplicates(inplace=True)
# equivalent to ['A', 'B'] here because column C mirrors column B
expected = orig2.drop_duplicates(['A', 'B'])
result = df2
assert_frame_equal(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(take_last=True, inplace=True)
expected = orig2.drop_duplicates(['A', 'B'], take_last=True)
result = df2
assert_frame_equal(result, expected)
def test_duplicated_deprecated_warning(self):
df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# subset keyword does not warn; the deprecated cols keyword raises FutureWarning
with tm.assert_produces_warning(False):
result = df.duplicated(subset='AAA')
with tm.assert_produces_warning(FutureWarning):
result = df.duplicated(cols='AAA')
# Does not allow both subset and cols
self.assertRaises(TypeError, df.duplicated,
kwargs={'cols': 'AAA', 'subset': 'B'})
# Does not allow unknown kwargs
self.assertRaises(TypeError, df.duplicated,
kwargs={'subset': 'AAA', 'bad_arg': True})
def test_drop_col_still_multiindex(self):
arrays = [['a', 'b', 'c', 'top'],
['', '', '', 'OD'],
['', '', '', 'wx']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(3, 4), columns=index)
del df[('a', '', '')]
assert(isinstance(df.columns, MultiIndex))
def test_drop(self):
simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
simple[[]])
assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :])
assert_frame_equal(simple.drop([0, 3], axis='index'), simple.ix[[1, 2], :])
# non-unique - wheee!
nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),
columns=['a', 'a', 'b'])
assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])
nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
nu_df.columns = list('abc')
assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.ix[["Y"], :])
assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.ix[[], :])
# inplace cache issue
# GH 5628
df = pd.DataFrame(np.random.randn(10,3), columns=list('abc'))
expected = df[~(df.b>0)]
df.drop(labels=df[df.b>0].index, inplace=True)
assert_frame_equal(df,expected)
def test_fillna(self):
self.tsframe.ix[:5,'A'] = nan
self.tsframe.ix[-5:,'A'] = nan
zero_filled = self.tsframe.fillna(0)
self.assertTrue((zero_filled.ix[:5,'A'] == 0).all())
padded = self.tsframe.fillna(method='pad')
self.assertTrue(np.isnan(padded.ix[:5,'A']).all())
self.assertTrue((padded.ix[-5:,'A'] == padded.ix[-5,'A']).all())
# mixed type
self.mixed_frame.ix[5:20,'foo'] = nan
self.mixed_frame.ix[-10:,'A'] = nan
result = self.mixed_frame.fillna(value=0)
result = self.mixed_frame.fillna(method='pad')
self.assertRaises(ValueError, self.tsframe.fillna)
self.assertRaises(ValueError, self.tsframe.fillna, 5, method='ffill')
# mixed numeric (but no float16)
mf = self.mixed_float.reindex(columns=['A','B','D'])
mf.ix[-10:,'A'] = nan
result = mf.fillna(value=0)
_check_mixed_float(result, dtype = dict(C = None))
result = mf.fillna(method='pad')
_check_mixed_float(result, dtype = dict(C = None))
# empty frame (GH #2778)
df = DataFrame(columns=['x'])
for m in ['pad','backfill']:
df.x.fillna(method=m,inplace=1)
df.x.fillna(method=m)
# with different dtype (GH3386)
df = DataFrame([['a','a',np.nan,'a'],['b','b',np.nan,'b'],['c','c',np.nan,'c']])
result = df.fillna({ 2: 'foo' })
expected = DataFrame([['a','a','foo','a'],['b','b','foo','b'],['c','c','foo','c']])
assert_frame_equal(result, expected)
df.fillna({ 2: 'foo' }, inplace=True)
assert_frame_equal(df, expected)
# limit and value
df = DataFrame(np.random.randn(10,3))
df.iloc[2:7,0] = np.nan
df.iloc[3:5,2] = np.nan
expected = df.copy()
expected.iloc[2,0] = 999
expected.iloc[3,2] = 999
result = df.fillna(999,limit=1)
assert_frame_equal(result, expected)
# with datelike
# GH 6344
df = DataFrame({
'Date':[pd.NaT, Timestamp("2014-1-1")],
'Date2':[ Timestamp("2013-1-1"), pd.NaT]
})
expected = df.copy()
expected['Date'] = expected['Date'].fillna(df.ix[0,'Date2'])
result = df.fillna(value={'Date':df['Date2']})
assert_frame_equal(result, expected)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = DataFrame(index=["A","B","C"], columns = [1,2,3,4,5])
result = df.get_dtype_counts().order()
expected = Series({ 'object' : 5 })
assert_series_equal(result, expected)
result = df.fillna(1)
expected = DataFrame(1, index=["A","B","C"], columns = [1,2,3,4,5])
result = result.get_dtype_counts().order()
expected = Series({ 'int64' : 5 })
assert_series_equal(result, expected)
# empty block
df = DataFrame(index=lrange(3),columns=['A','B'],dtype='float64')
result = df.fillna('nan')
expected = DataFrame('nan',index=lrange(3),columns=['A','B'])
assert_frame_equal(result, expected)
# equiv of replace
df = DataFrame(dict(A = [1,np.nan], B = [1.,2.]))
for v in ['',1,np.nan,1.0]:
expected = df.replace(np.nan,v)
result = df.fillna(v)
assert_frame_equal(result, expected)
def test_ffill(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
assert_frame_equal(self.tsframe.ffill(),
self.tsframe.fillna(method='ffill'))
def test_bfill(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
assert_frame_equal(self.tsframe.bfill(),
self.tsframe.fillna(method='bfill'))
def test_fillna_skip_certain_blocks(self):
# don't try to fill boolean, int blocks
df = DataFrame(np.random.randn(10, 4).astype(int))
# it works!
df.fillna(np.nan)
def test_fillna_inplace(self):
df = DataFrame(np.random.randn(10, 4))
df[1][:4] = np.nan
df[3][-4:] = np.nan
expected = df.fillna(value=0)
self.assertIsNot(expected, df)
df.fillna(value=0, inplace=True)
assert_frame_equal(df, expected)
df[1][:4] = np.nan
df[3][-4:] = np.nan
expected = df.fillna(method='ffill')
self.assertIsNot(expected, df)
df.fillna(method='ffill', inplace=True)
assert_frame_equal(df, expected)
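# a dict (or Series) value fills each column with its own fill value; keys that are
# not columns are ignored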
def test_fillna_dict_series(self):
df = DataFrame({'a': [nan, 1, 2, nan, nan],
'b': [1, 2, 3, nan, nan],
'c': [nan, 1, 2, 3, 4]})
result = df.fillna({'a': 0, 'b': 5})
expected = df.copy()
expected['a'] = expected['a'].fillna(0)
expected['b'] = expected['b'].fillna(5)
assert_frame_equal(result, expected)
# it works
result = df.fillna({'a': 0, 'b': 5, 'd': 7})
# Series treated same as dict
result = df.fillna(df.max())
expected = df.fillna(df.max().to_dict())
assert_frame_equal(result, expected)
# disable this for now
with assertRaisesRegexp(NotImplementedError, 'column by column'):
df.fillna(df.max(1), axis=1)
def test_fillna_dataframe(self):
# GH 8377
df = DataFrame({'a': [nan, 1, 2, nan, nan],
'b': [1, 2, 3, nan, nan],
'c': [nan, 1, 2, 3, 4]},
index = list('VWXYZ'))
# df2 may have different index and columns
df2 = DataFrame({'a': [nan, 10, 20, 30, 40],
'b': [50, 60, 70, 80, 90],
'foo': ['bar']*5},
index = list('VWXuZ'))
result = df.fillna(df2)
# only those columns and indices which are shared get filled
expected = DataFrame({'a': [nan, 1, 2, nan, 40],
'b': [1, 2, 3, nan, 90],
'c': [nan, 1, 2, 3, 4]},
index = list('VWXYZ'))
assert_frame_equal(result, expected)
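# fillna along axis=1 should match forward-filling the transpose along axis=0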
def test_fillna_columns(self):
df = DataFrame(np.random.randn(10, 10))
df.values[:, ::2] = np.nan
result = df.fillna(method='ffill', axis=1)
expected = df.T.fillna(method='pad').T
assert_frame_equal(result, expected)
df.insert(6, 'foo', 5)
result = df.fillna(method='ffill', axis=1)
expected = df.astype(float).fillna(method='ffill', axis=1)
assert_frame_equal(result, expected)
def test_fillna_invalid_method(self):
with assertRaisesRegexp(ValueError, 'ffil'):
self.frame.fillna(method='ffil')
def test_fillna_invalid_value(self):
# list
self.assertRaises(TypeError, self.frame.fillna, [1, 2])
# tuple
self.assertRaises(TypeError, self.frame.fillna, (1, 2))
# frame with series
self.assertRaises(ValueError, self.frame.iloc[:,0].fillna, self.frame)
def test_replace_inplace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
tsframe = self.tsframe.copy()
tsframe.replace(nan, 0, inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
self.assertRaises(TypeError, self.tsframe.replace, nan, inplace=True)
self.assertRaises(TypeError, self.tsframe.replace, nan)
# mixed type
self.mixed_frame.ix[5:20,'foo'] = nan
self.mixed_frame.ix[-10:,'A'] = nan
result = self.mixed_frame.replace(np.nan, 0)
expected = self.mixed_frame.fillna(value=0)
assert_frame_equal(result, expected)
tsframe = self.tsframe.copy()
tsframe.replace([nan], [0], inplace=True)
assert_frame_equal(tsframe, self.tsframe.fillna(0))
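# with regex=True, to_replace is interpreted as a regular expression and replacement
# strings may use backreferences such as \1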
def test_regex_replace_scalar(self):
obj = {'a': list('ab..'), 'b': list('efgh')}
dfobj = DataFrame(obj)
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
### simplest cases
## regex -> value
# obj frame
res = dfobj.replace(r'\s*\.\s*', nan, regex=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.replace(r'\s*\.\s*', nan, regex=True)
assert_frame_equal(dfmix, res.fillna('.'))
## regex -> regex
# obj frame
res = dfobj.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.replace(re.compile(r'\s*\.\s*'), nan, regex=True)
assert_frame_equal(dfmix, res.fillna('.'))
## regex -> regex
# obj frame
res = dfobj.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfmix.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfmix.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1')
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
def test_regex_replace_scalar_inplace(self):
obj = {'a': list('ab..'), 'b': list('efgh')}
dfobj = DataFrame(obj)
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
### simplest cases
## regex -> value
# obj frame
res = dfobj.copy()
res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(r'\s*\.\s*', nan, regex=True, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
## regex -> regex
# obj frame
res = dfobj.copy()
res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(r'\s*(\.)\s*', r'\1\1\1', regex=True, inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
## regex -> regex
# obj frame
res = dfobj.copy()
res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(re.compile(r'\s*(\.)\s*'), r'\1\1\1', regex=True,
inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
res = dfobj.copy()
res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(regex=r'\s*\.\s*', value=nan, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
## regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=r'\s*(\.)\s*', value=r'\1\1\1', inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
# everything with compiled regexs as well
res = dfobj.copy()
res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
assert_frame_equal(dfobj, res.fillna('.'))
# mixed
res = dfmix.copy()
res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True)
assert_frame_equal(dfmix, res.fillna('.'))
## regex -> regex
# obj frame
res = dfobj.copy()
res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
inplace=True)
objc = obj.copy()
objc['a'] = ['a', 'b', '...', '...']
expec = DataFrame(objc)
assert_frame_equal(res, expec)
# with mixed
res = dfmix.copy()
res.replace(regex=re.compile(r'\s*(\.)\s*'), value=r'\1\1\1',
inplace=True)
mixc = mix.copy()
mixc['b'] = ['a', 'b', '...', '...']
expec = DataFrame(mixc)
assert_frame_equal(res, expec)
def test_regex_replace_list_obj(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
## lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.replace(value=values, regex=to_replace_res)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_obj_inplace(self):
### same as above with inplace=True
## lists of regexes and values
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
## lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'e|f|g']
values = [nan, 'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +
['h'], 'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(e|f|g)']
values = [r'\1\1', r'\1_crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',
'f_crap',
'g_crap', 'h'],
'c': ['h', 'e_crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'e']
values = [r'\1\1', r'crap']
res = dfobj.copy()
res.replace(value=values, regex=to_replace_res, inplace=True)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',
'h'],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed(self):
## mixed frame to make sure this doesn't break things
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
## lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
mix2 = {'a': lrange(4), 'b': list('ab..'), 'c': list('halo')}
dfmix2 = DataFrame(mix2)
res = dfmix2.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix2['a'], 'b': ['crap', 'b', nan, nan],
'c': ['h', 'crap', 'l', 'o']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(to_replace_res, values, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.replace(regex=to_replace_res, value=values)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_list_mixed_inplace(self):
mix = {'a': lrange(4), 'b': list('ab..')}
dfmix = DataFrame(mix)
# the same inplace
## lists of regexes and values
# list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]
to_replace_res = [r'\s*\.\s*', r'a']
values = [nan, 'crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b', nan, nan]})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [re1, re2, .., reN]
to_replace_res = [r'\s*(\.)\s*', r'(a|b)']
values = [r'\1\1', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',
'..']})
assert_frame_equal(res, expec)
# list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN
# or vN)]
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(to_replace_res, values, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
to_replace_res = [r'\s*(\.)\s*', r'a', r'(b)']
values = [r'\1\1', r'crap', r'\1_crap']
res = dfmix.copy()
res.replace(regex=to_replace_res, value=values, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})
assert_frame_equal(res, expec)
def test_regex_replace_dict_mixed(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
## dicts
# single dict {re1: v1}, search the whole frame
# need test for this...
# list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole
# frame
res = dfmix.replace({'b': r'\s*\.\s*'}, {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*\.\s*'}, {'b': nan}, inplace=True, regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# list of dicts {re1: re11, re2: re12, ..., reN: re1N}, search the
# whole frame
res = dfmix.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True)
res2 = dfmix.copy()
res2.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, inplace=True,
regex=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'})
res2 = dfmix.copy()
res2.replace(regex={'b': r'\s*(\.)\s*'}, value={'b': r'\1ty'},
inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
# scalar -> dict
# to_replace regex, {value: value}
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace('a', {'b': nan}, regex=True, inplace=True)
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
res = dfmix.replace('a', {'b': nan}, regex=True)
res2 = dfmix.copy()
res2.replace(regex='a', value={'b': nan}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
def test_regex_replace_dict_nested(self):
# nested dicts will not work until this is implemented for Series
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
dfmix = DataFrame(mix)
res = dfmix.replace({'b': {r'\s*\.\s*': nan}}, regex=True)
res2 = dfmix.copy()
res4 = dfmix.copy()
res2.replace({'b': {r'\s*\.\s*': nan}}, inplace=True, regex=True)
res3 = dfmix.replace(regex={'b': {r'\s*\.\s*': nan}})
res4.replace(regex={'b': {r'\s*\.\s*': nan}}, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
assert_frame_equal(res4, expec)
def test_regex_replace_dict_nested_gh4115(self):
df = pd.DataFrame({'Type':['Q','T','Q','Q','T'], 'tmp':2})
expected = DataFrame({'Type': [0,1,0,0,1], 'tmp': 2})
assert_frame_equal(df.replace({'Type': {'Q':0,'T':1}}), expected)
def test_regex_replace_list_to_scalar(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace([r'\s*\.\s*', 'a|b'], nan, regex=True)
res2 = df.copy()
res3 = df.copy()
res2.replace([r'\s*\.\s*', 'a|b'], nan, regex=True, inplace=True)
res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=nan, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),
'c': [nan, nan, nan, 'd']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_str_to_numeric(self):
# what happens when you try to replace a numeric value with a regex?
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace(r'\s*\.\s*', 0, regex=True)
res2 = df.copy()
res2.replace(r'\s*\.\s*', 0, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=r'\s*\.\s*', value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', 0, 0], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_regex_list_to_numeric(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace([r'\s*\.\s*', 'b'], 0, regex=True)
res2 = df.copy()
res2.replace([r'\s*\.\s*', 'b'], 0, regex=True, inplace=True)
res3 = df.copy()
res3.replace(regex=[r'\s*\.\s*', 'b'], value=0, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 0, 0, 0], 'c': ['a', 0,
nan,
'd']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_series_of_regexes(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
s1 = Series({'b': r'\s*\.\s*'})
s2 = Series({'b': nan})
res = df.replace(s1, s2, regex=True)
res2 = df.copy()
res2.replace(s1, s2, inplace=True, regex=True)
res3 = df.copy()
res3.replace(regex=s1, value=s2, inplace=True)
expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':
mix['c']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
def test_regex_replace_numeric_to_object_conversion(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
res = df.replace(0, 'a')
expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']})
assert_frame_equal(res, expec)
self.assertEqual(res.a.dtype, np.object_)
def test_replace_regex_metachar(self):
metachars = '[]', '()', '\d', '\w', '\s'
for metachar in metachars:
df = DataFrame({'a': [metachar, 'else']})
result = df.replace({'a': {metachar: 'paren'}})
expected = DataFrame({'a': ['paren', 'else']})
tm.assert_frame_equal(result, expected)
def test_replace(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
zero_filled = self.tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, self.tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), self.tsframe)
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
self.tsframe['B'][:5] = -1e8
# empty
df = DataFrame(index=['a', 'b'])
assert_frame_equal(df, df.replace(5, 7))
def test_replace_list(self):
obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}
dfobj = DataFrame(obj)
## lists of regexes and values
# list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]
to_replace_res = [r'.', r'e']
values = [nan, 'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', nan, nan],
'b': ['crap', 'f', 'g', 'h'], 'c': ['h', 'crap',
'l', 'o']})
assert_frame_equal(res, expec)
# list of [v1, v2, ..., vN] -> [v1, v2, .., vN]
to_replace_res = [r'.', r'f']
values = [r'..', r'crap']
res = dfobj.replace(to_replace_res, values)
expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e', 'crap', 'g',
'h'],
'c': ['h', 'e', 'l', 'o']})
assert_frame_equal(res, expec)
def test_replace_series_dict(self):
# from GH 3064
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
result = df.replace(0, {'zero': 0.5, 'one': 1.0})
expected = DataFrame({'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 2.0, 'b': 1.0}})
assert_frame_equal(result, expected)
result = df.replace(0, df.mean())
assert_frame_equal(result, expected)
# series to series/dict
df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})
s = Series({'zero': 0.0, 'one': 2.0})
result = df.replace(s, {'zero': 0.5, 'one': 1.0})
expected = DataFrame({'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 1.0, 'b': 0.0}})
assert_frame_equal(result, expected)
result = df.replace(s, df.mean())
assert_frame_equal(result, expected)
def test_replace_convert(self):
# gh 3907
df = DataFrame([['foo', 'bar', 'bah'], ['bar', 'foo', 'bah']])
m = {'foo': 1, 'bar': 2, 'bah': 3}
rep = df.replace(m)
expec = Series([ np.int64] * 3)
res = rep.dtypes
assert_series_equal(expec, res)
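# replace() may upcast blocks (int -> float, numeric -> object) when the replacement
# value does not fit the original dtype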
def test_replace_mixed(self):
self.mixed_frame.ix[5:20,'foo'] = nan
self.mixed_frame.ix[-10:,'A'] = nan
result = self.mixed_frame.replace(np.nan, -18)
expected = self.mixed_frame.fillna(value=-18)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-18, nan), self.mixed_frame)
result = self.mixed_frame.replace(np.nan, -1e8)
expected = self.mixed_frame.fillna(value=-1e8)
assert_frame_equal(result, expected)
assert_frame_equal(result.replace(-1e8, nan), self.mixed_frame)
# int block upcasting
df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64') })
expected = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0.5,1],dtype='float64') })
result = df.replace(0, 0.5)
assert_frame_equal(result,expected)
df.replace(0, 0.5, inplace=True)
assert_frame_equal(df,expected)
# int block splitting
df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64'), 'C' : Series([1,2],dtype='int64') })
expected = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0.5,1],dtype='float64'), 'C' : Series([1,2],dtype='int64') })
result = df.replace(0, 0.5)
assert_frame_equal(result,expected)
# to object block upcasting
df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64') })
expected = DataFrame({ 'A' : Series([1,'foo'],dtype='object'), 'B' : Series([0,1],dtype='int64') })
result = df.replace(2, 'foo')
assert_frame_equal(result,expected)
expected = DataFrame({ 'A' : Series(['foo','bar'],dtype='object'), 'B' : Series([0,'foo'],dtype='object') })
result = df.replace([1,2], ['foo','bar'])
assert_frame_equal(result,expected)
# test case from
from pandas.util.testing import makeCustomDataframe as mkdf
df = DataFrame({'A' : Series([3,0],dtype='int64'), 'B' : Series([0,3],dtype='int64') })
result = df.replace(3, df.mean().to_dict())
expected = df.copy().astype('float64')
m = df.mean()
expected.iloc[0,0] = m[0]
expected.iloc[1,1] = m[1]
assert_frame_equal(result,expected)
def test_replace_simple_nested_dict(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({'col': {1: 'a', 4: 'b'}})
tm.assert_frame_equal(expected, result)
# in this case, should be the same as the not nested version
result = df.replace({1: 'a', 4: 'b'})
tm.assert_frame_equal(expected, result)
def test_replace_simple_nested_dict_with_nonexistent_value(self):
df = DataFrame({'col': range(1, 5)})
expected = DataFrame({'col': ['a', 2, 3, 'b']})
result = df.replace({-1: '-', 1: 'a', 4: 'b'})
tm.assert_frame_equal(expected, result)
result = df.replace({'col': {-1: '-', 1: 'a', 4: 'b'}})
tm.assert_frame_equal(expected, result)
def test_interpolate(self):
pass
def test_replace_value_is_none(self):
self.assertRaises(TypeError, self.tsframe.replace, nan)
orig_value = self.tsframe.iloc[0, 0]
orig2 = self.tsframe.iloc[1, 0]
self.tsframe.iloc[0, 0] = nan
self.tsframe.iloc[1, 0] = 1
result = self.tsframe.replace(to_replace={nan: 0})
expected = self.tsframe.T.replace(to_replace={nan: 0}).T
assert_frame_equal(result, expected)
result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8})
tsframe = self.tsframe.copy()
tsframe.iloc[0, 0] = 0
tsframe.iloc[1, 0] = -1e8
expected = tsframe
assert_frame_equal(expected, result)
self.tsframe.iloc[0, 0] = orig_value
self.tsframe.iloc[1, 0] = orig2
def test_replace_for_new_dtypes(self):
# dtypes
tsframe = self.tsframe.copy().astype(np.float32)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
zero_filled = tsframe.replace(nan, -1e8)
assert_frame_equal(zero_filled, tsframe.fillna(-1e8))
assert_frame_equal(zero_filled.replace(-1e8, nan), tsframe)
tsframe['A'][:5] = nan
tsframe['A'][-5:] = nan
tsframe['B'][:5] = -1e8
b = tsframe['B']
b[b == -1e8] = nan
tsframe['B'] = b
result = tsframe.fillna(method='bfill')
assert_frame_equal(result, tsframe.fillna(method='bfill'))
def test_replace_dtypes(self):
# int
df = DataFrame({'ints': [1, 2, 3]})
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]})
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int32)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int32)
assert_frame_equal(result, expected)
df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int16)
result = df.replace(1, 0)
expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int16)
assert_frame_equal(result, expected)
# bools
df = DataFrame({'bools': [True, False, True]})
result = df.replace(False, True)
self.assertTrue(result.values.all())
# complex blocks
df = DataFrame({'complex': [1j, 2j, 3j]})
result = df.replace(1j, 0j)
expected = DataFrame({'complex': [0j, 2j, 3j]})
assert_frame_equal(result, expected)
# datetime blocks
prev = datetime.today()
now = datetime.today()
df = DataFrame({'datetime64': Index([prev, now, prev])})
result = df.replace(prev, now)
expected = DataFrame({'datetime64': Index([now] * 3)})
assert_frame_equal(result, expected)
def test_replace_input_formats(self):
# both dicts
to_rep = {'A': np.nan, 'B': 0, 'C': ''}
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(to_rep, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], values[k])
assert_frame_equal(filled, DataFrame(expected))
result = df.replace([0, 2, 5], [5, 2, 0])
expected = DataFrame({'A': [np.nan, 5, np.inf], 'B': [5, 2, 0],
'C': ['', 'asdf', 'fd']})
assert_frame_equal(result, expected)
# dict to scalar
filled = df.replace(to_rep, 0)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(to_rep[k], 0)
assert_frame_equal(filled, DataFrame(expected))
self.assertRaises(TypeError, df.replace, to_rep, [np.nan, 0, ''])
# scalar to dict
values = {'A': 0, 'B': -1, 'C': 'missing'}
df = DataFrame({'A': [np.nan, 0, np.nan], 'B': [0, 2, 5],
'C': ['', 'asdf', 'fd']})
filled = df.replace(np.nan, values)
expected = {}
for k, v in compat.iteritems(df):
expected[k] = v.replace(np.nan, values[k])
assert_frame_equal(filled, DataFrame(expected))
# list to list
to_rep = [np.nan, 0, '']
values = [-2, -1, 'missing']
result = df.replace(to_rep, values)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], values[i], inplace=True)
assert_frame_equal(result, expected)
self.assertRaises(ValueError, df.replace, to_rep, values[1:])
# list to scalar
to_rep = [np.nan, 0, '']
result = df.replace(to_rep, -1)
expected = df.copy()
for i in range(len(to_rep)):
expected.replace(to_rep[i], -1, inplace=True)
assert_frame_equal(result, expected)
def test_replace_limit(self):
pass
def test_replace_dict_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = {'Agree': 4, 'Disagree': 2, 'Neutral': 3, 'Strongly Agree':
5, 'Strongly Disagree': 1}
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
tm.assert_series_equal(result, expected)
def test_replace_series_no_regex(self):
answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:
'Disagree', 4: 'Strongly Disagree'})
weights = Series({'Agree': 4, 'Disagree': 2, 'Neutral': 3,
'Strongly Agree': 5, 'Strongly Disagree': 1})
expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})
result = answer.replace(weights)
tm.assert_series_equal(result, expected)
def test_replace_dict_tuple_list_ordering_remains_the_same(self):
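# dict, tuple and list forms of to_replace should all produce the same result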
df = DataFrame(dict(A=[nan, 1]))
res1 = df.replace(to_replace={nan: 0, 1: -1e8})
res2 = df.replace(to_replace=(1, nan), value=[-1e8, 0])
res3 = df.replace(to_replace=[1, nan], value=[-1e8, 0])
expected = DataFrame({'A': [0, -1e8]})
tm.assert_frame_equal(res1, res2)
tm.assert_frame_equal(res2, res3)
tm.assert_frame_equal(res3, expected)
def test_replace_doesnt_replace_without_regex(self):
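# a regex-like key is treated as a literal string when regex=True is not passed, so nothing is replaced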
from pandas.compat import StringIO
raw = """fol T_opp T_Dir T_Enh
0 1 0 0 vo
1 2 vr 0 0
2 2 0 0 0
3 3 0 bt 0"""
df = read_csv(StringIO(raw), sep=r'\s+')
res = df.replace({'\D': 1})
tm.assert_frame_equal(df, res)
def test_replace_bool_with_string(self):
df = DataFrame({'a': [True, False], 'b': list('ab')})
result = df.replace(True, 'a')
expected = DataFrame({'a': ['a', False], 'b': df.b})
tm.assert_frame_equal(result, expected)
def test_replace_pure_bool_with_string_no_op(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace('asdf', 'fdsa')
tm.assert_frame_equal(df, result)
def test_replace_bool_with_bool(self):
df = DataFrame(np.random.rand(2, 2) > 0.5)
result = df.replace(False, True)
expected = DataFrame(np.ones((2, 2), dtype=bool))
tm.assert_frame_equal(result, expected)
def test_replace_with_dict_with_bool_keys(self):
df = DataFrame({0: [True, False], 1: [False, True]})
with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
df.replace({'asdf': 'asdb', True: 'yes'})
def test_replace_truthy(self):
df = DataFrame({'a': [True, True]})
r = df.replace([np.inf, -np.inf], np.nan)
e = df
tm.assert_frame_equal(r, e)
def test_replace_int_to_int_chain(self):
df = DataFrame({'a': lrange(1, 5)})
with tm.assertRaisesRegexp(ValueError, "Replacement not allowed .+"):
df.replace({'a': dict(zip(range(1, 5), range(2, 6)))})
def test_replace_str_to_str_chain(self):
a = np.arange(1, 5)
astr = a.astype(str)
bstr = np.arange(2, 6).astype(str)
df = DataFrame({'a': astr})
with tm.assertRaisesRegexp(ValueError, "Replacement not allowed .+"):
df.replace({'a': dict(zip(astr, bstr))})
def test_replace_swapping_bug(self):
df = pd.DataFrame({'a': [True, False, True]})
res = df.replace({'a': {True: 'Y', False: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
tm.assert_frame_equal(res, expect)
df = pd.DataFrame({'a': [0, 1, 0]})
res = df.replace({'a': {0: 'Y', 1: 'N'}})
expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})
tm.assert_frame_equal(res, expect)
def test_replace_period(self):
d = {'fname':
{'out_augmented_AUG_2011.json': pd.Period(year=2011, month=8, freq='M'),
'out_augmented_JAN_2011.json': pd.Period(year=2011, month=1, freq='M'),
'out_augmented_MAY_2012.json': pd.Period(year=2012, month=5, freq='M'),
'out_augmented_SUBSIDY_WEEK.json': pd.Period(year=2011, month=4, freq='M'),
'out_augmented_AUG_2012.json': pd.Period(year=2012, month=8, freq='M'),
'out_augmented_MAY_2011.json': pd.Period(year=2011, month=5, freq='M'),
'out_augmented_SEP_2013.json': pd.Period(year=2013, month=9, freq='M')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]})
result = df.replace(d)
tm.assert_frame_equal(result, expected)
def test_replace_datetime(self):
d = {'fname':
{'out_augmented_AUG_2011.json': pd.Timestamp('2011-08'),
'out_augmented_JAN_2011.json': pd.Timestamp('2011-01'),
'out_augmented_MAY_2012.json': pd.Timestamp('2012-05'),
'out_augmented_SUBSIDY_WEEK.json': pd.Timestamp('2011-04'),
'out_augmented_AUG_2012.json': pd.Timestamp('2012-08'),
'out_augmented_MAY_2011.json': pd.Timestamp('2011-05'),
'out_augmented_SEP_2013.json': pd.Timestamp('2013-09')}}
df = pd.DataFrame(['out_augmented_AUG_2012.json',
'out_augmented_SEP_2013.json',
'out_augmented_SUBSIDY_WEEK.json',
'out_augmented_MAY_2012.json',
'out_augmented_MAY_2011.json',
'out_augmented_AUG_2011.json',
'out_augmented_JAN_2011.json'], columns=['fname'])
tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))
expected = DataFrame({'fname': [d['fname'][k]
for k in df.fname.values]})
result = df.replace(d)
tm.assert_frame_equal(result, expected)
def test_combine_multiple_frames_dtypes(self):
# GH 2759
A = DataFrame(data=np.ones((10, 2)), columns=['foo', 'bar'], dtype=np.float64)
B = DataFrame(data=np.ones((10, 2)), dtype=np.float32)
results = pd.concat((A, B), axis=1).get_dtype_counts()
expected = Series(dict( float64 = 2, float32 = 2 ))
assert_series_equal(results,expected)
def test_ops(self):
# test ops and reversed ops in evaluation
# GH7198
# smaller hits python, larger hits numexpr
for n in [ 4, 4000 ]:
df = DataFrame(1,index=range(n),columns=list('abcd'))
df.iloc[0] = 2
m = df.mean()
for op_str, op, rop in [('+','__add__','__radd__'),
('-','__sub__','__rsub__'),
('*','__mul__','__rmul__'),
('/','__truediv__','__rtruediv__')]:
base = DataFrame(np.tile(m.values,n).reshape(n,-1),columns=list('abcd'))
expected = eval("base{op}df".format(op=op_str))
# ops as strings
result = eval("m{op}df".format(op=op_str))
assert_frame_equal(result,expected)
# these are commutative
if op in ['+','*']:
result = getattr(df,op)(m)
assert_frame_equal(result,expected)
# these are not
elif op in ['-','/']:
result = getattr(df,rop)(m)
assert_frame_equal(result,expected)
# GH7192
df = DataFrame(dict(A=np.random.randn(25000)))
df.iloc[0:5] = np.nan
expected = (1-np.isnan(df.iloc[0:25]))
result = (1-np.isnan(df)).iloc[0:25]
assert_frame_equal(result,expected)
def test_truncate(self):
offset = datetools.bday
ts = self.tsframe[::3]
start, end = self.tsframe.index[3], self.tsframe.index[6]
start_missing = self.tsframe.index[2]
end_missing = self.tsframe.index[7]
# neither specified
truncated = ts.truncate()
assert_frame_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_frame_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_frame_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_frame_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_frame_equal(truncated, expected)
self.assertRaises(ValueError, ts.truncate,
before=ts.index[-1] - 1,
after=ts.index[0] + 1)
def test_truncate_copy(self):
index = self.tsframe.index
truncated = self.tsframe.truncate(index[5], index[10])
truncated.values[:] = 5.
self.assertFalse((self.tsframe.values[5:11] == 5).any())
def test_xs(self):
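# cross-section of a row (or a column via axis=1) is returned as a Series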
idx = self.frame.index[5]
xs = self.frame.xs(idx)
for item, value in compat.iteritems(xs):
if np.isnan(value):
self.assertTrue(np.isnan(self.frame[item][idx]))
else:
self.assertEqual(value, self.frame[item][idx])
# mixed-type xs
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data)
xs = frame.xs('1')
self.assertEqual(xs.dtype, np.object_)
self.assertEqual(xs['A'], 1)
self.assertEqual(xs['B'], '1')
with tm.assertRaises(KeyError):
self.tsframe.xs(self.tsframe.index[0] - datetools.bday)
# xs get column
series = self.frame.xs('A', axis=1)
expected = self.frame['A']
assert_series_equal(series, expected)
# view is returned if possible
series = self.frame.xs('A', axis=1)
series[:] = 5
self.assertTrue((expected == 5).all())
def test_xs_corner(self):
# pathological mixed-type reordering case
df = DataFrame(index=[0])
df['A'] = 1.
df['B'] = 'foo'
df['C'] = 2.
df['D'] = 'bar'
df['E'] = 3.
xs = df.xs(0)
assert_almost_equal(xs, [1., 'foo', 2., 'bar', 3.])
# no columns but index
df = DataFrame(index=['a', 'b', 'c'])
result = df.xs('a')
expected = Series([])
assert_series_equal(result, expected)
def test_xs_duplicates(self):
df = DataFrame(randn(5, 2), index=['b', 'b', 'c', 'b', 'a'])
cross = df.xs('c')
exp = df.irow(2)
assert_series_equal(cross, exp)
def test_xs_keep_level(self):
df = DataFrame({'day': {0: 'sat', 1: 'sun'},
'flavour': {0: 'strawberry', 1: 'strawberry'},
'sales': {0: 10, 1: 12},
'year': {0: 2008, 1: 2008}}).set_index(['year','flavour','day'])
result = df.xs('sat', level='day', drop_level=False)
expected = df[:1]
assert_frame_equal(result, expected)
result = df.xs([2008, 'sat'], level=['year', 'day'], drop_level=False)
assert_frame_equal(result, expected)
def test_pivot(self):
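# long-to-wide reshape driven by the index/columns/values keyword arguments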
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(pivoted, expected)
# name tracking
self.assertEqual(pivoted.index.name, 'index')
self.assertEqual(pivoted.columns.name, 'columns')
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
self.assertEqual(pivoted.index.name, 'index')
self.assertEqual(pivoted.columns.names, (None, 'columns'))
# pivot multiple columns
wp = tm.makePanel()
lp = wp.to_frame()
df = lp.reset_index()
assert_frame_equal(df.pivot('major', 'minor'), lp.unstack())
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with assertRaisesRegexp(ValueError, 'duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame({})
assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
self.assert_numpy_array_equal(result.columns, ['A', 'B'])
def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
for col in newFrame.columns:
for idx, val in compat.iteritems(newFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(newFrame):
self.assertTrue(tm.equalContents(series.index, newFrame.index))
emptyFrame = self.frame.reindex(Index([]))
self.assertEqual(len(emptyFrame.index), 0)
# Cython code should be unit-tested directly
nonContigFrame = self.frame.reindex(self.ts1.index[::2])
for col in nonContigFrame.columns:
for idx, val in compat.iteritems(nonContigFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(nonContigFrame):
self.assertTrue(tm.equalContents(series.index,
nonContigFrame.index))
# corner cases
# Same index, copies values but not index if copy=False
newFrame = self.frame.reindex(self.frame.index, copy=False)
self.assertIs(newFrame.index, self.frame.index)
# length zero
newFrame = self.frame.reindex([])
self.assertTrue(newFrame.empty)
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# length zero with columns reindexed with non-empty index
newFrame = self.frame.reindex([])
newFrame = newFrame.reindex(self.frame.index)
self.assertEqual(len(newFrame.index), len(self.frame.index))
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# pass non-Index
newFrame = self.frame.reindex(list(self.ts1.index))
self.assertTrue(newFrame.index.equals(self.ts1.index))
# copy with no axes
result = self.frame.reindex()
assert_frame_equal(result,self.frame)
self.assertFalse(result is self.frame)
def test_reindex_name_remains(self):
s = Series(random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(i)
self.assertEqual(df.index.name, 'iname')
df = df.reindex(Index(np.arange(10), name='tmpname'))
self.assertEqual(df.index.name, 'tmpname')
s = Series(random.rand(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(columns=i)
self.assertEqual(df.columns.name, 'iname')
def test_reindex_int(self):
smaller = self.intframe.reindex(self.intframe.index[::2])
self.assertEqual(smaller['A'].dtype, np.int64)
bigger = smaller.reindex(self.intframe.index)
self.assertEqual(bigger['A'].dtype, np.float64)
smaller = self.intframe.reindex(columns=['A', 'B'])
self.assertEqual(smaller['A'].dtype, np.int64)
def test_reindex_like(self):
other = self.frame.reindex(index=self.frame.index[:10],
columns=['C', 'B'])
assert_frame_equal(other, self.frame.reindex_like(other))
def test_reindex_columns(self):
newFrame = self.frame.reindex(columns=['A', 'B', 'E'])
assert_series_equal(newFrame['B'], self.frame['B'])
self.assertTrue(np.isnan(newFrame['E']).all())
self.assertNotIn('C', newFrame)
# length zero
newFrame = self.frame.reindex(columns=[])
self.assertTrue(newFrame.empty)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
from datetime import datetime
df = DataFrame(np.ones((3, 3)), index=[datetime(2012, 1, 1), datetime(2012, 1, 2), datetime(2012, 1, 3)], columns=['a', 'b', 'c'])
time_freq = date_range('2012-01-01', '2012-01-03', freq='d')
some_cols = ['a', 'b']
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(columns=some_cols).index.freq
self.assertEqual(index_freq, both_freq)
self.assertEqual(index_freq, seq_freq)
def test_reindex_fill_value(self):
df = DataFrame(np.random.randn(10, 4))
# axis=0
result = df.reindex(lrange(15))
self.assertTrue(np.isnan(result.values[-5:]).all())
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
# axis=1
result = df.reindex(columns=lrange(5), fill_value=0.)
expected = df.copy()
expected[4] = 0.
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value=0)
expected = df.copy()
expected[4] = 0
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value='foo')
expected = df.copy()
expected[4] = 'foo'
assert_frame_equal(result, expected)
# reindex_axis
result = df.reindex_axis(lrange(15), fill_value=0., axis=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
result = df.reindex_axis(lrange(5), fill_value=0., axis=1)
expected = df.reindex(columns=lrange(5)).fillna(0)
assert_frame_equal(result, expected)
# other dtypes
df['foo'] = 'foo'
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
def test_reindex_dups(self):
# GH4746, reindex on duplicate index error messages
arr = np.random.randn(10)
df = DataFrame(arr,index=[1,2,3,4,5,1,2,3,4,5])
# set index is ok
result = df.copy()
result.index = list(range(len(df)))
expected = DataFrame(arr,index=list(range(len(df))))
assert_frame_equal(result,expected)
# reindex fails
self.assertRaises(ValueError, df.reindex, index=list(range(len(df))))
def test_align(self):
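# align returns both frames reindexed to the joined labels (sharing data when copy=False)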
af, bf = self.frame.align(self.frame)
self.assertIsNot(af._data, self.frame._data)
af, bf = self.frame.align(self.frame, copy=False)
self.assertIs(af._data, self.frame._data)
# axis = 0
other = self.frame.ix[:-5, :3]
af, bf = self.frame.align(other, axis=0, fill_value=-1)
self.assertTrue(bf.columns.equals(other.columns))
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
self.assertTrue((diff_a_vals == -1).all())
af, bf = self.frame.align(other, join='right', axis=0)
self.assertTrue(bf.columns.equals(other.columns))
self.assertTrue(bf.index.equals(other.index))
self.assertTrue(af.index.equals(other.index))
# axis = 1
other = self.frame.ix[:-5, :3].copy()
af, bf = self.frame.align(other, axis=1)
self.assertTrue(bf.columns.equals(self.frame.columns))
self.assertTrue(bf.index.equals(other.index))
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
self.assertTrue((diff_a_vals == -1).all())
af, bf = self.frame.align(other, join='inner', axis=1)
self.assertTrue(bf.columns.equals(other.columns))
af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
self.assertTrue(bf.columns.equals(other.columns))
# test other non-float types
af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
self.assertTrue(bf.columns.equals(other.columns))
af, bf = self.mixed_frame.align(self.mixed_frame,
join='inner', axis=1, method='pad')
self.assertTrue(bf.columns.equals(self.mixed_frame.columns))
af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=None)
self.assertTrue(bf.index.equals(Index([])))
af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assertTrue(bf.index.equals(Index([])))
# mixed floats/ints
af, bf = self.mixed_float.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assertTrue(bf.index.equals(Index([])))
af, bf = self.mixed_int.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assertTrue(bf.index.equals(Index([])))
# try to align dataframe to series along bad axis
self.assertRaises(ValueError, self.frame.align, af.ix[0, :3],
join='inner', axis=2)
def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,
fill_axis=fill_axis)
join_index, join_columns = None, None
ea, eb = a, b
if axis is None or axis == 0:
join_index = a.index.join(b.index, how=how)
ea = ea.reindex(index=join_index)
eb = eb.reindex(index=join_index)
if axis is None or axis == 1:
join_columns = a.columns.join(b.columns, how=how)
ea = ea.reindex(columns=join_columns)
eb = eb.reindex(columns=join_columns)
ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
assert_frame_equal(aa, ea)
assert_frame_equal(ab, eb)
def test_align_fill_method_inner(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('inner', meth, ax, fax)
def test_align_fill_method_outer(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('outer', meth, ax, fax)
def test_align_fill_method_left(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('left', meth, ax, fax)
def test_align_fill_method_right(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('right', meth, ax, fax)
def _check_align_fill(self, kind, meth, ax, fax):
left = self.frame.ix[0:4, :10]
right = self.frame.ix[2:, 6:]
empty = self.frame.ix[:0, :0]
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty left
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty right
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# both empty
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
def test_align_int_fill_bug(self):
# GH #910
X = np.arange(10*10, dtype='float64').reshape(10, 10)
Y = np.ones((10, 1), dtype=int)
df1 = DataFrame(X)
df1['0.X'] = Y.squeeze()
df2 = df1.astype(float)
result = df1 - df1.mean()
expected = df2 - df2.mean()
assert_frame_equal(result, expected)
def test_where(self):
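# where keeps entries where cond is True and fills the rest from other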
default_frame = DataFrame(np.random.randn(5, 3),columns=['A','B','C'])
def _safe_add(df):
# only add to the numeric items
def is_ok(s):
return issubclass(s.dtype.type, (np.integer,np.floating)) and s.dtype != 'uint8'
return DataFrame(dict([ (c,s+1) if is_ok(s) else (c,s) for c, s in compat.iteritems(df) ]))
def _check_get(df, cond, check_dtypes = True):
other1 = _safe_add(df)
rs = df.where(cond, other1)
rs2 = df.where(cond.values, other1)
for k, v in rs.iteritems():
assert_series_equal(v, Series(np.where(cond[k], df[k], other1[k]),index=v.index))
assert_frame_equal(rs, rs2)
# dtypes
if check_dtypes:
self.assertTrue((rs.dtypes == df.dtypes).all() == True)
# check getting
for df in [ default_frame, self.mixed_frame, self.mixed_float, self.mixed_int ]:
cond = df > 0
_check_get(df, cond)
# upcasting case (GH #2794)
df = DataFrame(dict([ (c,Series([1]*3,dtype=c)) for c in ['int64','int32','float32','float64'] ]))
df.ix[1,:] = 0
result = df.where(df>=0).get_dtype_counts()
#### when we don't preserve boolean casts ####
#expected = Series({ 'float32' : 1, 'float64' : 3 })
expected = Series({ 'float32' : 1, 'float64' : 1, 'int32' : 1, 'int64' : 1 })
assert_series_equal(result, expected)
# aligning
def _check_align(df, cond, other, check_dtypes = True):
rs = df.where(cond, other)
for i, k in enumerate(rs.columns):
result = rs[k]
d = df[k].values
c = cond[k].reindex(df[k].index).fillna(False).values
if np.isscalar(other):
o = other
else:
if isinstance(other,np.ndarray):
o = Series(other[:,i],index=result.index).values
else:
o = other[k].values
new_values = d if c.all() else np.where(c, d, o)
expected = Series(new_values,index=result.index)
# since we can't always have the correct numpy dtype
# as numpy doesn't know how to downcast, don't check
assert_series_equal(result, expected, check_dtype=False)
# dtypes
# can't check dtype when other is an ndarray
if check_dtypes and not isinstance(other,np.ndarray):
self.assertTrue((rs.dtypes == df.dtypes).all() == True)
for df in [ self.mixed_frame, self.mixed_float, self.mixed_int ]:
# other is a frame
cond = (df > 0)[1:]
_check_align(df, cond, _safe_add(df))
# check other is ndarray
cond = df > 0
_check_align(df, cond, (_safe_add(df).values))
# integers are upcast, so don't check the dtypes
cond = df > 0
check_dtypes = all([ not issubclass(s.type,np.integer) for s in df.dtypes ])
_check_align(df, cond, np.nan, check_dtypes = check_dtypes)
# invalid conditions
df = default_frame
err1 = (df + 1).values[0:2, :]
self.assertRaises(ValueError, df.where, cond, err1)
err2 = cond.ix[:2, :].values
other1 = _safe_add(df)
self.assertRaises(ValueError, df.where, err2, other1)
self.assertRaises(ValueError, df.mask, True)
self.assertRaises(ValueError, df.mask, 0)
# where inplace
def _check_set(df, cond, check_dtypes = True):
dfi = df.copy()
econd = cond.reindex_like(df).fillna(True)
expected = dfi.mask(~econd)
dfi.where(cond, np.nan, inplace=True)
assert_frame_equal(dfi, expected)
# dtypes (and confirm upcasts)
if check_dtypes:
for k, v in compat.iteritems(df.dtypes):
if issubclass(v.type,np.integer) and not cond[k].all():
v = np.dtype('float64')
self.assertEqual(dfi[k].dtype, v)
for df in [ default_frame, self.mixed_frame, self.mixed_float, self.mixed_int ]:
cond = df > 0
_check_set(df, cond)
cond = df >= 0
_check_set(df, cond)
# aligning
cond = (df >= 0)[1:]
_check_set(df, cond)
def test_where_bug(self):
# GH 2793
df = DataFrame({'a': [1.0, 2.0, 3.0, 4.0], 'b': [4.0, 3.0, 2.0, 1.0]}, dtype = 'float64')
expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [4.0, 3.0, np.nan, np.nan]}, dtype = 'float64')
result = df.where(df > 2, np.nan)
assert_frame_equal(result, expected)
result = df.copy()
result.where(result > 2, np.nan, inplace=True)
assert_frame_equal(result, expected)
# mixed
for dtype in ['int16','int8','int32','int64']:
df = DataFrame({'a': np.array([1, 2, 3, 4],dtype=dtype), 'b': np.array([4.0, 3.0, 2.0, 1.0], dtype = 'float64') })
expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [4.0, 3.0, np.nan, np.nan]}, dtype = 'float64')
result = df.where(df > 2, np.nan)
assert_frame_equal(result, expected)
result = df.copy()
result.where(result > 2, np.nan, inplace=True)
assert_frame_equal(result, expected)
# transpositional issue
# GH7506
a = DataFrame({ 0 : [1,2], 1 : [3,4], 2 : [5,6]})
b = DataFrame({ 0 : [np.nan,8], 1:[9,np.nan], 2:[np.nan,np.nan]})
do_not_replace = b.isnull() | (a > b)
expected = a.copy()
expected[~do_not_replace] = b
result = a.where(do_not_replace,b)
assert_frame_equal(result,expected)
a = DataFrame({ 0 : [4,6], 1 : [1,0]})
b = DataFrame({ 0 : [np.nan,3],1:[3,np.nan]})
do_not_replace = b.isnull() | (a > b)
expected = a.copy()
expected[~do_not_replace] = b
result = a.where(do_not_replace,b)
assert_frame_equal(result,expected)
def test_where_datetime(self):
# GH 3311
df = DataFrame(dict(A = date_range('20130102',periods=5),
B = date_range('20130104',periods=5),
C = np.random.randn(5)))
stamp = datetime(2013,1,3)
result = df[df>stamp]
expected = df.copy()
expected.loc[[0,1],'A'] = np.nan
assert_frame_equal(result,expected)
def test_where_none(self):
# GH 4667
# setting with None changes dtype
df = DataFrame({'series': Series(range(10))}).astype(float)
df[df > 7] = None
expected = DataFrame({'series': Series([0,1,2,3,4,5,6,7,np.nan,np.nan]) })
assert_frame_equal(df, expected)
# GH 7656
df = DataFrame([{'A': 1, 'B': np.nan, 'C': 'Test'}, {'A': np.nan, 'B': 'Test', 'C': np.nan}])
expected = df.where(~isnull(df), None)
with tm.assertRaisesRegexp(TypeError, 'boolean setting on mixed-type'):
df.where(~isnull(df), None, inplace=True)
def test_where_align(self):
def create():
df = DataFrame(np.random.randn(10,3))
df.iloc[3:5,0] = np.nan
df.iloc[4:6,1] = np.nan
df.iloc[5:8,2] = np.nan
return df
# series
df = create()
expected = df.fillna(df.mean())
result = df.where(pd.notnull(df),df.mean(),axis='columns')
assert_frame_equal(result, expected)
df.where(pd.notnull(df),df.mean(),inplace=True,axis='columns')
assert_frame_equal(df, expected)
df = create().fillna(0)
expected = df.apply(lambda x, y: x.where(x>0,y), y=df[0])
result = df.where(df>0,df[0],axis='index')
assert_frame_equal(result, expected)
result = df.where(df>0,df[0],axis='rows')
assert_frame_equal(result, expected)
# frame
df = create()
expected = df.fillna(1)
result = df.where(pd.notnull(df),DataFrame(1,index=df.index,columns=df.columns))
assert_frame_equal(result, expected)
def test_where_complex(self):
# GH 6345
expected = DataFrame([[1+1j, 2], [np.nan, 4+1j]], columns=['a', 'b'])
df = DataFrame([[1+1j, 2], [5+1j, 4+1j]], columns=['a', 'b'])
df[df.abs() >= 5] = np.nan
assert_frame_equal(df,expected)
def test_mask(self):
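# mask is the complement of where: values are replaced where cond is True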
df = DataFrame(np.random.randn(5, 3))
cond = df > 0
rs = df.where(cond, np.nan)
assert_frame_equal(rs, df.mask(df <= 0))
assert_frame_equal(rs, df.mask(~cond))
def test_mask_edge_case_1xN_frame(self):
# GH4071
df = DataFrame([[1, 2]])
res = df.mask(DataFrame([[True, False]]))
expec = DataFrame([[nan, 2]])
assert_frame_equal(res, expec)
#----------------------------------------------------------------------
# Transposing
def test_transpose(self):
frame = self.frame
dft = frame.T
for idx, series in compat.iteritems(dft):
for col, value in compat.iteritems(series):
if np.isnan(value):
self.assertTrue(np.isnan(frame[col][idx]))
else:
self.assertEqual(value, frame[col][idx])
# mixed type
index, data = tm.getMixedTypeDict()
mixed = DataFrame(data, index=index)
mixed_T = mixed.T
for col, s in compat.iteritems(mixed_T):
self.assertEqual(s.dtype, np.object_)
def test_transpose_get_view(self):
dft = self.frame.T
dft.values[:, 5:10] = 5
self.assertTrue((self.frame.values[5:10] == 5).all())
#----------------------------------------------------------------------
# Renaming
def test_rename(self):
mapping = {
'A': 'a',
'B': 'b',
'C': 'c',
'D': 'd'
}
renamed = self.frame.rename(columns=mapping)
renamed2 = self.frame.rename(columns=str.lower)
assert_frame_equal(renamed, renamed2)
assert_frame_equal(renamed2.rename(columns=str.upper),
self.frame, check_names=False)
# index
data = {
'A': {'foo': 0, 'bar': 1}
}
# gets sorted alphabetically
df = DataFrame(data)
renamed = df.rename(index={'foo': 'bar', 'bar': 'foo'})
self.assert_numpy_array_equal(renamed.index, ['foo', 'bar'])
renamed = df.rename(index=str.upper)
self.assert_numpy_array_equal(renamed.index, ['BAR', 'FOO'])
# have to pass something
self.assertRaises(TypeError, self.frame.rename)
# partial columns
renamed = self.frame.rename(columns={'C': 'foo', 'D': 'bar'})
self.assert_numpy_array_equal(renamed.columns, ['A', 'B', 'foo', 'bar'])
# other axis
renamed = self.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
self.assert_numpy_array_equal(renamed.index, ['A', 'B', 'foo', 'bar'])
# index with name
index = Index(['foo', 'bar'], name='name')
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={'foo': 'bar', 'bar': 'foo'})
self.assert_numpy_array_equal(renamed.index, ['bar', 'foo'])
self.assertEqual(renamed.index.name, renamer.index.name)
# MultiIndex
tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')]
tuples_columns = [('fizz1', 'buzz1'), ('fizz2', 'buzz2')]
index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar'])
columns = MultiIndex.from_tuples(tuples_columns, names=['fizz', 'buzz'])
renamer = DataFrame([(0,0),(1,1)], index=index, columns=columns)
renamed = renamer.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},
columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'})
new_index = MultiIndex.from_tuples([('foo3', 'bar1'), ('foo2', 'bar3')])
new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'), ('fizz2', 'buzz3')])
self.assert_numpy_array_equal(renamed.index, new_index)
self.assert_numpy_array_equal(renamed.columns, new_columns)
self.assertEqual(renamed.index.names, renamer.index.names)
self.assertEqual(renamed.columns.names, renamer.columns.names)
def test_rename_nocopy(self):
renamed = self.frame.rename(columns={'C': 'foo'}, copy=False)
renamed['foo'] = 1.
self.assertTrue((self.frame['C'] == 1.).all())
def test_rename_inplace(self):
self.frame.rename(columns={'C': 'foo'})
self.assertIn('C', self.frame)
self.assertNotIn('foo', self.frame)
c_id = id(self.frame['C'])
frame = self.frame.copy()
frame.rename(columns={'C': 'foo'}, inplace=True)
self.assertNotIn('C', frame)
self.assertIn('foo', frame)
self.assertNotEqual(id(frame['foo']), c_id)
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({ 0 : ['foo','bar'], 1 : ['bah','bas'], 2 : [1,2]})
df = df.rename(columns={0 : 'a'})
df = df.rename(columns={1 : 'b'})
df = df.set_index(['a','b'])
df.columns = ['2001-01-01']
expected = DataFrame([[1],[2]],index=MultiIndex.from_tuples([('foo','bah'),('bar','bas')],
names=['a','b']),
columns=['2001-01-01'])
assert_frame_equal(df,expected)
#----------------------------------------------------------------------
# Time series related
def test_diff(self):
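# first discrete difference along the index, i.e. x - x.shift(1)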
the_diff = self.tsframe.diff(1)
assert_series_equal(the_diff['A'],
self.tsframe['A'] - self.tsframe['A'].shift(1))
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = DataFrame({'s': s}).diff()
self.assertEqual(rs.s[1], 1)
# mixed numeric
tf = self.tsframe.astype('float32')
the_diff = tf.diff(1)
assert_series_equal(the_diff['A'],
tf['A'] - tf['A'].shift(1))
def test_diff_timedelta(self):
# GH 4533
df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
Timestamp('20130101 9:02')],
value=[1.0,2.0]))
res = df.diff()
exp = DataFrame([[pd.NaT, np.nan],
[Timedelta('00:01:00'), 1]],
columns=['time', 'value'])
assert_frame_equal(res, exp)
def test_diff_mixed_dtype(self):
df = DataFrame(np.random.randn(5, 3))
df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)
result = df.diff()
self.assertEqual(result[0].dtype, np.float64)
def test_diff_neg_n(self):
rs = self.tsframe.diff(-1)
xp = self.tsframe - self.tsframe.shift(-1)
assert_frame_equal(rs, xp)
def test_diff_float_n(self):
rs = self.tsframe.diff(1.)
xp = self.tsframe.diff(1)
assert_frame_equal(rs, xp)
def test_pct_change(self):
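# fractional change between rows, optionally lagged and with NA filling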
rs = self.tsframe.pct_change(fill_method=None)
assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)
rs = self.tsframe.pct_change(2)
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(2) - 1)
rs = self.tsframe.pct_change(fill_method='bfill', limit=1)
filled = self.tsframe.fillna(method='bfill', limit=1)
assert_frame_equal(rs, filled / filled.shift(1) - 1)
rs = self.tsframe.pct_change(freq='5D')
filled = self.tsframe.fillna(method='pad')
assert_frame_equal(rs, filled / filled.shift(freq='5D') - 1)
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
df = DataFrame({'a': s, 'b': s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])
edf = DataFrame({'a': expected, 'b': expected})
assert_frame_equal(chg, edf)
def test_shift(self):
# naive shift
shiftedFrame = self.tsframe.shift(5)
self.assertTrue(shiftedFrame.index.equals(self.tsframe.index))
shiftedSeries = self.tsframe['A'].shift(5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
shiftedFrame = self.tsframe.shift(-5)
self.assertTrue(shiftedFrame.index.equals(self.tsframe.index))
shiftedSeries = self.tsframe['A'].shift(-5)
assert_series_equal(shiftedFrame['A'], shiftedSeries)
# shift by 0
unshifted = self.tsframe.shift(0)
assert_frame_equal(unshifted, self.tsframe)
# shift by DateOffset
shiftedFrame = self.tsframe.shift(5, freq=datetools.BDay())
self.assertEqual(len(shiftedFrame), len(self.tsframe))
shiftedFrame2 = self.tsframe.shift(5, freq='B')
assert_frame_equal(shiftedFrame, shiftedFrame2)
d = self.tsframe.index[0]
shifted_d = d + datetools.BDay(5)
assert_series_equal(self.tsframe.xs(d),
shiftedFrame.xs(shifted_d))
# shift int frame
int_shifted = self.intframe.shift(1)
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
self.assertTrue(shifted.index.equals(ps.index))
tm.assert_dict_equal(unshifted.ix[:, 0].valid(), ps.ix[:, 0],
compare_keys=False)
shifted2 = ps.shift(1, 'B')
shifted3 = ps.shift(1, datetools.bday)
assert_frame_equal(shifted2, shifted3)
assert_frame_equal(ps, shifted2.shift(-1, 'B'))
assertRaisesRegexp(ValueError, 'does not match PeriodIndex freq',
ps.shift, freq='D')
# shift other axis
# GH 6371
df = DataFrame(np.random.rand(10,5))
expected = pd.concat([DataFrame(np.nan,index=df.index,columns=[0]),df.iloc[:,0:-1]],ignore_index=True,axis=1)
result = df.shift(1,axis=1)
assert_frame_equal(result,expected)
# shift named axis
df = DataFrame(np.random.rand(10,5))
expected = pd.concat([DataFrame(np.nan,index=df.index,columns=[0]),df.iloc[:,0:-1]],ignore_index=True,axis=1)
result = df.shift(1,axis='columns')
assert_frame_equal(result,expected)
def test_shift_bool(self):
df = DataFrame({'high': [True, False],
'low': [False, False]})
rs = df.shift(1)
xp = DataFrame(np.array([[np.nan, np.nan],
[True, False]], dtype=object),
columns=['high', 'low'])
assert_frame_equal(rs, xp)
def test_shift_empty(self):
# Regression test for #8019
df = DataFrame({'foo': []})
rs = df.shift(-1)
assert_frame_equal(df, rs)
def test_tshift(self):
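# tshift moves the index by its frequency instead of shifting the values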
# PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_frame_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=datetools.bday)
assert_frame_equal(shifted, shifted3)
assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
shifted = self.tsframe.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(self.tsframe, unshifted)
shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)
assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(self.tsframe.values,
Index(np.asarray(self.tsframe.index)),
columns=self.tsframe.columns)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_frame_equal(shifted, self.tsframe.tshift(1))
assert_frame_equal(unshifted, inferred_ts)
no_freq = self.tsframe.ix[[0, 5, 7], :]
self.assertRaises(ValueError, no_freq.tshift)
def test_apply(self):
# ufunc
applied = self.frame.apply(np.sqrt)
assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
self.assertEqual(applied['A'], np.mean(self.frame['A']))
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
self.assertEqual(applied[d], np.mean(self.frame.xs(d)))
self.assertIs(applied.index, self.frame.index) # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
self.assertRaises(ValueError, df.apply, lambda x: x, 2)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({ 'A' : date_range('20130101',periods=3), 'B' : pd.to_timedelta(np.arange(3),unit='s') })
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
self.assertTrue(applied.empty)
applied = self.empty.apply(np.mean)
self.assertTrue(applied.empty)
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.ix[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, reduce=False)
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, reduce=True)
assert_series_equal(result, Series([]))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, reduce=False)
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, reduce=True)
assert_series_equal(result, Series([]))
# Ensure that x.append hasn't been called
self.assertEqual(x, [])
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_apply_broadcast(self):
broadcasted = self.frame.apply(np.mean, broadcast=True)
agged = self.frame.apply(np.mean)
for col, ts in compat.iteritems(broadcasted):
self.assertTrue((ts == agged[col]).all())
broadcasted = self.frame.apply(np.mean, axis=1, broadcast=True)
agged = self.frame.apply(np.mean, axis=1)
for idx in broadcasted.index:
self.assertTrue((broadcasted.xs(idx) == agged[idx]).all())
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
self.assertEqual(tapplied[d], np.mean(self.frame.xs(d)))
def test_apply_ignore_failures(self):
result = self.mixed_frame._apply_standard(np.mean, 0,
ignore_failures=True)
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=[])
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'],index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.],index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
tm.assert_isinstance(res, Series)
self.assertIs(res.index, agg_axis)
else:
tm.assert_isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), broadcast=True)
tm.assert_isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.ix[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4,'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notnull(row['C']) and row['C'].startswith('shin')
and row['A'] == 'foo'):
row['D'] = 7
return row
try:
transformed = data.apply(transform, axis=1)
except AttributeError as e:
self.assertEqual(len(e.args), 2)
self.assertEqual(e.args[1], 'occurred at index 4')
self.assertEqual(e.args[0], "'float' object has no attribute 'startswith'")
def test_apply_bug(self):
# GH 6125
import datetime
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime.datetime(2013, 1, 1), 'ABC0', 50],
[datetime.datetime(2013, 1, 2), 'YUM0', 20],
[datetime.datetime(2013, 1, 3), 'DEF0', 20],
[datetime.datetime(2013, 1, 4), 'ABC1', 50],
[datetime.datetime(2013, 1, 5), 'YUM1', 20],
[datetime.datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result,expected)
def test_swapaxes(self):
df = DataFrame(np.random.randn(10, 5))
assert_frame_equal(df.T, df.swapaxes(0, 1))
assert_frame_equal(df.T, df.swapaxes(1, 0))
assert_frame_equal(df, df.swapaxes(0, 0))
self.assertRaises(ValueError, df.swapaxes, 2, 5)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result.convert_objects(), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = DataFrame(np.tile(self.frame.index,
(len(self.frame.columns), 1)).T,
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
def test_apply_multi_index(self):
s = DataFrame([[1,2], [3,4], [5,6]])
s.index = MultiIndex.from_arrays([['a','a','b'], ['c','d','d']])
s.columns = ['col1','col2']
res = s.apply(lambda x: Series({'min': min(x), 'max': max(x)}), 1)
tm.assert_isinstance(res.index, MultiIndex)
def test_applymap(self):
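# applymap applies the function elementwise, unlike apply which works per column/row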
applied = self.frame.applymap(lambda x: x * 2)
assert_frame_equal(applied, self.frame * 2)
result = self.frame.applymap(type)
# GH #465, function returning tuples
result = self.frame.applymap(lambda x: (x, x))
tm.assert_isinstance(result['A'][0], tuple)
# GH 2909, object conversion to float in constructor?
df = DataFrame(data=[1,'a'])
result = df.applymap(lambda x: x)
self.assertEqual(result.dtypes[0], object)
df = DataFrame(data=[1.,'a'])
result = df.applymap(lambda x: x)
self.assertEqual(result.dtypes[0], object)
# GH2786
df = DataFrame(np.random.random((3,4)))
df2 = df.copy()
cols = ['a','a','a','a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
assert_frame_equal(result,expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime','timedelta']:
self.assertEqual(result.loc[0,f], str(df.loc[0,f]))
def test_filter(self):
# items
filtered = self.frame.filter(['A', 'B', 'E'])
self.assertEqual(len(filtered.columns), 2)
self.assertNotIn('E', filtered)
filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')
self.assertEqual(len(filtered.columns), 2)
self.assertNotIn('E', filtered)
# other axis
idx = self.frame.index[0:4]
filtered = self.frame.filter(idx, axis='index')
expected = self.frame.reindex(index=idx)
assert_frame_equal(filtered,expected)
# like
fcopy = self.frame.copy()
fcopy['AA'] = 1
filtered = fcopy.filter(like='A')
self.assertEqual(len(filtered.columns), 2)
self.assertIn('AA', filtered)
# like with ints in column names
df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])
filtered = df.filter(like='_')
self.assertEqual(len(filtered.columns), 2)
# pass in None
with assertRaisesRegexp(TypeError, 'Must pass'):
self.frame.filter(items=None)
# objects
filtered = self.mixed_frame.filter(like='foo')
self.assertIn('foo', filtered)
# unicode columns, won't ascii-encode
df = self.frame.rename(columns={'B': u('\u2202')})
filtered = df.filter(like='C')
self.assertTrue('C' in filtered)
def test_filter_regex_search(self):
fcopy = self.frame.copy()
fcopy['AA'] = 1
# regex
filtered = fcopy.filter(regex='[A]+')
self.assertEqual(len(filtered.columns), 2)
self.assertIn('AA', filtered)
# doesn't have to be at the beginning
df = DataFrame({'aBBa': [1, 2],
'BBaBB': [1, 2],
'aCCa': [1, 2],
'aCCaBB': [1, 2]})
result = df.filter(regex='BB')
exp = df[[x for x in df.columns if 'BB' in x]]
assert_frame_equal(result, exp)
def test_filter_corner(self):
empty = DataFrame()
result = empty.filter([])
assert_frame_equal(result, empty)
result = empty.filter(like='foo')
assert_frame_equal(result, empty)
def test_select(self):
f = lambda x: x.weekday() == 2
result = self.tsframe.select(f, axis=0)
expected = self.tsframe.reindex(
index=self.tsframe.index[[f(x) for x in self.tsframe.index]])
assert_frame_equal(result, expected)
result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)
expected = self.frame.reindex(columns=['B', 'D'])
assert_frame_equal(result, expected, check_names=False) # TODO should reindex check_names?
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
df = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=index)
# no change, position
result = df.reorder_levels([0, 1, 2])
assert_frame_equal(df, result)
# no change, labels
result = df.reorder_levels(['L0', 'L1', 'L2'])
assert_frame_equal(df, result)
# rotate, position
result = df.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
labels=[[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
assert_frame_equal(result, expected)
result = df.reorder_levels([0, 0, 0])
e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
labels=[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
names=['L0', 'L0', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
assert_frame_equal(result, expected)
result = df.reorder_levels(['L0', 'L0', 'L0'])
assert_frame_equal(result, expected)
def test_sort_index(self):
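# covers index sorts on both axes plus value sorts via by=/columns=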
frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.ix[[3, 2, 4, 1]]
sorted_df = unordered.sort_index()
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = unordered.sort_index(ascending=False)
expected = frame[::-1]
assert_frame_equal(sorted_df, expected)
# axis=1
unordered = frame.ix[:, ['D', 'B', 'C', 'A']]
sorted_df = unordered.sort_index(axis=1)
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = unordered.sort_index(axis=1, ascending=False)
expected = frame.ix[:, ::-1]
assert_frame_equal(sorted_df, expected)
# by column
sorted_df = frame.sort_index(by='A')
indexer = frame['A'].argsort().values
expected = frame.ix[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_index(by='A', ascending=False)
indexer = indexer[::-1]
expected = frame.ix[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort(columns='A', ascending=False)
assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort(columns=['A'], ascending=[False])
assert_frame_equal(sorted_df, expected)
# check for now
sorted_df = frame.sort(columns='A')
assert_frame_equal(sorted_df, expected[::-1])
expected = frame.sort_index(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort(columns=['A', 'B'], ascending=False)
expected = frame.sort_index(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort(columns=['A', 'B'])
assert_frame_equal(sorted_df, expected[::-1])
self.assertRaises(ValueError, frame.sort_index, axis=2, inplace=True)
msg = 'When sorting by column, axis must be 0'
with assertRaisesRegexp(ValueError, msg):
frame.sort_index(by='A', axis=1)
msg = r'Length of ascending \(5\) != length of by \(2\)'
with assertRaisesRegexp(ValueError, msg):
frame.sort_index(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_nan(self):
# GH3917
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# sort one column only
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort(['A'], na_position='first')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort(['A'], na_position='first', ascending=False)
assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{'A': [1, 1, 2, 4, 6, 8, nan],
'B': [2, 9, nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2])
sorted_df = df.sort(['A','B'])
assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 2, 9, nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5])
sorted_df = df.sort(['A','B'], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort(['A','B'], ascending=[1,0], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{'A': [8, 6, 4, 2, 1, 1, nan],
'B': [4, 5, 5, nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2])
sorted_df = df.sort(['A','B'], ascending=[0,1], na_position='last')
assert_frame_equal(sorted_df, expected)
# Test DataFrame with nan label
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index = [1, 2, 3, 4, 5, 6, nan])
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort(kind='quicksort', ascending=True, na_position='last')
expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index = [1, 2, 3, 4, 5, 6, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort(na_position='first')
expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
'B': [5, 9, nan, 5, 2, 5, 4]},
index = [nan, 1, 2, 3, 4, 5, 6])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort(kind='quicksort', ascending=False)
expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
'B': [4, 5, 2, 5, nan, 9, 5]},
index = [6, 5, 4, 3, 2, 1, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort(kind='quicksort', ascending=False, na_position='first')
expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
'B': [5, 4, 5, 2, 5, nan, 9]},
index = [nan, 6, 5, 4, 3, 2, 1])
assert_frame_equal(sorted_df, expected)
def test_stable_descending_sort(self):
# GH #6399
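        # mergesort is stable, so rows with equal sort keys keep their
        # original relative order even when sorting descending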
df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
columns=['sort_col', 'order'])
sorted_df = df.sort_index(by='sort_col', kind='mergesort',
ascending=False)
assert_frame_equal(df, sorted_df)
def test_stable_descending_multicolumn_sort(self):
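        # stability should also hold when sorting on multiple keys with
        # kind='mergesort' and NaNs placed first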
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# test stable mergesort
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 2, 9]},
index=[2, 5, 4, 6, 1, 3, 0])
sorted_df = df.sort(['A','B'], ascending=[0,1], na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort(['A','B'], ascending=[0,0], na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_sort_index_multicolumn(self):
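        # multi-column sorts should match an explicit np.lexsort on the keys
        # (the last key passed to lexsort is the primary key)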
import random
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
result = frame.sort_index(by=['A', 'B'])
indexer = np.lexsort((frame['B'], frame['A']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
result = frame.sort_index(by=['A', 'B'], ascending=False)
indexer = np.lexsort((frame['B'].rank(ascending=False),
frame['A'].rank(ascending=False)))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
result = frame.sort_index(by=['B', 'A'])
indexer = np.lexsort((frame['A'], frame['B']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
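        # after an inplace sort the column data should be a new object,
        # not a view of the unsorted original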
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.ix[[3, 2, 4, 1]]
a_id = id(unordered['A'])
df = unordered.copy()
df.sort_index(inplace=True)
expected = frame
assert_frame_equal(df, expected)
self.assertNotEqual(a_id, id(df['A']))
df = unordered.copy()
df.sort_index(ascending=False, inplace=True)
expected = frame[::-1]
assert_frame_equal(df, expected)
# axis=1
unordered = frame.ix[:, ['D', 'B', 'C', 'A']]
df = unordered.copy()
df.sort_index(axis=1, inplace=True)
expected = frame
assert_frame_equal(df, expected)
df = unordered.copy()
df.sort_index(axis=1, ascending=False, inplace=True)
expected = frame.ix[:, ::-1]
assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
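        # per-key ascending/descending flags, checked against an explicit
        # np.lexsort of the transformed keys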
import random
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
result = df.sort_index(by=['A', 'B'], ascending=[1, 0])
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
assert_frame_equal(result, expected)
# test with multiindex, too
idf = df.set_index(['A', 'B'])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
assert_frame_equal(result, expected)
# also, Series!
result = idf['C'].sort_index(ascending=[1, 0])
assert_series_equal(result, expected['C'])
def test_sort_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
sorted_df = frame.copy()
sorted_df.sort(columns='A', inplace=True)
expected = frame.sort_index(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort(columns='A', ascending=False, inplace=True)
expected = frame.sort_index(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort(columns=['A', 'B'], ascending=False, inplace=True)
expected = frame.sort_index(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
def test_sort_index_duplicates(self):
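        # sorting by a label that refers to duplicate columns is ambiguous
        # and must raise instead of silently picking one column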
df = DataFrame([lrange(5,9), lrange(4)],
columns=['a', 'a', 'b', 'b'])
with assertRaisesRegexp(ValueError, 'duplicate'):
df.sort_index(by='a')
with assertRaisesRegexp(ValueError, 'duplicate'):
df.sort_index(by=['a'])
with assertRaisesRegexp(ValueError, 'duplicate'):
# multi-column 'by' is separate codepath
df.sort_index(by=['a', 'b'])
# with multi-index
# GH4370
df = DataFrame(np.random.randn(4,2),columns=MultiIndex.from_tuples([('a',0),('a',1)]))
with assertRaisesRegexp(ValueError, 'levels'):
df.sort_index(by='a')
# convert tuples to a list of tuples
expected = df.sort_index(by=[('a',1)])
result = df.sort_index(by=('a',1))
assert_frame_equal(result, expected)
def test_sortlevel(self):
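        # with sort_remaining=False only the requested level(s) are ordered,
        # so a frame already ordered on those levels comes back unchanged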
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
res = df.sortlevel('A', sort_remaining=False)
assert_frame_equal(df, res)
res = df.sortlevel(['A', 'B'], sort_remaining=False)
assert_frame_equal(df, res)
def test_sort_datetimes(self):
# GH 3461, argsort / lexsort differences for a datetime column
df = DataFrame(['a','a','a','b','c','d','e','f','g'],
columns=['A'],
index=date_range('20130101',periods=9))
dts = [Timestamp(x)
for x in ['2004-02-11','2004-01-21','2004-01-26',
'2005-09-20','2010-10-04','2009-05-12',
'2008-11-12','2010-09-28','2010-09-28']]
df['B'] = dts[::2] + dts[1::2]
df['C'] = 2.
df['A1'] = 3.
df1 = df.sort(columns='A')
df2 = df.sort(columns=['A'])
assert_frame_equal(df1,df2)
df1 = df.sort(columns='B')
df2 = df.sort(columns=['B'])
assert_frame_equal(df1,df2)
def test_frame_column_inplace_sort_exception(self):
s = self.frame['A']
with assertRaisesRegexp(ValueError, "This Series is a view"):
s.sort()
cp = s.copy()
cp.sort() # it works!
def test_combine_first(self):
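        # combine_first fills missing entries in the caller with values from
        # the other frame, over the union of their indexes and columns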
# disjoint
head, tail = self.frame[:5], self.frame[5:]
combined = head.combine_first(tail)
reordered_frame = self.frame.reindex(combined.index)
assert_frame_equal(combined, reordered_frame)
self.assertTrue(tm.equalContents(combined.columns, self.frame.columns))
assert_series_equal(combined['A'], reordered_frame['A'])
# same index
fcopy = self.frame.copy()
fcopy['A'] = 1
del fcopy['C']
fcopy2 = self.frame.copy()
fcopy2['B'] = 0
del fcopy2['D']
combined = fcopy.combine_first(fcopy2)
self.assertTrue((combined['A'] == 1).all())
assert_series_equal(combined['B'], fcopy['B'])
assert_series_equal(combined['C'], fcopy2['C'])
assert_series_equal(combined['D'], fcopy['D'])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head['A'] = 1
combined = head.combine_first(tail)
self.assertTrue((combined['A'][:10] == 1).all())
# reverse overlap
tail['A'][:10] = 0
combined = tail.combine_first(head)
self.assertTrue((combined['A'][:10] == 0).all())
# no overlap
f = self.frame[:10]
g = self.frame[10:]
combined = f.combine_first(g)
assert_series_equal(combined['A'].reindex(f.index), f['A'])
assert_series_equal(combined['A'].reindex(g.index), g['A'])
# corner cases
comb = self.frame.combine_first(self.empty)
assert_frame_equal(comb, self.frame)
comb = self.empty.combine_first(self.frame)
assert_frame_equal(comb, self.frame)
comb = self.frame.combine_first(DataFrame(index=["faz", "boo"]))
self.assertTrue("faz" in comb.index)
# #2525
df = DataFrame({'a': [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame({}, columns=['b'])
result = df.combine_first(df2)
self.assertTrue('b' in result)
def test_combine_first_mixed_bug(self):
idx = Index(['a', 'b', 'c', 'e'])
ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx)
ser2 = Series(['a', 'b', 'c', 'e'], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame1 = DataFrame({"col0": ser1,
"col2": ser2,
"col3": ser3})
idx = Index(['a', 'b', 'c', 'f'])
ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx)
ser2 = Series(['a', 'b', 'c', 'f'], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame2 = DataFrame({"col1": ser1,
"col2": ser2,
"col5": ser3})
combined = frame1.combine_first(frame2)
self.assertEqual(len(combined.columns), 5)
# gh 3016 (same as in update)
df = DataFrame([[1.,2.,False, True],[4.,5.,True,False]],
columns=['A','B','bool1','bool2'])
other = DataFrame([[45,45]],index=[0],columns=['A','B'])
result = df.combine_first(other)
assert_frame_equal(result, df)
df.ix[0,'A'] = np.nan
result = df.combine_first(other)
df.ix[0,'A'] = 45
assert_frame_equal(result, df)
# doc example
df1 = DataFrame({'A' : [1., np.nan, 3., 5., np.nan],
'B' : [np.nan, 2., 3., np.nan, 6.]})
df2 = DataFrame({'A' : [5., 2., 4., np.nan, 3., 7.],
'B' : [np.nan, np.nan, 3., 4., 6., 8.]})
result = df1.combine_first(df2)
expected = DataFrame({ 'A' : [1,2,3,5,3,7.], 'B' : [np.nan,2,3,4,6,8] })
assert_frame_equal(result,expected)
# GH3552, return object dtype with bools
df1 = DataFrame([[np.nan, 3.,True], [-4.6, np.nan, True], [np.nan, 7., False]])
df2 = DataFrame([[-42.6, np.nan, True], [-5., 1.6, False]], index=[1, 2])
result = df1.combine_first(df2)[2]
expected = Series([True,True,False])
assert_series_equal(result,expected)
        # GH 3593, converting datetime64[ns] incorrectly
df0 = DataFrame({"a":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})
df1 = DataFrame({"a":[None, None, None]})
df2 = df1.combine_first(df0)
assert_frame_equal(df2,df0)
df2 = df0.combine_first(df1)
assert_frame_equal(df2,df0)
df0 = DataFrame({"a":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})
df1 = DataFrame({"a":[datetime(2000, 1, 2), None, None]})
df2 = df1.combine_first(df0)
result = df0.copy()
result.iloc[0,:] = df1.iloc[0,:]
assert_frame_equal(df2,result)
df2 = df0.combine_first(df1)
assert_frame_equal(df2,df0)
def test_update(self):
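        # update modifies the frame in place with non-NA values from other,
        # aligned on index and columns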
df = DataFrame([[1.5, nan, 3.],
[1.5, nan, 3.],
[1.5, nan, 3],
[1.5, nan, 3]])
other = DataFrame([[3.6, 2., np.nan],
[np.nan, np.nan, 7]], index=[1, 3])
df.update(other)
expected = DataFrame([[1.5, nan, 3],
[3.6, 2, 3],
[1.5, nan, 3],
[1.5, nan, 7.]])
assert_frame_equal(df, expected)
def test_update_dtypes(self):
# gh 3016
df = DataFrame([[1.,2.,False, True],[4.,5.,True,False]],
columns=['A','B','bool1','bool2'])
other = DataFrame([[45,45]],index=[0],columns=['A','B'])
df.update(other)
expected = DataFrame([[45.,45.,False, True],[4.,5.,True,False]],
columns=['A','B','bool1','bool2'])
assert_frame_equal(df, expected)
def test_update_nooverwrite(self):
df = DataFrame([[1.5, nan, 3.],
[1.5, nan, 3.],
[1.5, nan, 3],
[1.5, nan, 3]])
other = DataFrame([[3.6, 2., np.nan],
[np.nan, np.nan, 7]], index=[1, 3])
df.update(other, overwrite=False)
expected = DataFrame([[1.5, nan, 3],
[1.5, 2, 3],
[1.5, nan, 3],
[1.5, nan, 3.]])
assert_frame_equal(df, expected)
def test_update_filtered(self):
df = DataFrame([[1.5, nan, 3.],
[1.5, nan, 3.],
[1.5, nan, 3],
[1.5, nan, 3]])
other = DataFrame([[3.6, 2., np.nan],
[np.nan, np.nan, 7]], index=[1, 3])
df.update(other, filter_func=lambda x: x > 2)
expected = DataFrame([[1.5, nan, 3],
[1.5, nan, 3],
[1.5, nan, 3],
[1.5, nan, 7.]])
assert_frame_equal(df, expected)
def test_update_raise(self):
df = DataFrame([[1.5, 1, 3.],
[1.5, nan, 3.],
[1.5, nan, 3],
[1.5, nan, 3]])
other = DataFrame([[2., nan],
[nan, 7]], index=[1, 3], columns=[1, 2])
with assertRaisesRegexp(ValueError, "Data overlaps"):
df.update(other, raise_conflict=True)
def test_update_from_non_df(self):
d = {'a': Series([1, 2, 3, 4]), 'b': Series([5, 6, 7, 8])}
df = DataFrame(d)
d['a'] = Series([5, 6, 7, 8])
df.update(d)
expected = DataFrame(d)
assert_frame_equal(df, expected)
d = {'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]}
df = DataFrame(d)
d['a'] = [5, 6, 7, 8]
df.update(d)
expected = DataFrame(d)
assert_frame_equal(df, expected)
def test_combineAdd(self):
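        # combineAdd: element-wise addition over the union of labels, where a
        # value paired with a missing one passes through unchanged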
# trivial
comb = self.frame.combineAdd(self.frame)
assert_frame_equal(comb, self.frame * 2)
# more rigorous
a = DataFrame([[1., nan, nan, 2., nan]],
columns=np.arange(5))
b = DataFrame([[2., 3., nan, 2., 6., nan]],
columns=np.arange(6))
expected = DataFrame([[3., 3., nan, 4., 6., nan]],
columns=np.arange(6))
result = a.combineAdd(b)
assert_frame_equal(result, expected)
result2 = a.T.combineAdd(b.T)
assert_frame_equal(result2, expected.T)
expected2 = a.combine(b, operator.add, fill_value=0.)
assert_frame_equal(expected, expected2)
# corner cases
comb = self.frame.combineAdd(self.empty)
assert_frame_equal(comb, self.frame)
comb = self.empty.combineAdd(self.frame)
assert_frame_equal(comb, self.frame)
# integer corner case
df1 = DataFrame({'x': [5]})
df2 = DataFrame({'x': [1]})
df3 = DataFrame({'x': [6]})
comb = df1.combineAdd(df2)
assert_frame_equal(comb, df3)
# mixed type GH2191
df1 = DataFrame({'A': [1, 2], 'B': [3, 4]})
df2 = DataFrame({'A': [1, 2], 'C': [5, 6]})
rs = df1.combineAdd(df2)
xp = DataFrame({'A': [2, 4], 'B': [3, 4.], 'C': [5, 6.]})
assert_frame_equal(xp, rs)
# TODO: test integer fill corner?
def test_combineMult(self):
# trivial
comb = self.frame.combineMult(self.frame)
assert_frame_equal(comb, self.frame ** 2)
# corner cases
comb = self.frame.combineMult(self.empty)
assert_frame_equal(comb, self.frame)
comb = self.empty.combineMult(self.frame)
assert_frame_equal(comb, self.frame)
def test_combine_generic(self):
df1 = self.frame
df2 = self.frame.ix[:-5, ['A', 'B', 'C']]
combined = df1.combine(df2, np.add)
combined2 = df2.combine(df1, np.add)
self.assertTrue(combined['D'].isnull().all())
self.assertTrue(combined2['D'].isnull().all())
chunk = combined.ix[:-5, ['A', 'B', 'C']]
chunk2 = combined2.ix[:-5, ['A', 'B', 'C']]
exp = self.frame.ix[:-5, ['A', 'B', 'C']].reindex_like(chunk) * 2
assert_frame_equal(chunk, exp)
assert_frame_equal(chunk2, exp)
def test_clip(self):
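        # clip_upper caps values at the threshold, clip_lower floors them, and
        # clip with equal lower/upper bounds collapses every value to that bound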
median = self.frame.median().median()
capped = self.frame.clip_upper(median)
self.assertFalse((capped.values > median).any())
floored = self.frame.clip_lower(median)
self.assertFalse((floored.values < median).any())
double = self.frame.clip(upper=median, lower=median)
self.assertFalse((double.values != median).any())
def test_dataframe_clip(self):
# GH #2747
df = DataFrame(np.random.randn(1000,2))
for lb, ub in [(-1,1),(1,-1)]:
clipped_df = df.clip(lb, ub)
lb, ub = min(lb,ub), max(ub,lb)
lb_mask = df.values <= lb
ub_mask = df.values >= ub
mask = ~lb_mask & ~ub_mask
self.assertTrue((clipped_df.values[lb_mask] == lb).all() == True)
self.assertTrue((clipped_df.values[ub_mask] == ub).all() == True)
self.assertTrue((clipped_df.values[mask] == df.values[mask]).all() == True)
def test_get_X_columns(self):
# numeric and object columns
df = DataFrame({'a': [1, 2, 3],
'b' : [True, False, True],
'c': ['foo', 'bar', 'baz'],
'd': [None, None, None],
'e': [3.14, 0.577, 2.773]})
self.assert_numpy_array_equal(df._get_numeric_data().columns,
['a', 'b', 'e'])
def test_is_mixed_type(self):
self.assertFalse(self.frame._is_mixed_type)
self.assertTrue(self.mixed_frame._is_mixed_type)
def test_get_numeric_data(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', 'f' : Timestamp('20010102')},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, 'float64' : 1, datetime64name: 1, objectname : 1})
result.sort_index()
expected.sort_index()
assert_series_equal(result, expected)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
'd' : np.array([1.]*10,dtype='float32'),
'e' : np.array([1]*10,dtype='int32'),
'f' : np.array([1]*10,dtype='int16'),
'g' : Timestamp('20010102')},
index=np.arange(10))
result = df._get_numeric_data()
expected = df.ix[:, ['a', 'b','d','e','f']]
assert_frame_equal(result, expected)
only_obj = df.ix[:, ['c','g']]
result = only_obj._get_numeric_data()
expected = df.ix[:, []]
assert_frame_equal(result, expected)
df = DataFrame.from_dict({'a':[1,2], 'b':['foo','bar'],'c':[np.pi,np.e]})
result = df._get_numeric_data()
expected = DataFrame.from_dict({'a':[1,2], 'c':[np.pi,np.e]})
assert_frame_equal(result, expected)
df = result.copy()
result = df._get_numeric_data()
expected = df
assert_frame_equal(result, expected)
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
        # Boolean and integer data are included in .describe() output; string data isn't
self.assert_numpy_array_equal(df.describe().columns, ['bool_data', 'int_data'])
bool_describe = df.describe()['bool_data']
# Both the min and the max values should stay booleans
self.assertEqual(bool_describe['min'].dtype, np.bool_)
self.assertEqual(bool_describe['max'].dtype, np.bool_)
self.assertFalse(bool_describe['min'])
self.assertTrue(bool_describe['max'])
# For numeric operations, like mean or median, the values True/False are cast to
# the integer values 1 and 0
assert_almost_equal(bool_describe['mean'], 0.4)
assert_almost_equal(bool_describe['50%'], 0)
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
assert_almost_equal(test.values, [2, 150, 'abcde'])
assert_series_equal(test, df.T.sum(axis=1))
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f,
has_skipna=False,
has_numeric_only=True,
check_dtype=False,
check_dates=True)
# corner case
frame = DataFrame()
ct1 = frame.count(1)
tm.assert_isinstance(ct1, Series)
ct2 = frame.count(0)
tm.assert_isinstance(ct2, Series)
# GH #423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
assert_series_equal(result, expected)
def test_sum(self):
self._check_stat_op('sum', np.sum, has_numeric_only=True)
# mixed types (with upcasting happening)
self._check_stat_op('sum', np.sum, frame=self.mixed_float.astype('float32'),
has_numeric_only=True, check_dtype=False, check_less_precise=True)
def test_stat_operators_attempt_obj_array(self):
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'],
dtype='O')
methods = ['sum', 'mean', 'prod', 'var', 'std', 'skew', 'min', 'max']
# GH #676
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
for meth in methods:
self.assertEqual(df.values.dtype, np.object_)
result = getattr(df, meth)(1)
expected = getattr(df.astype('f8'), meth)(1)
assert_series_equal(result, expected)
def test_mean(self):
self._check_stat_op('mean', np.mean, check_dates=True)
def test_product(self):
self._check_stat_op('product', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper, check_dates=True)
def test_min(self):
self._check_stat_op('min', np.min, check_dates=True)
self._check_stat_op('min', np.min, frame=self.intframe)
def test_cummin(self):
self.tsframe.ix[5:10, 0] = nan
self.tsframe.ix[10:15, 1] = nan
self.tsframe.ix[15:, 2] = nan
# axis = 0
cummin = self.tsframe.cummin()
expected = self.tsframe.apply(Series.cummin)
assert_frame_equal(cummin, expected)
# axis = 1
cummin = self.tsframe.cummin(axis=1)
expected = self.tsframe.apply(Series.cummin, axis=1)
assert_frame_equal(cummin, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin()
        # axis=1 result should preserve the original frame's shape
cummin_xs = self.tsframe.cummin(axis=1)
self.assertEqual(np.shape(cummin_xs), np.shape(self.tsframe))
def test_cummax(self):
self.tsframe.ix[5:10, 0] = nan
self.tsframe.ix[10:15, 1] = nan
self.tsframe.ix[15:, 2] = nan
# axis = 0
cummax = self.tsframe.cummax()
expected = self.tsframe.apply(Series.cummax)
assert_frame_equal(cummax, expected)
# axis = 1
cummax = self.tsframe.cummax(axis=1)
expected = self.tsframe.apply(Series.cummax, axis=1)
assert_frame_equal(cummax, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax()
        # axis=1 result should preserve the original frame's shape
cummax_xs = self.tsframe.cummax(axis=1)
self.assertEqual(np.shape(cummax_xs), np.shape(self.tsframe))
def test_max(self):
self._check_stat_op('max', np.max, check_dates=True)
self._check_stat_op('max', np.max, frame=self.intframe)
def test_mad(self):
f = lambda x: np.abs(x - x.mean()).mean()
self._check_stat_op('mad', f)
def test_var_std(self):
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
result = self.tsframe.std(ddof=4)
expected = self.tsframe.apply(lambda x: x.std(ddof=4))
assert_almost_equal(result, expected)
result = self.tsframe.var(ddof=4)
expected = self.tsframe.apply(lambda x: x.var(ddof=4))
assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
self.assertFalse((result < 0).any())
if nanops._USE_BOTTLENECK:
nanops._USE_BOTTLENECK = False
result = nanops.nanvar(arr, axis=0)
self.assertFalse((result < 0).any())
nanops._USE_BOTTLENECK = True
def test_sem(self):
alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x))
self._check_stat_op('sem', alt)
result = self.tsframe.sem(ddof=4)
expected = self.tsframe.apply(lambda x: x.std(ddof=4)/np.sqrt(len(x)))
assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
self.assertFalse((result < 0).any())
if nanops._USE_BOTTLENECK:
nanops._USE_BOTTLENECK = False
result = nanops.nansem(arr, axis=0)
self.assertFalse((result < 0).any())
nanops._USE_BOTTLENECK = True
def test_skew(self):
tm._skip_if_no_scipy()
from scipy.stats import skew
def alt(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', alt)
def test_kurt(self):
tm._skip_if_no_scipy()
from scipy.stats import kurtosis
def alt(x):
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
self._check_stat_op('kurt', alt)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
assert_series_equal(df.kurt(), df.kurt(level=0).xs('bar'))
def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
has_numeric_only=False, check_dtype=True, check_dates=False,
check_less_precise=False):
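        # shared harness: seed NaNs into the frame, then compare the reduction
        # `name` against `alternative` column- and row-wise (with and without
        # skipna), and exercise dtypes, bad axes, mixed frames and the all-NA case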
if frame is None:
frame = self.frame
# set some NAs
frame.ix[5:10] = np.nan
frame.ix[15:20, -2:] = np.nan
f = getattr(frame, name)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
_f = getattr(df, name)
result = _f()
self.assertIsInstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, name)()
self.assertIsInstance(result, Series)
self.assertTrue(len(result))
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
self.assertEqual(lcd_dtype, result0.dtype)
self.assertEqual(lcd_dtype, result1.dtype)
# result = f(axis=1)
# comp = frame.apply(alternative, axis=1).reindex(result.index)
# assert_series_equal(result, comp)
# bad axis
assertRaisesRegexp(ValueError, 'No axis named 2', f, axis=2)
# make sure works on mixed-type frame
getattr(self.mixed_frame, name)(axis=0)
getattr(self.mixed_frame, name)(axis=1)
if has_numeric_only:
getattr(self.mixed_frame, name)(axis=0, numeric_only=True)
getattr(self.mixed_frame, name)(axis=1, numeric_only=True)
getattr(self.frame, name)(axis=0, numeric_only=False)
getattr(self.frame, name)(axis=1, numeric_only=False)
# all NA case
if has_skipna:
all_na = self.frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
self.assertTrue(np.isnan(r0).all())
self.assertTrue(np.isnan(r1).all())
def test_mode(self):
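        # mode() returns every modal value per column; columns with fewer
        # modes are padded with NaN so the result stays rectangular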
df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11],
"B": [10, 10, 10, np.nan, 3, 4],
"C": [8, 8, 8, 9, 9, 9],
"D": range(6),
"E": [8, 8, 1, 1, 3, 3]})
assert_frame_equal(df[["A"]].mode(),
pd.DataFrame({"A": [12]}))
assert_frame_equal(df[["D"]].mode(),
pd.DataFrame(pd.Series([], dtype="int64"),
columns=["D"]))
assert_frame_equal(df[["E"]].mode(),
pd.DataFrame(pd.Series([1, 3, 8], dtype="int64"),
columns=["E"]))
assert_frame_equal(df[["A", "B"]].mode(),
pd.DataFrame({"A": [12], "B": [10.]}))
assert_frame_equal(df.mode(),
pd.DataFrame({"A": [12, np.nan, np.nan],
"B": [10, np.nan, np.nan],
"C": [8, 9, np.nan],
"D": [np.nan, np.nan, np.nan],
"E": [1, 3, 8]}))
# outputs in sorted order
df["C"] = list(reversed(df["C"]))
com.pprint_thing(df["C"])
com.pprint_thing(df["C"].mode())
a, b = (df[["A", "B", "C"]].mode(),
pd.DataFrame({"A": [12, np.nan],
"B": [10, np.nan],
"C": [8, 9]}))
com.pprint_thing(a)
com.pprint_thing(b)
assert_frame_equal(a, b)
# should work with heterogeneous types
df = pd.DataFrame({"A": range(6),
"B": pd.date_range('2011', periods=6),
"C": list('abcdef')})
exp = pd.DataFrame({"A": pd.Series([], dtype=df["A"].dtype),
"B": pd.Series([], dtype=df["B"].dtype),
"C": pd.Series([], dtype=df["C"].dtype)})
assert_frame_equal(df.mode(), exp)
# and also when not empty
df.loc[1, "A"] = 0
df.loc[4, "B"] = df.loc[3, "B"]
df.loc[5, "C"] = 'e'
exp = pd.DataFrame({"A": pd.Series([0], dtype=df["A"].dtype),
"B": pd.Series([df.loc[3, "B"]], dtype=df["B"].dtype),
"C": pd.Series(['e'], dtype=df["C"].dtype)})
assert_frame_equal(df.mode(), exp)
def test_sum_corner(self):
axis0 = self.empty.sum(0)
axis1 = self.empty.sum(1)
tm.assert_isinstance(axis0, Series)
tm.assert_isinstance(axis1, Series)
self.assertEqual(len(axis0), 0)
self.assertEqual(len(axis1), 0)
def test_sum_object(self):
values = self.frame.values.astype(int)
frame = DataFrame(values, index=self.frame.index,
columns=self.frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self):
# ensure this works, bug report
bools = np.isnan(self.frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self):
# unit test when have object data
the_mean = self.mixed_frame.mean(axis=0)
the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)
self.assertTrue(the_sum.index.equals(the_mean.index))
self.assertTrue(len(the_mean.index) < len(self.mixed_frame.columns))
        # axis=1 sum/mean on mixed types; just check that it runs and the indexes align
the_mean = self.mixed_frame.mean(axis=1)
the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)
self.assertTrue(the_sum.index.equals(the_mean.index))
# take mean of boolean column
self.frame['bool'] = self.frame['A'] > 0
means = self.frame.mean(0)
self.assertEqual(means['bool'], self.frame['bool'].values.mean())
def test_stats_mixed_type(self):
# don't blow up
self.mixed_frame.std(1)
self.mixed_frame.var(1)
self.mixed_frame.mean(1)
self.mixed_frame.skew(1)
def test_median_corner(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper, frame=self.intframe,
check_dtype=False, check_dates=True)
def test_quantile(self):
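        # quantile(q) should agree with numpy.percentile at 100 * q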
from numpy import percentile
q = self.tsframe.quantile(0.1, axis=0)
self.assertEqual(q['A'], percentile(self.tsframe['A'], 10))
q = self.tsframe.quantile(0.9, axis=1)
q = self.intframe.quantile(0.1)
self.assertEqual(q['A'], percentile(self.intframe['A'], 10))
# test degenerate case
q = DataFrame({'x': [], 'y': []}).quantile(0.1, axis=0)
assert(np.isnan(q['x']) and np.isnan(q['y']))
# non-numeric exclusion
df = DataFrame({'col1':['A','A','B','B'], 'col2':[1,2,3,4]})
rs = df.quantile(0.5)
xp = df.median()
assert_series_equal(rs, xp)
# axis
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3])
assert_series_equal(result, expected)
result = df.quantile([.5, .75], axis=1)
expected = DataFrame({1: [1.5, 1.75], 2: [2.5, 2.75],
3: [3.5, 3.75]}, index=["0.5", "0.75"])
assert_frame_equal(result, expected)
# We may want to break API in the future to change this
# so that we exclude non-numeric along the same axis
# See GH #7312
df = DataFrame([[1, 2, 3],
['a', 'b', 4]])
result = df.quantile(.5, axis=1)
expected = Series([3., 4.], index=[0, 1])
assert_series_equal(result, expected)
def test_quantile_multi(self):
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
result = df.quantile([.25, .5])
expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
index=[.25, .5], columns=['a', 'b', 'c'])
assert_frame_equal(result, expected)
# axis = 1
result = df.quantile([.25, .5], axis=1)
expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
index=[.25, .5], columns=[0, 1, 2])
# empty
result = DataFrame({'x': [], 'y': []}).quantile([0.1, .9], axis=0)
expected = DataFrame({'x': [np.nan, np.nan], 'y': [np.nan, np.nan]},
index=[.1, .9])
assert_frame_equal(result, expected)
def test_quantile_datetime(self):
df = DataFrame({'a': pd.to_datetime(['2010', '2011']), 'b': [0, 5]})
# exclude datetime
result = df.quantile(.5)
expected = Series([2.5], index=['b'])
# datetime
result = df.quantile(.5, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'), 2.5],
index=['a', 'b'])
assert_series_equal(result, expected)
# datetime w/ multi
result = df.quantile([.5], numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'), 2.5]],
index=[.5], columns=['a', 'b'])
assert_frame_equal(result, expected)
# axis = 1
df['c'] = pd.to_datetime(['2011', '2012'])
result = df[['a', 'c']].quantile(.5, axis=1, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')],
index=[0, 1])
assert_series_equal(result, expected)
result = df[['a', 'c']].quantile([.5], axis=1, numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')]],
index=[0.5], columns=[0, 1])
assert_frame_equal(result, expected)
def test_cumsum(self):
self.tsframe.ix[5:10, 0] = nan
self.tsframe.ix[10:15, 1] = nan
self.tsframe.ix[15:, 2] = nan
# axis = 0
cumsum = self.tsframe.cumsum()
expected = self.tsframe.apply(Series.cumsum)
assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = self.tsframe.cumsum(axis=1)
expected = self.tsframe.apply(Series.cumsum, axis=1)
assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum()
        # axis=1 result should preserve the original frame's shape
cumsum_xs = self.tsframe.cumsum(axis=1)
self.assertEqual(np.shape(cumsum_xs), np.shape(self.tsframe))
def test_cumprod(self):
self.tsframe.ix[5:10, 0] = nan
self.tsframe.ix[10:15, 1] = nan
self.tsframe.ix[15:, 2] = nan
# axis = 0
cumprod = self.tsframe.cumprod()
expected = self.tsframe.apply(Series.cumprod)
assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = self.tsframe.cumprod(axis=1)
expected = self.tsframe.apply(Series.cumprod, axis=1)
assert_frame_equal(cumprod, expected)
        # axis=1 result should preserve the original frame's shape
cumprod_xs = self.tsframe.cumprod(axis=1)
self.assertEqual(np.shape(cumprod_xs), np.shape(self.tsframe))
# ints
df = self.tsframe.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = self.tsframe.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_rank(self):
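        # ranks should match scipy.stats.rankdata computed per column / per
        # row, with missing values left as NaN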
tm._skip_if_no_scipy()
from scipy.stats import rankdata
self.frame['A'][::2] = np.nan
self.frame['B'][::3] = np.nan
self.frame['C'][::4] = np.nan
self.frame['D'][::5] = np.nan
ranks0 = self.frame.rank()
ranks1 = self.frame.rank(1)
mask = np.isnan(self.frame.values)
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp0[mask] = np.nan
exp1 = np.apply_along_axis(rankdata, 1, fvals)
exp1[mask] = np.nan
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
# integers
df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))
result = df.rank()
exp = df.astype(float).rank()
assert_frame_equal(result, exp)
result = df.rank(1)
exp = df.astype(float).rank(1)
assert_frame_equal(result, exp)
def test_rank2(self):
from datetime import datetime
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0
result = df.rank(1, pct=True)
assert_frame_equal(result, expected)
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = df.rank(0) / 2.0
result = df.rank(0, pct=True)
assert_frame_equal(result, expected)
df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']])
expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])
result = df.rank(1, numeric_only=False)
assert_frame_equal(result, expected)
expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])
result = df.rank(0, numeric_only=False)
assert_frame_equal(result, expected)
df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']])
expected = DataFrame([[2.0, nan, 1.0], [1.0, 3.0, 2.0]])
result = df.rank(1, numeric_only=False)
assert_frame_equal(result, expected)
expected = DataFrame([[2.0, nan, 1.0], [1.0, 1.0, 2.0]])
result = df.rank(0, numeric_only=False)
assert_frame_equal(result, expected)
        # mixed datetime / NaN values; this does not work without an extensive workaround
data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 1)]]
df = DataFrame(data)
# check the rank
expected = DataFrame([[2., nan, 1.],
[2., 3., 1.]])
result = df.rank(1, numeric_only=False)
assert_frame_equal(result, expected)
# mixed-type frames
self.mixed_frame['datetime'] = datetime.now()
self.mixed_frame['timedelta'] = timedelta(days=1,seconds=1)
result = self.mixed_frame.rank(1)
expected = self.mixed_frame.rank(1, numeric_only=True)
assert_frame_equal(result, expected)
df = DataFrame({"a":[1e-20, -5, 1e-20+1e-40, 10, 1e60, 1e80, 1e-30]})
exp = DataFrame({"a":[ 3.5, 1. , 3.5, 5. , 6. , 7. , 2. ]})
assert_frame_equal(df.rank(), exp)
def test_rank_na_option(self):
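        # na_option controls whether missing values get the lowest ('top') or
        # highest ('bottom') ranks instead of being left as NaN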
tm._skip_if_no_scipy()
from scipy.stats import rankdata
self.frame['A'][::2] = np.nan
self.frame['B'][::3] = np.nan
self.frame['C'][::4] = np.nan
self.frame['D'][::5] = np.nan
# bottom
ranks0 = self.frame.rank(na_option='bottom')
ranks1 = self.frame.rank(1, na_option='bottom')
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp1 = np.apply_along_axis(rankdata, 1, fvals)
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
# top
ranks0 = self.frame.rank(na_option='top')
ranks1 = self.frame.rank(1, na_option='top')
fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
fval1 = self.frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fval0)
exp1 = np.apply_along_axis(rankdata, 1, fval1)
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
        # descending: na_option='top' still assigns NaNs the lowest ranks
        # (expected built by negating the inf-filled values)
ranks0 = self.frame.rank(na_option='top', ascending=False)
ranks1 = self.frame.rank(1, na_option='top', ascending=False)
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fvals)
exp1 = np.apply_along_axis(rankdata, 1, -fvals)
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
        # descending: na_option='bottom' still assigns NaNs the highest ranks
        # (expected built by negating the values filled with min - 1)
ranks0 = self.frame.rank(na_option='bottom', ascending=False)
ranks1 = self.frame.rank(1, na_option='bottom', ascending=False)
fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
fval1 = self.frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fval0)
exp1 = np.apply_along_axis(rankdata, 1, -fval1)
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
def test_axis_aliases(self):
f = self.frame
# reg name
expected = f.sum(axis=0)
result = f.sum(axis='index')
assert_series_equal(result, expected)
expected = f.sum(axis=1)
result = f.sum(axis='columns')
assert_series_equal(result, expected)
def test_combine_first_mixed(self):
a = Series(['a', 'b'], index=lrange(2))
b = Series(lrange(2), index=lrange(2))
f = DataFrame({'A': a, 'B': b})
a = Series(['a', 'b'], index=lrange(5, 7))
b = Series(lrange(2), index=lrange(5, 7))
g = DataFrame({'A': a, 'B': b})
combined = f.combine_first(g)
def test_more_asMatrix(self):
values = self.mixed_frame.as_matrix()
self.assertEqual(values.shape[1], len(self.mixed_frame.columns))
def test_reindex_boolean(self):
frame = DataFrame(np.ones((10, 2), dtype=bool),
index=np.arange(0, 20, 2),
columns=[0, 2])
reindexed = frame.reindex(np.arange(10))
self.assertEqual(reindexed.values.dtype, np.object_)
self.assertTrue(isnull(reindexed[0][1]))
reindexed = frame.reindex(columns=lrange(3))
self.assertEqual(reindexed.values.dtype, np.object_)
self.assertTrue(isnull(reindexed[1]).all())
def test_reindex_objects(self):
reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])
self.assertIn('foo', reindexed)
reindexed = self.mixed_frame.reindex(columns=['A', 'B'])
self.assertNotIn('foo', reindexed)
def test_reindex_corner(self):
index = Index(['a', 'b', 'c'])
dm = self.empty.reindex(index=[1, 2, 3])
reindexed = dm.reindex(columns=index)
self.assertTrue(reindexed.columns.equals(index))
        # a column introduced by reindexing is all-NaN, so it comes back as float64
smaller = self.intframe.reindex(columns=['A', 'B', 'E'])
self.assertEqual(smaller['E'].dtype, np.float64)
def test_reindex_axis(self):
cols = ['A', 'B', 'E']
reindexed1 = self.intframe.reindex_axis(cols, axis=1)
reindexed2 = self.intframe.reindex(columns=cols)
assert_frame_equal(reindexed1, reindexed2)
rows = self.intframe.index[0:5]
reindexed1 = self.intframe.reindex_axis(rows, axis=0)
reindexed2 = self.intframe.reindex(index=rows)
assert_frame_equal(reindexed1, reindexed2)
self.assertRaises(ValueError, self.intframe.reindex_axis, rows, axis=2)
# no-op case
cols = self.frame.columns.copy()
newFrame = self.frame.reindex_axis(cols, axis=1)
assert_frame_equal(newFrame, self.frame)
def test_reindex_with_nans(self):
df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
columns=['a', 'b'],
index=[100.0, 101.0, np.nan, 102.0, 103.0])
result = df.reindex(index=[101.0, 102.0, 103.0])
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[103.0])
expected = df.iloc[[4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[101.0])
expected = df.iloc[[1]]
assert_frame_equal(result, expected)
def test_reindex_multi(self):
df = DataFrame(np.random.randn(3, 3))
result = df.reindex(lrange(4), lrange(4))
expected = df.reindex(lrange(4)).reindex(columns=lrange(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(lrange(4), lrange(4))
expected = df.reindex(lrange(4)).reindex(columns=lrange(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(lrange(2), lrange(2))
expected = df.reindex(lrange(2)).reindex(columns=lrange(2))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randn(5, 3) + 1j, columns=['a', 'b', 'c'])
result = df.reindex(index=[0, 1], columns=['a', 'b'])
expected = df.reindex([0, 1]).reindex(columns=['a', 'b'])
assert_frame_equal(result, expected)
def test_rename_objects(self):
renamed = self.mixed_frame.rename(columns=str.upper)
self.assertIn('FOO', renamed)
self.assertNotIn('foo', renamed)
def test_fill_corner(self):
self.mixed_frame.ix[5:20,'foo'] = nan
self.mixed_frame.ix[-10:,'A'] = nan
filled = self.mixed_frame.fillna(value=0)
self.assertTrue((filled.ix[5:20,'foo'] == 0).all())
del self.mixed_frame['foo']
empty_float = self.frame.reindex(columns=[])
result = empty_float.fillna(value=0)
def test_count_objects(self):
dm = DataFrame(self.mixed_frame._series)
df = DataFrame(self.mixed_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
result = dm.cumsum()
#----------------------------------------------------------------------
# Stacking / unstacking
def test_stack_unstack(self):
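        # stack() pivots the columns into the inner index level and unstack()
        # reverses it, so a round trip recovers the original frame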
stacked = self.frame.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, self.frame)
assert_frame_equal(unstacked_df['bar'], self.frame)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, self.frame)
assert_frame_equal(unstacked_cols_df['bar'].T, self.frame)
def test_stack_ints(self):
df = DataFrame(
np.random.randn(30, 27),
columns=MultiIndex.from_tuples(
list(itertools.product(range(3), repeat=3))
)
)
assert_frame_equal(
df.stack(level=[1, 2]),
df.stack(level=1).stack(level=1)
)
assert_frame_equal(
df.stack(level=[-2, -1]),
df.stack(level=1).stack(level=1)
)
df_named = df.copy()
df_named.columns.set_names(range(3), inplace=True)
assert_frame_equal(
df_named.stack(level=[1, 2]),
df_named.stack(level=1).stack(level=1)
)
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ['exp', 'animal', 1]
assert_frame_equal(df2.stack(level=['animal', 1]),
animal_hair_stacked, check_names=False)
assert_frame_equal(df2.stack(level=['exp', 1]),
exp_hair_stacked, check_names=False)
# When mixed types are passed and the ints are not level
# names, raise
self.assertRaises(ValueError, df2.stack, level=['animal', 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ['exp', 'animal', 0]
assert_frame_equal(df3.stack(level=['animal', 0]),
animal_hair_stacked, check_names=False)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=['exp', 'animal'])
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
check_names=False )
assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
check_names=False)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
check_names=False)
def test_unstack_bool(self):
df = DataFrame([False, False],
index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),
columns=['col'])
rs = df.unstack()
xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],
dtype=object),
index=['a', 'b'],
columns=MultiIndex.from_arrays([['col', 'col'],
['c', 'l']]))
assert_frame_equal(rs, xp)
def test_unstack_to_series(self):
# check reversibility
data = self.frame.unstack()
self.assertTrue(isinstance(data, Series))
undo = data.unstack().T
assert_frame_equal(undo, self.frame)
# check NA handling
data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
data.index = Index(['a', 'b', 'c'])
result = data.unstack()
midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4],
[1, 2, 3, 4],
[2, 1, 3, 4],
[2, 2, 3, 4]]
df = DataFrame(rows, columns=list('ABCD'))
result = df.get_dtype_counts()
expected = Series({'int64' : 4})
assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(['A','B'])
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64' : 4})
assert_series_equal(result, expected)
# mixed
df2 = df.set_index(['A','B'])
df2['C'] = 3.
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64' : 2, 'float64' : 2})
assert_series_equal(result, expected)
df2['D'] = 'foo'
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'float64' : 2, 'object' : 2})
assert_series_equal(result, expected)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')],
names=['c1', 'c1'])
df = DataFrame([1, 2], index=idx)
with tm.assertRaises(ValueError):
df.unstack('c1')
with tm.assertRaises(ValueError):
df.T.stack('c1')
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame([1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, 'A', 'B')]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ('B',)])
ecols = MultiIndex.from_tuples([(t, 'A')])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(np.arange(3 * len(multiindex)).reshape(3, len(multiindex)),
columns=multiindex)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
df.columns = MultiIndex.from_tuples(df.columns.get_values(),
names=df.columns.names)
expected = df.stack(level=level, dropna=False)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
full_multiindex = MultiIndex.from_tuples([('B', 'x'), ('B', 'z'),
('A', 'y'),
('C', 'x'), ('C', 'u')],
names=['Upper', 'Lower'])
for multiindex_columns in ([0, 1, 2, 3, 4],
[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2], [1, 2, 3], [2, 3, 4],
[0, 1], [0, 2], [0, 3],
[0], [2], [4]):
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
if len(multiindex_columns) > 1:
multiindex_columns.reverse()
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]])
result = df.stack(dropna=False)
expected = DataFrame([[0, 2], [1, nan], [3, 5], [4, nan]],
index=MultiIndex(levels=[[0, 1], ['u', 'x', 'y', 'z']],
labels=[[0, 0, 1, 1], [1, 3, 1, 3]],
names=[None, 'Lower']),
columns=Index(['B', 'C'], name='Upper'),
dtype=df.dtypes[0])
assert_frame_equal(result, expected)
def test_repr_with_mi_nat(self):
df = DataFrame({'X': [1, 2]},
index=[[pd.NaT, pd.Timestamp('20130101')], ['a', 'b']])
res = repr(df)
exp = ' X\nNaT a 1\n2013-01-01 b 2'
nose.tools.assert_equal(res, exp)
def test_reset_index(self):
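        # reset_index moves index levels back into columns, falling back to
        # 'index' / 'level_N' names when levels are unnamed or collide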
stacked = self.frame.stack()[::2]
stacked = DataFrame({'foo': stacked, 'bar': stacked})
names = ['first', 'second']
stacked.index.names = names
deleveled = stacked.reset_index()
for i, (lev, lab) in enumerate(zip(stacked.index.levels,
stacked.index.labels)):
values = lev.take(lab)
name = names[i]
assert_almost_equal(values, deleveled[name])
stacked.index.names = [None, None]
deleveled2 = stacked.reset_index()
self.assert_numpy_array_equal(deleveled['first'],
deleveled2['level_0'])
self.assert_numpy_array_equal(deleveled['second'],
deleveled2['level_1'])
# default name assigned
rdf = self.frame.reset_index()
self.assert_numpy_array_equal(rdf['index'], self.frame.index.values)
# default name assigned, corner case
df = self.frame.copy()
df['index'] = 'foo'
rdf = df.reset_index()
self.assert_numpy_array_equal(rdf['level_0'], self.frame.index.values)
# but this is ok
self.frame.index.name = 'index'
deleveled = self.frame.reset_index()
self.assert_numpy_array_equal(deleveled['index'],
self.frame.index.values)
self.assert_numpy_array_equal(deleveled.index,
np.arange(len(deleveled)))
# preserve column names
self.frame.columns.name = 'columns'
resetted = self.frame.reset_index()
self.assertEqual(resetted.columns.name, 'columns')
# only remove certain columns
frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
rs = frame.reset_index(['A', 'B'])
assert_frame_equal(rs, self.frame, check_names=False) # TODO should reset_index check_names ?
rs = frame.reset_index(['index', 'A', 'B'])
assert_frame_equal(rs, self.frame.reset_index(), check_names=False)
rs = frame.reset_index(['index', 'A', 'B'])
assert_frame_equal(rs, self.frame.reset_index(), check_names=False)
rs = frame.reset_index('A')
xp = self.frame.reset_index().set_index(['index', 'B'])
assert_frame_equal(rs, xp, check_names=False)
# test resetting in place
df = self.frame.copy()
resetted = self.frame.reset_index()
df.reset_index(inplace=True)
assert_frame_equal(df, resetted, check_names=False)
frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
rs = frame.reset_index('A', drop=True)
xp = self.frame.copy()
del xp['A']
xp = xp.set_index(['B'], append=True)
assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series((9.81 * time ** 2) / 2,
index=Index(time, name='time'),
name='speed')
df = DataFrame(s1)
resetted = s1.reset_index()
self.assertEqual(resetted['time'].dtype, np.float64)
resetted = df.reset_index()
self.assertEqual(resetted['time'].dtype, np.float64)
def test_reset_index_multiindex_col(self):
vals = np.random.randn(3, 3).astype(object)
idx = ['x', 'y', 'z']
full = np.hstack(([[x] for x in idx], vals))
df = DataFrame(vals, Index(idx, name='a'),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index()
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index(col_fill=None)
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index(col_level=1, col_fill='blah')
xp = DataFrame(full, columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
df = DataFrame(vals,
MultiIndex.from_arrays([[0, 1, 2], ['x', 'y', 'z']],
names=['d', 'a']),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index('a', )
xp = DataFrame(full, Index([0, 1, 2], name='d'),
columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill=None)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill='blah', col_level=1)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
def test_reset_index_with_datetimeindex_cols(self):
# GH5818
#
df = pd.DataFrame([[1, 2], [3, 4]],
columns=pd.date_range('1/1/2013', '1/2/2013'),
index=['A', 'B'])
result = df.reset_index()
expected = pd.DataFrame([['A', 1, 2], ['B', 3, 4]],
columns=['index', datetime(2013, 1, 1),
datetime(2013, 1, 2)])
assert_frame_equal(result, expected)
#----------------------------------------------------------------------
# Tests to cope with refactored internals
def test_as_matrix_numeric_cols(self):
self.frame['foo'] = 'bar'
values = self.frame.as_matrix(['A', 'B', 'C', 'D'])
self.assertEqual(values.dtype, np.float64)
def test_as_matrix_lcd(self):
# mixed lcd
values = self.mixed_float.as_matrix(['A', 'B', 'C', 'D'])
self.assertEqual(values.dtype, np.float64)
values = self.mixed_float.as_matrix(['A', 'B', 'C' ])
self.assertEqual(values.dtype, np.float32)
values = self.mixed_float.as_matrix(['C'])
self.assertEqual(values.dtype, np.float16)
values = self.mixed_int.as_matrix(['A','B','C','D'])
self.assertEqual(values.dtype, np.int64)
values = self.mixed_int.as_matrix(['A','D'])
self.assertEqual(values.dtype, np.int64)
        # mixed integer columns are upcast to a common integer dtype (see assertions below)
values = self.mixed_int.as_matrix(['A','B','C'])
self.assertEqual(values.dtype, np.int64)
values = self.mixed_int.as_matrix(['A','C'])
self.assertEqual(values.dtype, np.int32)
values = self.mixed_int.as_matrix(['C','D'])
self.assertEqual(values.dtype, np.int64)
values = self.mixed_int.as_matrix(['A'])
self.assertEqual(values.dtype, np.int32)
values = self.mixed_int.as_matrix(['C'])
self.assertEqual(values.dtype, np.uint8)
def test_constructor_with_convert(self):
# this is actually mostly a test of lib.maybe_convert_objects
# #2845
df = DataFrame({'A' : [2**63-1] })
result = df['A']
expected = Series(np.asarray([2**63-1], np.int64))
assert_series_equal(result, expected)
df = DataFrame({'A' : [2**63] })
result = df['A']
expected = Series(np.asarray([2**63], np.object_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [datetime(2005, 1, 1), True] })
result = df['A']
expected = Series(np.asarray([datetime(2005, 1, 1), True], np.object_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [None, 1] })
result = df['A']
expected = Series(np.asarray([np.nan, 1], np.float_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [1.0, 2] })
result = df['A']
expected = Series(np.asarray([1.0, 2], np.float_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [1.0+2.0j, 3] })
result = df['A']
expected = Series(np.asarray([1.0+2.0j, 3], np.complex_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [1.0+2.0j, 3.0] })
result = df['A']
expected = Series(np.asarray([1.0+2.0j, 3.0], np.complex_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [1.0+2.0j, True] })
result = df['A']
expected = Series(np.asarray([1.0+2.0j, True], np.object_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [1.0, None] })
result = df['A']
expected = Series(np.asarray([1.0, np.nan], np.float_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [1.0+2.0j, None] })
result = df['A']
expected = Series(np.asarray([1.0+2.0j, np.nan], np.complex_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [2.0, 1, True, None] })
result = df['A']
expected = Series(np.asarray([2.0, 1, True, None], np.object_))
assert_series_equal(result, expected)
df = DataFrame({'A' : [2.0, 1, datetime(2006, 1, 1), None] })
result = df['A']
expected = Series(np.asarray([2.0, 1, datetime(2006, 1, 1),
None], np.object_))
assert_series_equal(result, expected)
def test_construction_with_mixed(self):
# test construction edge cases with mixed types
        # mixed datetime / NaN values; this does not work without an extensive workaround
data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 1)]]
df = DataFrame(data)
# check dtypes
result = df.get_dtype_counts().order()
expected = Series({ 'datetime64[ns]' : 3 })
# mixed-type frames
self.mixed_frame['datetime'] = datetime.now()
self.mixed_frame['timedelta'] = timedelta(days=1,seconds=1)
self.assertEqual(self.mixed_frame['datetime'].dtype, 'M8[ns]')
self.assertEqual(self.mixed_frame['timedelta'].dtype, 'm8[ns]')
result = self.mixed_frame.get_dtype_counts().order()
expected = Series({ 'float64' : 4,
'object' : 1,
'datetime64[ns]' : 1,
'timedelta64[ns]' : 1}).order()
assert_series_equal(result,expected)
def test_construction_with_conversions(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1,2,3],dtype='timedelta64[s]')
s = Series(arr)
expected = Series(timedelta_range('00:00:01',periods=3,freq='s'))
assert_series_equal(s,expected)
df = DataFrame(index=range(3))
df['A'] = arr
expected = DataFrame({'A' : timedelta_range('00:00:01',periods=3,freq='s')},
index=range(3))
assert_frame_equal(df,expected)
# convert from a numpy array of non-ns datetime64
        # note: numpy parses datetime64 strings in LOCAL time;
        # this seems to work for M8[D], but not for M8[s]
s = Series(np.array(['2013-01-01','2013-01-02','2013-01-03'],dtype='datetime64[D]'))
assert_series_equal(s,Series(date_range('20130101',periods=3,freq='D')))
#s = Series(np.array(['2013-01-01 00:00:01','2013-01-01 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]'))
#assert_series_equal(s,date_range('20130101 00:00:01',period=3,freq='s'))
expected = DataFrame({
'dt1' : Timestamp('20130101'),
'dt2' : date_range('20130101',periods=3),
#'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),
},index=range(3))
df = DataFrame(index=range(3))
df['dt1'] = np.datetime64('2013-01-01')
df['dt2'] = np.array(['2013-01-01','2013-01-02','2013-01-03'],dtype='datetime64[D]')
#df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')
assert_frame_equal(df, expected)
def test_constructor_frame_copy(self):
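        # DataFrame(frame, copy=True) must yield an independent copy:
        # mutating the copy should not touch the source frame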
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
self.assertTrue((cop['A'] == 5).all())
self.assertFalse((self.frame['A'] == 5).all())
def test_constructor_ndarray_copy(self):
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
self.assertTrue((df.values[5] == 5).all())
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
self.assertFalse((df.values[6] == 6).all())
def test_constructor_series_copy(self):
series = self.frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
self.assertFalse((series['A'] == 5).all())
def test_constructor_compound_dtypes(self):
# GH 5191
        # compound dtypes should raise a NotImplementedError
def f(dtype):
return DataFrame(data = list(itertools.repeat((datetime(2001, 1, 1), "aa", 20), 9)),
columns=["A", "B", "C"], dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A","datetime64[h]"), ("B","str"), ("C","int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def test_assign_columns(self):
self.frame['hi'] = 'there'
frame = self.frame.copy()
frame.columns = ['foo', 'bar', 'baz', 'quux', 'foo2']
assert_series_equal(self.frame['C'], frame['baz'])
assert_series_equal(self.frame['hi'], frame['foo2'])
def test_columns_with_dups(self):
# GH 3468 related
# basic
df = DataFrame([[1,2]], columns=['a','a'])
df.columns = ['a','a.1']
str(df)
expected = DataFrame([[1,2]], columns=['a','a.1'])
assert_frame_equal(df, expected)
df = DataFrame([[1,2,3]], columns=['b','a','a'])
df.columns = ['b','a','a.1']
str(df)
expected = DataFrame([[1,2,3]], columns=['b','a','a.1'])
assert_frame_equal(df, expected)
# with a dup index
df = DataFrame([[1,2]], columns=['a','a'])
df.columns = ['b','b']
str(df)
expected = DataFrame([[1,2]], columns=['b','b'])
assert_frame_equal(df, expected)
# multi-dtype
df = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=['a','a','b','b','d','c','c'])
df.columns = list('ABCDEFG')
str(df)
expected = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=list('ABCDEFG'))
assert_frame_equal(df, expected)
# this is an error because we cannot disambiguate the dup columns
        self.assertRaises(Exception, lambda: DataFrame([[1,2,'foo','bar']], columns=['a','a','a','a']))
# dups across blocks
df_float = DataFrame(np.random.randn(10, 3),dtype='float64')
df_int = DataFrame(np.random.randn(10, 3),dtype='int64')
df_bool = DataFrame(True,index=df_float.index,columns=df_float.columns)
df_object = DataFrame('foo',index=df_float.index,columns=df_float.columns)
df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=df_float.columns)
df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)
self.assertEqual(len(df._data._blknos), len(df.columns))
self.assertEqual(len(df._data._blklocs), len(df.columns))
# testing iget
for i in range(len(df.columns)):
df.iloc[:,i]
# dup columns across dtype GH 2079/2194
vals = [[1, -1, 2.], [2, -2, 3.]]
rs = DataFrame(vals, columns=['A', 'A', 'B'])
xp = DataFrame(vals)
xp.columns = ['A', 'A', 'B']
assert_frame_equal(rs, xp)
def test_insert_column_bug_4032(self):
# GH4032, inserting a column and renaming causing errors
df = DataFrame({'b': [1.1, 2.2]})
df = df.rename(columns={})
df.insert(0, 'a', [1, 2])
result = df.rename(columns={})
str(result)
expected = DataFrame([[1,1.1],[2, 2.2]],columns=['a','b'])
assert_frame_equal(result,expected)
df.insert(0, 'c', [1.3, 2.3])
result = df.rename(columns={})
str(result)
expected = DataFrame([[1.3,1,1.1],[2.3,2, 2.2]],columns=['c','a','b'])
assert_frame_equal(result,expected)
def test_cast_internals(self):
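        # constructing from the internal BlockManager with a dtype should match
        # constructing from the column dict and casting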
casted = DataFrame(self.frame._data, dtype=int)
expected = DataFrame(self.frame._series, dtype=int)
assert_frame_equal(casted, expected)
casted = DataFrame(self.frame._data, dtype=np.int32)
expected = DataFrame(self.frame._series, dtype=np.int32)
assert_frame_equal(casted, expected)
def test_consolidate(self):
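        # consolidate() merges blocks of the same dtype into a single block and
        # returns a copy unless inplace=True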
self.frame['E'] = 7.
consolidated = self.frame.consolidate()
self.assertEqual(len(consolidated._data.blocks), 1)
        # ensure consolidate() returns a copy, not the same object
recons = consolidated.consolidate()
self.assertIsNot(recons, consolidated)
assert_frame_equal(recons, consolidated)
self.frame['F'] = 8.
self.assertEqual(len(self.frame._data.blocks), 3)
self.frame.consolidate(inplace=True)
self.assertEqual(len(self.frame._data.blocks), 1)
def test_consolidate_inplace(self):
frame = self.frame.copy()
# triggers in-place consolidation
for letter in range(ord('A'), ord('Z')):
self.frame[chr(letter)] = chr(letter)
def test_as_matrix_consolidate(self):
self.frame['E'] = 7.
self.assertFalse(self.frame._data.is_consolidated())
_ = self.frame.as_matrix()
self.assertTrue(self.frame._data.is_consolidated())
def test_modify_values(self):
self.frame.values[5] = 5
self.assertTrue((self.frame.values[5] == 5).all())
# unconsolidated
self.frame['E'] = 7.
self.frame.values[6] = 6
self.assertTrue((self.frame.values[6] == 6).all())
def test_boolean_set_uncons(self):
self.frame['E'] = 7.
expected = self.frame.values.copy()
expected[expected > 1] = 2
self.frame[self.frame > 1] = 2
assert_almost_equal(expected, self.frame.values)
def test_xs_view(self):
"""
in 0.14 this will return a view if possible
a copy otherwise, but this is numpy dependent
"""
dm = DataFrame(np.arange(20.).reshape(4, 5),
index=lrange(4), columns=lrange(5))
dm.xs(2)[:] = 10
self.assertTrue((dm.xs(2) == 10).all())
def test_boolean_indexing(self):
idx = lrange(3)
cols = ['A','B','C']
df1 = DataFrame(index=idx, columns=cols,
data=np.array([[0.0, 0.5, 1.0],
[1.5, 2.0, 2.5],
[3.0, 3.5, 4.0]],
dtype=float))
df2 = DataFrame(index=idx, columns=cols,
data=np.ones((len(idx), len(cols))))
expected = DataFrame(index=idx, columns=cols,
data=np.array([[0.0, 0.5, 1.0],
[1.5, 2.0, -1],
[-1, -1, -1]], dtype=float))
df1[df1 > 2.0 * df2] = -1
assert_frame_equal(df1, expected)
with assertRaisesRegexp(ValueError, 'Item wrong length'):
df1[df1.index[:-1] > 2] = -1
def test_boolean_indexing_mixed(self):
df = DataFrame(
{long(0): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
long(1): {35: np.nan,
40: 0.32632316859446198,
43: np.nan,
49: 0.32632316859446198,
50: 0.39114724480578139},
long(2): {35: np.nan, 40: np.nan, 43: 0.29012581014105987, 49: np.nan, 50: np.nan},
long(3): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
long(4): {35: 0.34215328467153283, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
'y': {35: 0, 40: 0, 43: 0, 49: 0, 50: 1}})
# mixed int/float ok
df2 = df.copy()
df2[df2>0.3] = 1
expected = df.copy()
expected.loc[40,1] = 1
expected.loc[49,1] = 1
expected.loc[50,1] = 1
expected.loc[35,4] = 1
assert_frame_equal(df2,expected)
df['foo'] = 'test'
with tm.assertRaisesRegexp(TypeError, 'boolean setting on mixed-type'):
df[df > 0.3] = 1
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isnull(df)
self.assertEqual(bools.sum(axis=1)[0], 10)
def test_fillna_col_reordering(self):
idx = lrange(20)
cols = ["COL." + str(i) for i in range(5, 0, -1)]
data = np.random.rand(20, 5)
df = DataFrame(index=lrange(20), columns=cols, data=data)
filled = df.fillna(method='ffill')
self.assertEqual(df.columns.tolist(), filled.columns.tolist())
def test_take(self):
# homogeneous
#----------------------------------------
order = [3, 1, 2, 0]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['D', 'B', 'C', 'A']]
assert_frame_equal(result, expected, check_names=False)
        # negative indices
order = [2,1,-1]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['C', 'B', 'D']]
assert_frame_equal(result, expected, check_names=False)
# illegal indices
self.assertRaises(IndexError, df.take, [3,1,2,30], axis=0)
self.assertRaises(IndexError, df.take, [3,1,2,-31], axis=0)
self.assertRaises(IndexError, df.take, [3,1,2,5], axis=1)
self.assertRaises(IndexError, df.take, [3,1,2,-5], axis=1)
# mixed-dtype
#----------------------------------------
order = [4, 1, 2, 0, 3]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['foo', 'B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
        # negative indices
order = [4,1,-2]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['foo', 'B', 'D']]
assert_frame_equal(result, expected)
# by dtype
order = [1, 2, 0, 3]
for df in [self.mixed_float,self.mixed_int]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
def test_iterkv_deprecation(self):
with tm.assert_produces_warning(DeprecationWarning):
self.mixed_float.iterkv()
def test_iterkv_names(self):
for k, v in compat.iteritems(self.mixed_frame):
self.assertEqual(v.name, k)
def test_series_put_names(self):
series = self.mixed_frame._series
for k, v in compat.iteritems(series):
self.assertEqual(v.name, k)
def test_dot(self):
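        # dot() should align on the shared axis and accept DataFrame, Series and
        # raw-array arguments; shape/label mismatches raise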
a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
columns=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
columns=['one', 'two'])
result = a.dot(b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
# Check alignment
b1 = b.reindex(index=reversed(b.index))
        result = a.dot(b1)
assert_frame_equal(result, expected)
# Check series argument
result = a.dot(b['one'])
assert_series_equal(result, expected['one'])
result = a.dot(b1['one'])
assert_series_equal(result, expected['one'])
# can pass correct-length arrays
row = a.ix[0].values
result = a.dot(row)
exp = a.dot(a.ix[0])
assert_series_equal(result, exp)
with assertRaisesRegexp(ValueError, 'Dot product shape mismatch'):
a.dot(row[:-1])
a = np.random.rand(1, 5)
b = np.random.rand(5, 1)
A = DataFrame(a)
B = DataFrame(b)
# it works
result = A.dot(b)
# unaligned
df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])
assertRaisesRegexp(ValueError, 'aligned', df.dot, df2)
def test_idxmin(self):
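        # idxmin must agree with applying Series.idxmin for every axis/skipna
        # combination; an invalid axis raises ValueError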
frame = self.frame
frame.ix[5:10] = np.nan
frame.ix[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, self.intframe]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(
Series.idxmin, axis=axis, skipna=skipna)
assert_series_equal(result, expected)
self.assertRaises(ValueError, frame.idxmin, axis=2)
def test_idxmax(self):
frame = self.frame
frame.ix[5:10] = np.nan
frame.ix[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, self.intframe]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(
Series.idxmax, axis=axis, skipna=skipna)
assert_series_equal(result, expected)
self.assertRaises(ValueError, frame.idxmax, axis=2)
def test_stale_cached_series_bug_473(self):
# this is chained, but ok
with option_context('chained_assignment',None):
Y = DataFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'),
columns=('e', 'f', 'g', 'h'))
repr(Y)
Y['e'] = Y['e'].astype('object')
Y['g']['c'] = np.NaN
repr(Y)
result = Y.sum()
exp = Y['g'].sum()
self.assertTrue(isnull(Y['g']['c']))
def test_index_namedtuple(self):
from collections import namedtuple
IndexType = namedtuple("IndexType", ["a", "b"])
idx1 = IndexType("foo", "bar")
idx2 = IndexType("baz", "bof")
index = Index([idx1, idx2],
name="composite_index", tupleize_cols=False)
df = DataFrame([(1, 2), (3, 4)], index=index, columns=["A", "B"])
self.assertEqual(df.ix[IndexType("foo", "bar")]["A"], 1)
def test_empty_nonzero(self):
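        # .empty is False while the frame has rows and True once all rows are
        # dropped (including for the transpose)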
df = DataFrame([1, 2, 3])
self.assertFalse(df.empty)
df = DataFrame(index=['a', 'b'], columns=['c', 'd']).dropna()
self.assertTrue(df.empty)
self.assertTrue(df.T.empty)
def test_any_all(self):
self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)
self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True)
df = DataFrame(randn(10, 4)) > 0
df.any(1)
df.all(1)
df.any(1, bool_only=True)
df.all(1, bool_only=True)
# skip pathological failure cases
# class CantNonzero(object):
# def __nonzero__(self):
# raise ValueError
# df[4] = CantNonzero()
# it works!
# df.any(1)
# df.all(1)
# df.any(1, bool_only=True)
# df.all(1, bool_only=True)
# df[4][4] = np.nan
# df.any(1)
# df.all(1)
# df.any(1, bool_only=True)
# df.all(1, bool_only=True)
def test_consolidate_datetime64(self):
# numpy vstack bug
data = """\
starting,ending,measure
2012-06-21 00:00,2012-06-23 07:00,77
2012-06-23 07:00,2012-06-23 16:30,65
2012-06-23 16:30,2012-06-25 08:00,77
2012-06-25 08:00,2012-06-26 12:00,0
2012-06-26 12:00,2012-06-27 08:00,77
"""
df = read_csv(StringIO(data), parse_dates=[0, 1])
ser_starting = df.starting
ser_starting.index = ser_starting.values
ser_starting = ser_starting.tz_localize('US/Eastern')
ser_starting = ser_starting.tz_convert('UTC')
ser_ending = df.ending
ser_ending.index = ser_ending.values
ser_ending = ser_ending.tz_localize('US/Eastern')
ser_ending = ser_ending.tz_convert('UTC')
df.starting = ser_starting.index
df.ending = ser_ending.index
tm.assert_index_equal(pd.DatetimeIndex(df.starting), ser_starting.index)
tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index)
def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,
has_bool_only=False):
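        # shared checker for any/all: compares against a numpy reference with and
        # without skipna, then exercises mixed-type frames, bool_only and the all-NA case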
if frame is None:
frame = self.frame > 0
# set some NAs
frame = DataFrame(frame.values.astype(object), frame.index,
frame.columns)
frame.ix[5:10] = np.nan
frame.ix[15:20, -2:] = np.nan
f = getattr(frame, name)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
assert_series_equal(result0, frame.apply(wrapper))
assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
assert_series_equal(result0, frame.apply(skipna_wrapper))
assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# result = f(axis=1)
# comp = frame.apply(alternative, axis=1).reindex(result.index)
# assert_series_equal(result, comp)
# bad axis
self.assertRaises(ValueError, f, axis=2)
# make sure works on mixed-type frame
mixed = self.mixed_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0
getattr(mixed, name)(axis=0)
getattr(mixed, name)(axis=1)
        class NonzeroFail(object):
            # truth-testing must raise under both Python 2 and Python 3
            def __nonzero__(self):
                raise ValueError
            __bool__ = __nonzero__
mixed['_nonzero_fail_'] = NonzeroFail()
if has_bool_only:
getattr(mixed, name)(axis=0, bool_only=True)
getattr(mixed, name)(axis=1, bool_only=True)
getattr(frame, name)(axis=0, bool_only=False)
getattr(frame, name)(axis=1, bool_only=False)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
if name == 'any':
self.assertFalse(r0.any())
self.assertFalse(r1.any())
else:
self.assertTrue(r0.all())
self.assertTrue(r1.all())
def test_strange_column_corruption_issue(self):
df = DataFrame(index=[0, 1])
df[0] = nan
wasCol = {}
# uncommenting these makes the results match
# for col in xrange(100, 200):
# wasCol[col] = 1
# df[col] = nan
for i, dt in enumerate(df.index):
for col in range(100, 200):
                if col not in wasCol:
wasCol[col] = 1
df[col] = nan
df[col][dt] = i
myid = 100
first = len(df.ix[isnull(df[myid]), [myid]])
second = len(df.ix[isnull(df[myid]), [myid]])
self.assertTrue(first == second == 0)
def test_inplace_return_self(self):
# re #1893
data = DataFrame({'a': ['foo', 'bar', 'baz', 'qux'],
'b': [0, 0, 1, 1],
'c': [1, 2, 3, 4]})
def _check_f(base, f):
result = f(base)
self.assertTrue(result is None)
# -----DataFrame-----
# set_index
f = lambda x: x.set_index('a', inplace=True)
_check_f(data.copy(), f)
# reset_index
f = lambda x: x.reset_index(inplace=True)
_check_f(data.set_index('a'), f)
# drop_duplicates
f = lambda x: x.drop_duplicates(inplace=True)
_check_f(data.copy(), f)
# sort
f = lambda x: x.sort('b', inplace=True)
_check_f(data.copy(), f)
# sort_index
f = lambda x: x.sort_index(inplace=True)
_check_f(data.copy(), f)
# sortlevel
f = lambda x: x.sortlevel(0, inplace=True)
_check_f(data.set_index(['a', 'b']), f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(data.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(data.copy(), f)
# rename
f = lambda x: x.rename({1: 'foo'}, inplace=True)
_check_f(data.copy(), f)
# -----Series-----
d = data.copy()['c']
# reset_index
f = lambda x: x.reset_index(inplace=True, drop=True)
_check_f(data.set_index('a')['c'], f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(d.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(d.copy(), f)
# rename
f = lambda x: x.rename({1: 'foo'}, inplace=True)
_check_f(d.copy(), f)
def test_isin(self):
# GH #4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
assert_frame_equal(result, expected)
def test_isin_empty(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
result = df.isin([])
expected = pd.DataFrame(False, df.index, df.columns)
assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
d = {'A': ['a']}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
df.columns = ['A', 'A']
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
#GH4763
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
with tm.assertRaises(TypeError):
df.isin('a')
with tm.assertRaises(TypeError):
df.isin('aaa')
def test_isin_df(self):
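        # isin with another DataFrame matches element-wise on shared labels;
        # columns that do not overlap come back all-False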
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected['A'].loc[[1, 3]] = True
expected['B'].loc[[0, 2]] = True
assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ['A', 'C']
result = df1.isin(df2)
expected['B'] = False
assert_frame_equal(result, expected)
def test_isin_df_dupe_values(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['B', 'B'])
with tm.assertRaises(ValueError):
df1.isin(df2)
# just index duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['A', 'B'], index=[0, 0, 1, 1])
with tm.assertRaises(ValueError):
df1.isin(df2)
# cols and index:
df2.columns = ['B', 'B']
with tm.assertRaises(ValueError):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A','A'])
result = df.isin(other)
expected = DataFrame(False, index=df.index, columns=df.columns)
expected.loc[0] = True
expected.iloc[1, 1] = True
assert_frame_equal(result, expected)
def test_isin_against_series(self):
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
index=['a', 'b', 'c', 'd'])
s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
expected = DataFrame(False, index=df.index, columns=df.columns)
expected['A'].loc['a'] = True
expected.loc['d'] = True
result = df.isin(s)
assert_frame_equal(result, expected)
def test_isin_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
(0, 'b', 'bar'), (0, 'b', 'baz'),
(2, 'a', 'foo'), (2, 'a', 'bar'),
(2, 'c', 'bar'), (2, 'c', 'baz'),
(1, 'b', 'foo'), (1, 'b', 'bar'),
(1, 'c', 'bar'), (1, 'c', 'baz')])
df1 = DataFrame({'A': np.ones(12),
'B': np.zeros(12)}, index=idx)
df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
# against regular index
expected = DataFrame(False, index=df1.index, columns=df1.columns)
result = df1.isin(df2)
assert_frame_equal(result, expected)
df2.index = idx
expected = df2.values.astype(np.bool)
expected[:, 1] = ~expected[:, 1]
expected = DataFrame(expected, columns=['A', 'B'], index=idx)
result = df1.isin(df2)
assert_frame_equal(result, expected)
def test_to_csv_date_format(self):
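        # date_format should be applied to datetimes in the data, the index and
        # the columns, and NaT values should survive a round trip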
from pandas import to_datetime
pname = '__tmp_to_csv_date_format__'
with ensure_clean(pname) as path:
for engine in [None, 'python']:
dt_index = self.tsframe.index
datetime_frame = DataFrame({'A': dt_index, 'B': dt_index.shift(1)}, index=dt_index)
datetime_frame.to_csv(path, date_format='%Y%m%d', engine=engine)
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_int = datetime_frame.applymap(lambda x: int(x.strftime('%Y%m%d')))
datetime_frame_int.index = datetime_frame_int.index.map(lambda x: int(x.strftime('%Y%m%d')))
assert_frame_equal(test, datetime_frame_int)
datetime_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_str = datetime_frame.applymap(lambda x: x.strftime('%Y-%m-%d'))
datetime_frame_str.index = datetime_frame_str.index.map(lambda x: x.strftime('%Y-%m-%d'))
assert_frame_equal(test, datetime_frame_str)
# Check that columns get converted
datetime_frame_columns = datetime_frame.T
datetime_frame_columns.to_csv(path, date_format='%Y%m%d', engine=engine)
test = read_csv(path, index_col=0)
datetime_frame_columns = datetime_frame_columns.applymap(lambda x: int(x.strftime('%Y%m%d')))
# Columns don't get converted to ints by read_csv
datetime_frame_columns.columns = datetime_frame_columns.columns.map(lambda x: x.strftime('%Y%m%d'))
assert_frame_equal(test, datetime_frame_columns)
# test NaTs
nat_index = to_datetime(['NaT'] * 10 + ['2000-01-01', '1/1/2000', '1-1-2000'])
nat_frame = DataFrame({'A': nat_index}, index=nat_index)
nat_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)
test = read_csv(path, parse_dates=[0, 1], index_col=0)
assert_frame_equal(test, nat_frame)
def test_concat_empty_dataframe_dtypes(self):
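        # concat of empty frames keeps each column's dtype; mixing with float64
        # upcasts bool to object and int32 to float64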
df = DataFrame(columns=list("abc"))
df['a'] = df['a'].astype(np.bool_)
df['b'] = df['b'].astype(np.int32)
df['c'] = df['c'].astype(np.float64)
result = pd.concat([df, df])
self.assertEqual(result['a'].dtype, np.bool_)
self.assertEqual(result['b'].dtype, np.int32)
self.assertEqual(result['c'].dtype, np.float64)
result = pd.concat([df, df.astype(np.float64)])
self.assertEqual(result['a'].dtype, np.object_)
self.assertEqual(result['b'].dtype, np.float64)
self.assertEqual(result['c'].dtype, np.float64)
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1,2,3])
assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
assert_series_equal(norows_df.dtypes, pd.Series(np.object, index=list("abc")))
assert_series_equal(norows_df.ftypes, pd.Series('object:dense', index=list("abc")))
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
assert_series_equal(norows_int_df.dtypes, pd.Series(np.dtype('int32'), index=list("abc")))
assert_series_equal(norows_int_df.ftypes, pd.Series('int32:dense', index=list("abc")))
odict = OrderedDict
df = pd.DataFrame(odict([('a', 1), ('b', True), ('c', 1.0)]), index=[1, 2, 3])
assert_series_equal(df.dtypes, pd.Series(odict([('a', np.int64),
('b', np.bool),
('c', np.float64)])))
assert_series_equal(df.ftypes, pd.Series(odict([('a', 'int64:dense'),
('b', 'bool:dense'),
('c', 'float64:dense')])))
# same but for empty slice of df
assert_series_equal(df[:0].dtypes, pd.Series(odict([('a', np.int64),
('b', np.bool),
('c', np.float64)])))
assert_series_equal(df[:0].ftypes, pd.Series(odict([('a', 'int64:dense'),
('b', 'bool:dense'),
('c', 'float64:dense')])))
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
assert_series_equal(df.dtypes,
pd.Series(odict([('a', np.float_), ('b', np.float_),
('c', np.float_),])))
assert_series_equal(df.iloc[:,2:].dtypes,
pd.Series(odict([('c', np.float_)])))
assert_series_equal(df.dtypes,
pd.Series(odict([('a', np.float_), ('b', np.float_),
('c', np.float_),])))
def test_set_index_names(self):
df = pd.util.testing.makeDataFrame()
df.index.name = 'name'
self.assertEqual(df.set_index(df.index).index.names, ['name'])
mi = MultiIndex.from_arrays(df[['A', 'B']].T.values, names=['A', 'B'])
mi2 = MultiIndex.from_arrays(df[['A', 'B', 'A', 'B']].T.values,
names=['A', 'B', 'A', 'B'])
df = df.set_index(['A', 'B'])
self.assertEqual(df.set_index(df.index).index.names, ['A', 'B'])
# Check that set_index isn't converting a MultiIndex into an Index
self.assertTrue(isinstance(df.set_index(df.index).index, MultiIndex))
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
self.assertTrue(isinstance(df.set_index([df.index, df.index]).index, MultiIndex))
# Check equality
tm.assert_index_equal(df.set_index([df.index, df.index]).index, mi2)
def test_select_dtypes_include(self):
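        # include=[np.number] selects the int, uint and float columns; adding
        # 'category' also selects the Categorical column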
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc'))})
ri = df.select_dtypes(include=[np.number])
ei = df[['b', 'c', 'd']]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number,'category'])
ei = df[['b', 'c', 'd', 'f']]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_exclude(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True]})
re = df.select_dtypes(exclude=[np.number])
ee = df[['a', 'e']]
tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
exclude = np.datetime64,
include = np.bool_, 'integer'
r = df.select_dtypes(include=include, exclude=exclude)
e = df[['b', 'c', 'e']]
tm.assert_frame_equal(r, e)
exclude = 'datetime',
include = 'bool', 'int64', 'int32'
r = df.select_dtypes(include=include, exclude=exclude)
e = df[['b', 'e']]
tm.assert_frame_equal(r, e)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
df['g'] = df.f.diff()
assert not hasattr(np, 'u8')
r = df.select_dtypes(include=['i8', 'O'], exclude=['timedelta'])
e = df[['a', 'b']]
tm.assert_frame_equal(r, e)
r = df.select_dtypes(include=['i8', 'O', 'timedelta64[ns]'])
e = df[['a', 'b', 'g']]
tm.assert_frame_equal(r, e)
def test_select_dtypes_empty(self):
df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})
with tm.assertRaisesRegexp(ValueError, 'at least one of include or '
'exclude must be nonempty'):
df.select_dtypes()
def test_select_dtypes_raises_on_string(self):
df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})
with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):
df.select_dtypes(include='object')
with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):
df.select_dtypes(exclude='object')
with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):
df.select_dtypes(include=int, exclude='object')
def test_select_dtypes_bad_datetime64(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
with tm.assertRaisesRegexp(ValueError, '.+ is too specific'):
df.select_dtypes(include=['datetime64[D]'])
with tm.assertRaisesRegexp(ValueError, '.+ is too specific'):
df.select_dtypes(exclude=['datetime64[as]'])
def test_select_dtypes_str_raises(self):
df = DataFrame({'a': list('abc'),
'g': list(u('abc')),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
string_dtypes = set((str, 'str', np.string_, 'S1',
'unicode', np.unicode_, 'U1'))
try:
string_dtypes.add(unicode)
except NameError:
pass
for dt in string_dtypes:
with tm.assertRaisesRegexp(TypeError,
'string dtypes are not allowed'):
df.select_dtypes(include=[dt])
with tm.assertRaisesRegexp(TypeError,
'string dtypes are not allowed'):
df.select_dtypes(exclude=[dt])
def test_select_dtypes_bad_arg_raises(self):
df = DataFrame({'a': list('abc'),
'g': list(u('abc')),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
with tm.assertRaisesRegexp(TypeError, 'data type.*not understood'):
df.select_dtypes(['blargy, blarg, blarg'])
def skip_if_no_ne(engine='numexpr'):
if engine == 'numexpr':
try:
import numexpr as ne
except ImportError:
raise nose.SkipTest("cannot query engine numexpr when numexpr not "
"installed")
def skip_if_no_pandas_parser(parser):
if parser != 'pandas':
raise nose.SkipTest("cannot evaluate with parser {0!r}".format(parser))
class TestDataFrameQueryWithMultiIndex(object):
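    # query() against MultiIndex levels: named levels are referenced by name,
    # unnamed ones via ilevel_N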
def check_query_with_named_multiindex(self, parser, engine):
tm.skip_if_no_ne(engine)
a = tm.choice(['red', 'green'], size=10)
b = tm.choice(['eggs', 'ham'], size=10)
index = MultiIndex.from_arrays([a, b], names=['color', 'food'])
df = DataFrame(randn(10, 2), index=index)
ind = Series(df.index.get_level_values('color').values, index=index,
name='color')
# equality
res1 = df.query('color == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == color', parser=parser, engine=engine)
exp = df[ind == 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('color != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != color', parser=parser, engine=engine)
exp = df[ind != 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('color == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == color', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('color != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != color', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in color', parser=parser, engine=engine)
res2 = df.query('"red" in color', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in color', parser=parser, engine=engine)
res2 = df.query('"red" not in color', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
def test_query_with_named_multiindex(self):
for parser, engine in product(['pandas'], ENGINES):
yield self.check_query_with_named_multiindex, parser, engine
def check_query_with_unnamed_multiindex(self, parser, engine):
tm.skip_if_no_ne(engine)
a = tm.choice(['red', 'green'], size=10)
b = tm.choice(['eggs', 'ham'], size=10)
index = MultiIndex.from_arrays([a, b])
df = DataFrame(randn(10, 2), index=index)
ind = Series(df.index.get_level_values(0).values, index=index)
res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)
exp = df[ind == 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)
exp = df[ind != 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
#### LEVEL 1 ####
ind = Series(df.index.get_level_values(1).values, index=index)
res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine)
exp = df[ind == 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine)
exp = df[ind != 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["eggs"] not in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
def test_query_with_unnamed_multiindex(self):
for parser, engine in product(['pandas'], ENGINES):
yield self.check_query_with_unnamed_multiindex, parser, engine
def check_query_with_partially_named_multiindex(self, parser, engine):
tm.skip_if_no_ne(engine)
a = tm.choice(['red', 'green'], size=10)
b = np.arange(10)
index = MultiIndex.from_arrays([a, b])
index.names = [None, 'rating']
df = DataFrame(randn(10, 2), index=index)
res = df.query('rating == 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind == 1]
assert_frame_equal(res, exp)
res = df.query('rating != 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind != 1]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind == "red"]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind != "red"]
assert_frame_equal(res, exp)
def test_query_with_partially_named_multiindex(self):
for parser, engine in product(['pandas'], ENGINES):
yield self.check_query_with_partially_named_multiindex, parser, engine
def test_query_multiindex_get_index_resolvers(self):
for parser, engine in product(['pandas'], ENGINES):
yield self.check_query_multiindex_get_index_resolvers, parser, engine
def check_query_multiindex_get_index_resolvers(self, parser, engine):
df = mkdf(10, 3, r_idx_nlevels=2, r_idx_names=['spam', 'eggs'])
resolvers = df._get_index_resolvers()
def to_series(mi, level):
level_values = mi.get_level_values(level)
s = level_values.to_series()
s.index = mi
return s
col_series = df.columns.to_series()
expected = {'index': df.index,
'columns': col_series,
'spam': to_series(df.index, 'spam'),
'eggs': to_series(df.index, 'eggs'),
'C0': col_series}
for k, v in resolvers.items():
if isinstance(v, Index):
assert v.is_(expected[k])
elif isinstance(v, Series):
tm.assert_series_equal(v, expected[k])
else:
raise AssertionError("object must be a Series or Index")
def test_raise_on_panel_with_multiindex(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_raise_on_panel_with_multiindex, parser, engine
def check_raise_on_panel_with_multiindex(self, parser, engine):
tm.skip_if_no_ne()
p = tm.makePanel(7)
p.items = tm.makeCustomIndex(len(p.items), nlevels=2)
with tm.assertRaises(NotImplementedError):
pd.eval('p + 1', parser=parser, engine=engine)
def test_raise_on_panel4d_with_multiindex(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_raise_on_panel4d_with_multiindex, parser, engine
def check_raise_on_panel4d_with_multiindex(self, parser, engine):
tm.skip_if_no_ne()
p4d = tm.makePanel4D(7)
p4d.items = tm.makeCustomIndex(len(p4d.items), nlevels=2)
with tm.assertRaises(NotImplementedError):
pd.eval('p4d + 1', parser=parser, engine=engine)
class TestDataFrameQueryNumExprPandas(tm.TestCase):
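    # base query/eval tests pinned to engine='numexpr' and parser='pandas';
    # the subclasses below swap in the other engine/parser combinations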
@classmethod
def setUpClass(cls):
super(TestDataFrameQueryNumExprPandas, cls).setUpClass()
cls.engine = 'numexpr'
cls.parser = 'pandas'
tm.skip_if_no_ne(cls.engine)
@classmethod
def tearDownClass(cls):
super(TestDataFrameQueryNumExprPandas, cls).tearDownClass()
del cls.engine, cls.parser
def test_date_query_with_attribute_access(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('@df.dates1 < 20130101 < @df.dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates2'] = date_range('1/1/2013', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.set_index('dates1', inplace=True, drop=True)
res = df.query('index < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.iloc[0, 0] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('index < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
d = {}
d['dates1'] = date_range('1/1/2012', periods=n)
d['dates3'] = date_range('1/1/2014', periods=n)
df = DataFrame(d)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('index < 20130101 < dates3', engine=engine, parser=parser)
expec = df[(df.index.to_series() < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_with_non_date(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame({'dates': date_range('1/1/2012', periods=n),
'nondate': np.arange(n)})
ops = '==', '!=', '<', '>', '<=', '>='
for op in ops:
with tm.assertRaises(TypeError):
df.query('dates %s nondate' % op, parser=parser, engine=engine)
def test_query_syntax_error(self):
engine, parser = self.engine, self.parser
df = DataFrame({"i": lrange(10), "+": lrange(3, 13),
"r": lrange(4, 14)})
with tm.assertRaises(SyntaxError):
df.query('i - +', engine=engine, parser=parser)
def test_query_scope(self):
from pandas.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(20, 2), columns=list('ab'))
a, b = 1, 2
res = df.query('a > b', engine=engine, parser=parser)
expected = df[df.a > df.b]
tm.assert_frame_equal(res, expected)
res = df.query('@a > b', engine=engine, parser=parser)
expected = df[a > df.b]
tm.assert_frame_equal(res, expected)
# no local variable c
with tm.assertRaises(UndefinedVariableError):
df.query('@a > b > @c', engine=engine, parser=parser)
# no column named 'c'
with tm.assertRaises(UndefinedVariableError):
df.query('@a > b > c', engine=engine, parser=parser)
def test_query_doesnt_pickup_local(self):
from pandas.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
from numpy import sin
# we don't pick up the local 'sin'
with tm.assertRaises(UndefinedVariableError):
df.query('sin > 5', engine=engine, parser=parser)
def test_query_builtin(self):
from pandas.computation.engines import NumExprClobberingError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
with tm.assertRaisesRegexp(NumExprClobberingError,
'Variables in expression.+'):
df.query('sin > 5', engine=engine, parser=parser)
def test_query(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(10, 3), columns=['a', 'b', 'c'])
assert_frame_equal(df.query('a < b', engine=engine, parser=parser),
df[df.a < df.b])
assert_frame_equal(df.query('a + b > b * c', engine=engine,
parser=parser),
df[df.a + df.b > df.b * df.c])
def test_query_index_with_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randint(10, size=(10, 3)),
index=Index(range(10), name='blob'),
columns=['a', 'b', 'c'])
res = df.query('(blob < 5) & (a < b)', engine=engine, parser=parser)
expec = df[(df.index < 5) & (df.a < df.b)]
assert_frame_equal(res, expec)
res = df.query('blob < b', engine=engine, parser=parser)
expec = df[df.index < df.b]
assert_frame_equal(res, expec)
def test_query_index_without_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randint(10, size=(10, 3)),
index=range(10), columns=['a', 'b', 'c'])
# "index" should refer to the index
res = df.query('index < b', engine=engine, parser=parser)
expec = df[df.index < df.b]
assert_frame_equal(res, expec)
# test against a scalar
res = df.query('index < 5', engine=engine, parser=parser)
expec = df[df.index < 5]
assert_frame_equal(res, expec)
def test_nested_scope(self):
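        # with the pandas parser, local frames can be referenced with @ inside
        # query() and by bare name inside pd.eval()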
engine = self.engine
parser = self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
expected = df[(df > 0) & (df2 > 0)]
result = df.query('(@df > 0) & (@df2 > 0)', engine=engine, parser=parser)
assert_frame_equal(result, expected)
result = pd.eval('df[df > 0 and df2 > 0]', engine=engine,
parser=parser)
assert_frame_equal(result, expected)
result = pd.eval('df[df > 0 and df2 > 0 and df[df > 0] > 0]',
engine=engine, parser=parser)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
assert_frame_equal(result, expected)
result = pd.eval('df[(df>0) & (df2>0)]', engine=engine, parser=parser)
expected = df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)
assert_frame_equal(result, expected)
def test_nested_raises_on_local_self_reference(self):
from pandas.computation.ops import UndefinedVariableError
df = DataFrame(np.random.randn(5, 3))
        # can't reference the frame itself: it is a local, so @ is required
with tm.assertRaises(UndefinedVariableError):
df.query('df > 0', engine=self.engine, parser=self.parser)
def test_local_syntax(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
df = DataFrame(randn(100, 10), columns=list('abcdefghij'))
b = 1
expect = df[df.a < b]
result = df.query('a < @b', engine=engine, parser=parser)
assert_frame_equal(result, expect)
expect = df[df.a < df.b]
result = df.query('a < b', engine=engine, parser=parser)
assert_frame_equal(result, expect)
def test_chained_cmp_and_in(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
cols = list('abc')
df = DataFrame(randn(100, len(cols)), columns=cols)
res = df.query('a < b < c and a not in b not in c', engine=engine,
parser=parser)
ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b)
expec = df[ind]
assert_frame_equal(res, expec)
def test_local_variable_with_in(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
a = Series(np.random.randint(3, size=15), name='a')
b = Series(np.random.randint(10, size=15), name='b')
df = DataFrame({'a': a, 'b': b})
expected = df.loc[(df.b - 1).isin(a)]
result = df.query('b - 1 in a', engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
b = Series(np.random.randint(10, size=15), name='b')
expected = df.loc[(b - 1).isin(a)]
result = df.query('@b - 1 in a', engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
def test_at_inside_string(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
c = 1
df = DataFrame({'a': ['a', 'a', 'b', 'b', '@c', '@c']})
result = df.query('a == "@c"', engine=engine, parser=parser)
expected = df[df.a == "@c"]
tm.assert_frame_equal(result, expected)
def test_query_undefined_local(self):
from pandas.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.rand(10, 2), columns=list('ab'))
with tm.assertRaisesRegexp(UndefinedVariableError,
"local variable 'c' is not defined"):
df.query('a == @c', engine=engine, parser=parser)
def test_index_resolvers_come_after_columns_with_the_same_name(self):
n = 1
a = np.r_[20:101:20]
df = DataFrame({'index': a, 'b': np.random.randn(a.size)})
df.index.name = 'index'
result = df.query('index > 5', engine=self.engine, parser=self.parser)
expected = df[df['index'] > 5]
tm.assert_frame_equal(result, expected)
df = DataFrame({'index': a, 'b': np.random.randn(a.size)})
result = df.query('ilevel_0 > 5', engine=self.engine, parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
tm.assert_frame_equal(result, expected)
df = DataFrame({'a': a, 'b': np.random.randn(a.size)})
df.index.name = 'a'
result = df.query('a > 5', engine=self.engine, parser=self.parser)
expected = df[df.a > 5]
tm.assert_frame_equal(result, expected)
result = df.query('index > 5', engine=self.engine, parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
tm.assert_frame_equal(result, expected)
def test_inf(self):
n = 10
df = DataFrame({'a': np.random.rand(n), 'b': np.random.rand(n)})
        df.loc[::2, 'a'] = np.inf
ops = '==', '!='
d = dict(zip(ops, (operator.eq, operator.ne)))
for op, f in d.items():
q = 'a %s inf' % op
expected = df[f(df.a, np.inf)]
result = df.query(q, engine=self.engine, parser=self.parser)
tm.assert_frame_equal(result, expected)
class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):
@classmethod
def setUpClass(cls):
super(TestDataFrameQueryNumExprPython, cls).setUpClass()
cls.engine = 'numexpr'
cls.parser = 'python'
tm.skip_if_no_ne(cls.engine)
cls.frame = _frame.copy()
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates2'] = date_range('1/1/2013', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.set_index('dates1', inplace=True, drop=True)
res = df.query('(index < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.iloc[0, 0] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('(index < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
with tm.assertRaises(NotImplementedError):
df.query('index < 20130101 < dates3', engine=engine, parser=parser)
def test_nested_scope(self):
from pandas.computation.ops import UndefinedVariableError
engine = self.engine
parser = self.parser
# smoke test
x = 1
result = pd.eval('x + 1', engine=engine, parser=parser)
self.assertEqual(result, 2)
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
# don't have the pandas parser
with tm.assertRaises(SyntaxError):
df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)
with tm.assertRaises(UndefinedVariableError):
df.query('(df>0) & (df2>0)', engine=engine, parser=parser)
expected = df[(df > 0) & (df2 > 0)]
result = pd.eval('df[(df > 0) & (df2 > 0)]', engine=engine,
parser=parser)
tm.assert_frame_equal(expected, result)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
result = pd.eval('df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]',
engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):
@classmethod
def setUpClass(cls):
super(TestDataFrameQueryPythonPandas, cls).setUpClass()
cls.engine = 'python'
cls.parser = 'pandas'
cls.frame = _frame.copy()
def test_query_builtin(self):
from pandas.computation.engines import NumExprClobberingError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
expected = df[df.index > 5]
result = df.query('sin > 5', engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):
@classmethod
def setUpClass(cls):
super(TestDataFrameQueryPythonPython, cls).setUpClass()
cls.engine = cls.parser = 'python'
cls.frame = _frame.copy()
def test_query_builtin(self):
from pandas.computation.engines import NumExprClobberingError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
expected = df[df.index > 5]
result = df.query('sin > 5', engine=engine, parser=parser)
tm.assert_frame_equal(expected, result)
PARSERS = 'python', 'pandas'
ENGINES = 'python', 'numexpr'
class TestDataFrameQueryStrings(object):
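    # string comparisons and membership tests in query(): the pandas parser
    # supports them, the python parser raises NotImplementedError for the
    # literal/list cases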
def check_str_query_method(self, parser, engine):
tm.skip_if_no_ne(engine)
df = DataFrame(randn(10, 1), columns=['b'])
df['strings'] = Series(list('aabbccddee'))
expect = df[df.strings == 'a']
if parser != 'pandas':
col = 'strings'
lst = '"a"'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = '==', '!='
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
assertRaises(NotImplementedError, df.query, ex, engine=engine,
parser=parser, local_dict={'strings': df.strings})
else:
res = df.query('"a" == strings', engine=engine, parser=parser)
assert_frame_equal(res, expect)
res = df.query('strings == "a"', engine=engine, parser=parser)
assert_frame_equal(res, expect)
assert_frame_equal(res, df[df.strings.isin(['a'])])
expect = df[df.strings != 'a']
res = df.query('strings != "a"', engine=engine, parser=parser)
assert_frame_equal(res, expect)
res = df.query('"a" != strings', engine=engine, parser=parser)
assert_frame_equal(res, expect)
assert_frame_equal(res, df[~df.strings.isin(['a'])])
def test_str_query_method(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_str_query_method, parser, engine
def test_str_list_query_method(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_str_list_query_method, parser, engine
def check_str_list_query_method(self, parser, engine):
tm.skip_if_no_ne(engine)
df = DataFrame(randn(10, 1), columns=['b'])
df['strings'] = Series(list('aabbccddee'))
expect = df[df.strings.isin(['a', 'b'])]
if parser != 'pandas':
col = 'strings'
lst = '["a", "b"]'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = '==', '!='
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
with tm.assertRaises(NotImplementedError):
df.query(ex, engine=engine, parser=parser)
else:
res = df.query('strings == ["a", "b"]', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
res = df.query('["a", "b"] == strings', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
expect = df[~df.strings.isin(['a', 'b'])]
res = df.query('strings != ["a", "b"]', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
res = df.query('["a", "b"] != strings', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
def check_query_with_string_columns(self, parser, engine):
tm.skip_if_no_ne(engine)
df = DataFrame({'a': list('aaaabbbbcccc'),
'b': list('aabbccddeeff'),
'c': np.random.randint(5, size=12),
'd': np.random.randint(9, size=12)})
if parser == 'pandas':
res = df.query('a in b', parser=parser, engine=engine)
expec = df[df.a.isin(df.b)]
assert_frame_equal(res, expec)
res = df.query('a in b and c < d', parser=parser, engine=engine)
expec = df[df.a.isin(df.b) & (df.c < df.d)]
assert_frame_equal(res, expec)
else:
with assertRaises(NotImplementedError):
df.query('a in b', parser=parser, engine=engine)
with assertRaises(NotImplementedError):
df.query('a in b and c < d', parser=parser, engine=engine)
def test_query_with_string_columns(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_query_with_string_columns, parser, engine
def check_object_array_eq_ne(self, parser, engine):
tm.skip_if_no_ne(engine)
df = DataFrame({'a': list('aaaabbbbcccc'),
'b': list('aabbccddeeff'),
'c': np.random.randint(5, size=12),
'd': np.random.randint(9, size=12)})
res = df.query('a == b', parser=parser, engine=engine)
exp = df[df.a == df.b]
assert_frame_equal(res, exp)
res = df.query('a != b', parser=parser, engine=engine)
exp = df[df.a != df.b]
assert_frame_equal(res, exp)
def test_object_array_eq_ne(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_object_array_eq_ne, parser, engine
def check_query_with_nested_strings(self, parser, engine):
tm.skip_if_no_ne(engine)
skip_if_no_pandas_parser(parser)
from pandas.compat import StringIO
raw = """id event timestamp
1 "page 1 load" 1/1/2014 0:00:01
1 "page 1 exit" 1/1/2014 0:00:31
2 "page 2 load" 1/1/2014 0:01:01
2 "page 2 exit" 1/1/2014 0:01:31
3 "page 3 load" 1/1/2014 0:02:01
3 "page 3 exit" 1/1/2014 0:02:31
4 "page 1 load" 2/1/2014 1:00:01
4 "page 1 exit" 2/1/2014 1:00:31
5 "page 2 load" 2/1/2014 1:01:01
5 "page 2 exit" 2/1/2014 1:01:31
6 "page 3 load" 2/1/2014 1:02:01
6 "page 3 exit" 2/1/2014 1:02:31
"""
df = pd.read_csv(StringIO(raw), sep=r'\s{2,}', engine='python',
parse_dates=['timestamp'])
expected = df[df.event == '"page 1 load"']
res = df.query("""'"page 1 load"' in event""", parser=parser,
engine=engine)
tm.assert_frame_equal(expected, res)
def test_query_with_nested_string(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_query_with_nested_strings, parser, engine
def check_query_with_nested_special_character(self, parser, engine):
skip_if_no_pandas_parser(parser)
tm.skip_if_no_ne(engine)
df = DataFrame({'a': ['a', 'b', 'test & test'],
'b': [1, 2, 3]})
res = df.query('a == "test & test"', parser=parser, engine=engine)
expec = df[df.a == 'test & test']
tm.assert_frame_equal(res, expec)
def test_query_with_nested_special_character(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_query_with_nested_special_character, parser, engine
def check_query_lex_compare_strings(self, parser, engine):
tm.skip_if_no_ne(engine=engine)
import operator as opr
a = Series(tm.choice(list('abcde'), 20))
b = Series(np.arange(a.size))
df = DataFrame({'X': a, 'Y': b})
ops = {'<': opr.lt, '>': opr.gt, '<=': opr.le, '>=': opr.ge}
for op, func in ops.items():
res = df.query('X %s "d"' % op, engine=engine, parser=parser)
expected = df[func(df.X, 'd')]
assert_frame_equal(res, expected)
def test_query_lex_compare_strings(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_query_lex_compare_strings, parser, engine
def check_query_single_element_booleans(self, parser, engine):
tm.skip_if_no_ne(engine)
columns = 'bid', 'bidsize', 'ask', 'asksize'
data = np.random.randint(2, size=(1, len(columns))).astype(bool)
df = DataFrame(data, columns=columns)
res = df.query('bid & ask', engine=engine, parser=parser)
expected = df[df.bid & df.ask]
assert_frame_equal(res, expected)
def test_query_single_element_booleans(self):
for parser, engine in product(PARSERS, ENGINES):
yield self.check_query_single_element_booleans, parser, engine
def check_query_string_scalar_variable(self, parser, engine):
tm.skip_if_no_ne(engine)
df = pd.DataFrame({'Symbol': ['BUD US', 'BUD US', 'IBM US', 'IBM US'],
'Price': [109.70, 109.72, 183.30, 183.35]})
e = df[df.Symbol == 'BUD US']
symb = 'BUD US'
r = df.query('Symbol == @symb', parser=parser, engine=engine)
tm.assert_frame_equal(e, r)
def test_query_string_scalar_variable(self):
for parser, engine in product(['pandas'], ENGINES):
yield self.check_query_string_scalar_variable, parser, engine
class TestDataFrameEvalNumExprPandas(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestDataFrameEvalNumExprPandas, cls).setUpClass()
cls.engine = 'numexpr'
cls.parser = 'pandas'
tm.skip_if_no_ne()
def setUp(self):
self.frame = DataFrame(randn(10, 3), columns=list('abc'))
def tearDown(self):
del self.frame
def test_simple_expr(self):
res = self.frame.eval('a + b', engine=self.engine, parser=self.parser)
expect = self.frame.a + self.frame.b
assert_series_equal(res, expect)
def test_bool_arith_expr(self):
res = self.frame.eval('a[a < 1] + b', engine=self.engine,
parser=self.parser)
expect = self.frame.a[self.frame.a < 1] + self.frame.b
assert_series_equal(res, expect)
def test_invalid_type_for_operator_raises(self):
df = DataFrame({'a': [1, 2], 'b': ['c', 'd']})
ops = '+', '-', '*', '/'
for op in ops:
with tm.assertRaisesRegexp(TypeError,
"unsupported operand type\(s\) for "
".+: '.+' and '.+'"):
df.eval('a {0} b'.format(op), engine=self.engine,
parser=self.parser)
class TestDataFrameEvalNumExprPython(TestDataFrameEvalNumExprPandas):
@classmethod
def setUpClass(cls):
super(TestDataFrameEvalNumExprPython, cls).setUpClass()
cls.engine = 'numexpr'
cls.parser = 'python'
tm.skip_if_no_ne(cls.engine)
class TestDataFrameEvalPythonPandas(TestDataFrameEvalNumExprPandas):
@classmethod
def setUpClass(cls):
super(TestDataFrameEvalPythonPandas, cls).setUpClass()
cls.engine = 'python'
cls.parser = 'pandas'
class TestDataFrameEvalPythonPython(TestDataFrameEvalNumExprPython):
@classmethod
def setUpClass(cls):
        super(TestDataFrameEvalPythonPython, cls).setUpClass()
cls.engine = cls.parser = 'python'
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
AnasGhrab/scikit-learn | sklearn/metrics/metrics.py | 233 | 1262 | import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| bsd-3-clause |
iismd17/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
energyPATHWAYS/energyPATHWAYS | model_building_tools/create_map_keys_from_drivers/map_key_from_driver.py | 1 | 2834 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 07 19:20:05 2016
@author: ryandrewjones
"""
import sys
import signal
import click
import os
import cPickle as pickle
import energyPATHWAYS.config as cfg
import energyPATHWAYS.util as util
from energyPATHWAYS.pathways_model import PathwaysModel
import energyPATHWAYS.shape as shape
from energyPATHWAYS.outputs import Output
import csv
import time
import datetime
import logging
import cProfile
import traceback
import pandas as pd
# set up a dummy model
path = os.getcwd()
config = 'config.INI'
scenario_id = 1
cfg.initialize_config(path, config, _log_name='log.log')
cfg.primary_geography = 'intersection_id'
model = PathwaysModel(scenario_id, api_run=False)
# model.run(scenario_id, solve_demand=False, solve_supply=False, save_models=False, append_results=False)
demand = model.demand
demand.add_drivers()
existing_geo_map_key_ids, existing_geo_map_key_names = zip(*util.sql_read_table('GeographyMapKeys'))
next_map_key_id = max(existing_geo_map_key_ids)+1
next_geo_map_id = max(util.sql_read_table('GeographyMap', 'id'))+1
###############################################
# user inputs
driver_ids_to_make_map_keys = [
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61]
basis_year_for_map_key = int(cfg.cfgfile.get('case', 'current_year'))
###############################################
# make our new map keys
GeographyMapKeys = [['id', 'name']]
GeographyMap_columns = ['intersection_id', 'geography_map_key_id', 'value', 'id']
GeographyMap = []
for driver_id in driver_ids_to_make_map_keys:
driver = demand.drivers[driver_id]
demand.remap_driver(driver) # remaps to our new super detailed geography
values = util.df_slice(driver.values, basis_year_for_map_key, 'year')
    if values.index.nlevels > 1:
levels_to_remove = [n for n in values.index.names if n!='intersection_id']
values = util.remove_df_levels(values, levels_to_remove)
new_key_name = driver.name
if new_key_name in existing_geo_map_key_names:
raise ValueError('driver name {} is already in the existing map keys, please rename driver id {}'.format(driver.name, driver.id))
GeographyMapKeys.append([next_map_key_id, new_key_name])
values = values.reset_index()
values['id'] = range(next_geo_map_id, next_geo_map_id+len(values))
values['geography_map_key_id'] = next_map_key_id
GeographyMap.append(values)
next_geo_map_id += len(values)
    next_map_key_id += 1
output = pd.concat(GeographyMap)[GeographyMap_columns]
output.to_csv(os.path.join(path, 'outputs', 'GeographyMap.csv'), index=False)
with open(os.path.join(path, 'outputs', 'GeographyMapKeys.csv'), 'wb') as outfile:
csvwriter = csv.writer(outfile, delimiter=',')
for row in GeographyMapKeys:
csvwriter.writerow(row)
| mit |
hrjn/scikit-learn | sklearn/mixture/dpgmm.py | 25 | 35852 | """Bayesian Gaussian Mixture Models and
Dirichlet Process Gaussian Mixture Models"""
from __future__ import print_function
# Author: Alexandre Passos (alexandre.tp@gmail.com)
# Bertrand Thirion <bertrand.thirion@inria.fr>
#
# Based on mixture.py by:
# Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# Important note for the deprecation cleaning of 0.20 :
# All the function and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
# - 'sklearn/mixture/test_gmm.py'
import numpy as np
from scipy.special import digamma as _digamma, gammaln as _gammaln
from scipy import linalg
from scipy.spatial.distance import cdist
from ..externals.six.moves import xrange
from ..utils import check_random_state, check_array, deprecated
from ..utils.extmath import logsumexp, pinvh, squared_norm, stable_cumsum
from ..utils.validation import check_is_fitted
from .. import cluster
from .gmm import _GMMBase
@deprecated("The function digamma is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.digamma instead.")
def digamma(x):
return _digamma(x + np.finfo(np.float32).eps)
@deprecated("The function gammaln is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.gammaln instead.")
def gammaln(x):
return _gammaln(x + np.finfo(np.float32).eps)
@deprecated("The function log_normalize is deprecated in 0.18 and "
"will be removed in 0.20.")
def log_normalize(v, axis=0):
"""Normalized probabilities from unnormalized log-probabilites"""
v = np.rollaxis(v, axis)
v = v.copy()
v -= v.max(axis=0)
out = logsumexp(v)
v = np.exp(v - out)
v += np.finfo(np.float32).eps
v /= np.sum(v, axis=0)
return np.swapaxes(v, 0, axis)
@deprecated("The function wishart_log_det is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_log_det(a, b, detB, n_features):
"""Expected value of the log of the determinant of a Wishart
The expected value of the logarithm of the determinant of a
    Wishart-distributed random variable with the specified parameters."""
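    # For reference (standard Wishart identity, e.g. Bishop, PRML, eq. B.81),
    # with `detB` assumed to already carry the log-determinant term:
    #   E[ln|W|] = sum_{i=1}^{D} psi((a + 1 - i) / 2) + D * ln(2) + ln|B|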
l = np.sum(digamma(0.5 * (a - np.arange(-1, n_features - 1))))
l += n_features * np.log(2)
return l + detB
@deprecated("The function wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_logz(v, s, dets, n_features):
"The logarithm of the normalization constant for the wishart distribution"
z = 0.
z += 0.5 * v * n_features * np.log(2)
z += (0.25 * (n_features * (n_features - 1)) * np.log(np.pi))
z += 0.5 * v * np.log(dets)
z += np.sum(gammaln(0.5 * (v - np.arange(n_features) + 1)))
return z
def _bound_wishart(a, B, detB):
"""Returns a function of the dof, scale matrix and its determinant
used as an upper bound in variational approximation of the evidence"""
n_features = B.shape[0]
logprior = wishart_logz(a, B, detB, n_features)
logprior -= wishart_logz(n_features,
np.identity(n_features),
1, n_features)
logprior += 0.5 * (a - 1) * wishart_log_det(a, B, detB, n_features)
logprior += 0.5 * a * np.trace(B)
return logprior
##############################################################################
# Variational bound on the log likelihood of each class
##############################################################################
def _sym_quad_form(x, mu, A):
"""helper function to calculate symmetric quadratic form x.T * A * x"""
q = (cdist(x, mu[np.newaxis], "mahalanobis", VI=A) ** 2).reshape(-1)
return q
def _bound_state_log_lik(X, initial_bound, precs, means, covariance_type):
"""Update the bound with likelihood terms, for standard covariance types"""
n_components, n_features = means.shape
n_samples = X.shape[0]
bound = np.empty((n_samples, n_components))
bound[:] = initial_bound
if covariance_type in ['diag', 'spherical']:
for k in range(n_components):
d = X - means[k]
bound[:, k] -= 0.5 * np.sum(d * d * precs[k], axis=1)
elif covariance_type == 'tied':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs)
elif covariance_type == 'full':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs[k])
return bound
class _DPGMMBase(_GMMBase):
"""Variational Inference for the Infinite Gaussian Mixture Model.
DPGMM stands for Dirichlet Process Gaussian Mixture Model, and it
is an infinite mixture model with the Dirichlet Process as a prior
distribution on the number of clusters. In practice the
approximate inference algorithm uses a truncated distribution with
a fixed maximum number of components, but almost always the number
of components actually used depends on the data.
Stick-breaking Representation of a Gaussian mixture model
probability distribution. This class allows for easy and efficient
inference of an approximate posterior distribution over the
parameters of a Gaussian mixture model with a variable number of
components (smaller than the truncation parameter n_components).
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <dpgmm>`.
Parameters
----------
n_components : int, default 1
Number of mixture components.
covariance_type : string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha : float, default 1
Real number representing the concentration parameter of
the dirichlet process. Intuitively, the Dirichlet Process
is as likely to start a new cluster for a point as it is
to add that point to a cluster with alpha elements. A
higher alpha means more clusters, as the expected number
of clusters is ``alpha*log(N)``.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_components : int
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
self.alpha = alpha
super(_DPGMMBase, self).__init__(n_components, covariance_type,
random_state=random_state,
tol=tol, min_covar=min_covar,
n_iter=n_iter, params=params,
init_params=init_params,
verbose=verbose)
def _get_precisions(self):
"""Return precisions as a full matrix."""
if self.covariance_type == 'full':
return self.precs_
elif self.covariance_type in ['diag', 'spherical']:
return [np.diag(cov) for cov in self.precs_]
elif self.covariance_type == 'tied':
return [self.precs_] * self.n_components
def _get_covars(self):
return [pinvh(c) for c in self._get_precisions()]
def _set_covars(self, covars):
raise NotImplementedError("""The variational algorithm does
not support setting the covariance parameters.""")
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
z = np.zeros((X.shape[0], self.n_components))
sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dgamma1 = digamma(self.gamma_.T[1]) - sd
dgamma2 = np.zeros(self.n_components)
dgamma2[0] = digamma(self.gamma_[0, 2]) - digamma(self.gamma_[0, 1] +
self.gamma_[0, 2])
for j in range(1, self.n_components):
dgamma2[j] = dgamma2[j - 1] + digamma(self.gamma_[j - 1, 2])
dgamma2[j] -= sd[j - 1]
dgamma = dgamma1 + dgamma2
        # Free memory and developers' cognitive load:
del dgamma1, dgamma2, sd
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dgamma
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
"""Update the concentration parameters for each cluster"""
sz = np.sum(z, axis=0)
self.gamma_.T[1] = 1. + sz
self.gamma_.T[2].fill(0)
for i in range(self.n_components - 2, -1, -1):
self.gamma_[i, 2] = self.gamma_[i + 1, 2] + sz[i]
self.gamma_.T[2] += self.alpha
def _update_means(self, X, z):
"""Update the variational distributions for the means"""
n_features = X.shape[1]
for k in range(self.n_components):
if self.covariance_type in ['spherical', 'diag']:
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num *= self.precs_[k]
den = 1. + self.precs_[k] * np.sum(z.T[k])
self.means_[k] = num / den
elif self.covariance_type in ['tied', 'full']:
if self.covariance_type == 'tied':
cov = self.precs_
else:
cov = self.precs_[k]
den = np.identity(n_features) + cov * np.sum(z.T[k])
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num = np.dot(cov, num)
self.means_[k] = linalg.lstsq(den, num)[0]
def _update_precisions(self, X, z):
"""Update the variational distributions for the precisions"""
n_features = X.shape[1]
if self.covariance_type == 'spherical':
self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
for k in range(self.n_components):
# could be more memory efficient ?
sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
self.scale_[k] = 1.
self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
self.bound_prec_[k] = (
0.5 * n_features * (
digamma(self.dof_[k]) - np.log(self.scale_[k])))
self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
elif self.covariance_type == 'diag':
for k in range(self.n_components):
self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
sq_diff = (X - self.means_[k]) ** 2 # see comment above
self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
z.T[k], (sq_diff + 1))
self.precs_[k] = self.dof_[k] / self.scale_[k]
self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
- np.log(self.scale_[k]))
self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
elif self.covariance_type == 'tied':
self.dof_ = 2 + X.shape[0] + n_features
self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
for k in range(self.n_components):
diff = X - self.means_[k]
self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_ = pinvh(self.scale_)
self.precs_ = self.dof_ * self.scale_
self.det_scale_ = linalg.det(self.scale_)
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
sum_resp = np.sum(z.T[k])
self.dof_[k] = 2 + sum_resp + n_features
self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
diff = X - self.means_[k]
self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_[k] = pinvh(self.scale_[k])
self.precs_[k] = self.dof_[k] * self.scale_[k]
self.det_scale_[k] = linalg.det(self.scale_[k])
self.bound_prec_[k] = 0.5 * wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
self.scale_[k])
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_.T[1])
print("covariance_type:", self.covariance_type)
def _do_mstep(self, X, z, params):
"""Maximize the variational lower bound
Update each of the parameters to maximize the lower bound."""
self._monitor(X, z, "z")
self._update_concentration(z)
self._monitor(X, z, "gamma")
if 'm' in params:
self._update_means(X, z)
self._monitor(X, z, "mu")
if 'c' in params:
self._update_precisions(X, z)
self._monitor(X, z, "a and b", end=True)
def _initialize_gamma(self):
"Initializes the concentration parameters"
self.gamma_ = self.alpha * np.ones((self.n_components, 3))
def _bound_concentration(self):
"""The variational lower bound for the concentration parameter."""
logprior = gammaln(self.alpha) * self.n_components
logprior += np.sum((self.alpha - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior += np.sum(- gammaln(self.gamma_.T[1] + self.gamma_.T[2]))
logprior += np.sum(gammaln(self.gamma_.T[1]) +
gammaln(self.gamma_.T[2]))
logprior -= np.sum((self.gamma_.T[1] - 1) * (
digamma(self.gamma_.T[1]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior -= np.sum((self.gamma_.T[2] - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
return logprior
def _bound_means(self):
"The variational lower bound for the mean parameters"
logprior = 0.
logprior -= 0.5 * squared_norm(self.means_)
logprior -= 0.5 * self.means_.shape[1] * self.n_components
return logprior
def _bound_precisions(self):
"""Returns the bound term related to precisions"""
logprior = 0.
if self.covariance_type == 'spherical':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_
- self.precs_[:, 0])
elif self.covariance_type == 'diag':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_ - self.precs_)
elif self.covariance_type == 'tied':
logprior += _bound_wishart(self.dof_, self.scale_, self.det_scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
logprior += _bound_wishart(self.dof_[k],
self.scale_[k],
self.det_scale_[k])
return logprior
def _bound_proportions(self, z):
"""Returns the bound term related to proportions"""
dg12 = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dg1 = digamma(self.gamma_.T[1]) - dg12
dg2 = digamma(self.gamma_.T[2]) - dg12
cz = stable_cumsum(z[:, ::-1], axis=-1)[:, -2::-1]
logprior = np.sum(cz * dg2[:-1]) + np.sum(z * dg1)
del cz # Save memory
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _logprior(self, z):
logprior = self._bound_concentration()
logprior += self._bound_means()
logprior += self._bound_precisions()
logprior += self._bound_proportions(z)
return logprior
def lower_bound(self, X, z):
"""returns a lower bound on model evidence based on X and membership"""
check_is_fitted(self, 'means_')
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
c = np.sum(z * _bound_state_log_lik(X, self._initial_bound +
self.bound_prec_, self.precs_,
self.means_, self.covariance_type))
return c + self._logprior(z)
def _set_weights(self):
for i in xrange(self.n_components):
self.weights_[i] = self.gamma_[i, 1] / (self.gamma_[i, 1]
+ self.gamma_[i, 2])
self.weights_ /= np.sum(self.weights_)
def _fit(self, X, y=None):
"""Estimate model parameters with the variational
algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
        the object. Likewise, if you would just like to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.random_state_ = check_random_state(self.random_state)
# initialization step
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
z = np.ones((n_samples, self.n_components))
z /= self.n_components
self._initial_bound = - 0.5 * n_features * np.log(2 * np.pi)
self._initial_bound -= np.log(2 * np.pi * np.e)
if (self.init_params != '') or not hasattr(self, 'gamma_'):
self._initialize_gamma()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state_).fit(X).cluster_centers_[::-1]
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components, self.n_components)
if 'c' in self.init_params or not hasattr(self, 'precs_'):
if self.covariance_type == 'spherical':
self.dof_ = np.ones(self.n_components)
self.scale_ = np.ones(self.n_components)
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * n_features * (
digamma(self.dof_) - np.log(self.scale_))
elif self.covariance_type == 'diag':
self.dof_ = 1 + 0.5 * n_features
self.dof_ *= np.ones((self.n_components, n_features))
self.scale_ = np.ones((self.n_components, n_features))
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * (np.sum(digamma(self.dof_) -
np.log(self.scale_), 1))
self.bound_prec_ -= 0.5 * np.sum(self.precs_, 1)
elif self.covariance_type == 'tied':
self.dof_ = 1.
self.scale_ = np.identity(n_features)
self.precs_ = np.identity(n_features)
self.det_scale_ = 1.
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
self.dof_ = (1 + self.n_components + n_samples)
self.dof_ *= np.ones(self.n_components)
self.scale_ = [2 * np.identity(n_features)
for _ in range(self.n_components)]
self.precs_ = [np.identity(n_features)
for _ in range(self.n_components)]
self.det_scale_ = np.ones(self.n_components)
self.bound_prec_ = np.zeros(self.n_components)
for k in range(self.n_components):
self.bound_prec_[k] = wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= (self.dof_[k] *
np.trace(self.scale_[k]))
self.bound_prec_ *= 0.5
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
prev_log_likelihood = current_log_likelihood
# Expectation step
curr_logprob, z = self.score_samples(X)
current_log_likelihood = (
curr_logprob.mean() + self._logprior(z) / n_samples)
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if change < self.tol:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, z, self.params)
if self.n_iter == 0:
# Need to make sure that there is a z value to output
# Output zeros because it was just a quick initialization
z = np.zeros((X.shape[0], self.n_components))
self._set_weights()
return z
@deprecated("The `DPGMM` class is not working correctly and it's better "
"to use `sklearn.mixture.BayesianGaussianMixture` class with "
"parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be "
"removed in 0.20.")
class DPGMM(_DPGMMBase):
"""Dirichlet Process Gaussian Mixture Models
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.BayesianGaussianMixture` with
parameter ``weight_concentration_prior_type='dirichlet_process'``
instead.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
super(DPGMM, self).__init__(
n_components=n_components, covariance_type=covariance_type,
alpha=alpha, random_state=random_state, tol=tol, verbose=verbose,
min_covar=min_covar, n_iter=n_iter, params=params,
init_params=init_params)
@deprecated("The `VBGMM` class is not working correctly and it's better "
"to use `sklearn.mixture.BayesianGaussianMixture` class with "
"parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. "
"VBGMM is deprecated in 0.18 and will be removed in 0.20.")
class VBGMM(_DPGMMBase):
"""Variational Inference for the Gaussian Mixture Model
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.BayesianGaussianMixture` with parameter
``weight_concentration_prior_type='dirichlet_distribution'`` instead.
Variational inference for a Gaussian mixture model probability
distribution. This class allows for easy and efficient inference
of an approximate posterior distribution over the parameters of a
Gaussian mixture model with a fixed number of components.
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <vbgmm>`.
Parameters
----------
n_components : int, default 1
Number of mixture components.
covariance_type : string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha : float, default 1
Real number representing the concentration parameter of
the dirichlet distribution. Intuitively, the higher the
value of alpha the more likely the variational mixture of
Gaussians model will use all components it can.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussians.
n_components : int (read-only)
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False
otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
DPGMM : Infinite Gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
super(VBGMM, self).__init__(
n_components, covariance_type, random_state=random_state,
tol=tol, verbose=verbose, min_covar=min_covar,
n_iter=n_iter, params=params, init_params=init_params)
self.alpha = alpha
def _fit(self, X, y=None):
"""Estimate model parameters with the variational algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
        An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
        the object. Likewise, if you would just like to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.alpha_ = float(self.alpha) / self.n_components
return super(VBGMM, self)._fit(X, y)
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
dg = digamma(self.gamma_) - digamma(np.sum(self.gamma_))
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dg
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
for i in range(self.n_components):
self.gamma_[i] = self.alpha_ + np.sum(z.T[i])
def _initialize_gamma(self):
self.gamma_ = self.alpha_ * np.ones(self.n_components)
def _bound_proportions(self, z):
logprior = 0.
dg = digamma(self.gamma_)
dg -= digamma(np.sum(self.gamma_))
logprior += np.sum(dg.reshape((-1, 1)) * z.T)
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _bound_concentration(self):
logprior = 0.
logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
* self.alpha_)
logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha_))
sg = digamma(np.sum(self.gamma_))
logprior += np.sum((self.gamma_ - self.alpha_)
* (digamma(self.gamma_) - sg))
return logprior
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_)
print("covariance_type:", self.covariance_type)
def _set_weights(self):
self.weights_[:] = self.gamma_
self.weights_ /= np.sum(self.weights_)
| bsd-3-clause |
petewarden/tensorflow | tensorflow/python/keras/engine/data_adapter.py | 1 | 57975 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adapter module that convert different input data objects into tf.dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import functools
import itertools
import math
import random
import numpy as np
import six
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import dataset_creator
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
keras_data_adapter_gauge = monitoring.BoolGauge(
"/tensorflow/api/keras/data_adapters", "keras data adapter usage", "method")
try:
from scipy import sparse as scipy_sparse # pylint: disable=g-import-not-at-top
except ImportError:
scipy_sparse = None
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pd = None
@six.add_metaclass(abc.ABCMeta)
class DataAdapter(object):
"""Base class for input data adapter.
  In TF 2.0, tf.data is the preferred API for users to feed in data. In order
  to simplify the training code path, all input data objects will be
  converted to `tf.data.Dataset` if possible.
  Note that since this class is mainly targeted at TF 2.0, it might make a lot
  of assumptions under the hood, e.g. eager context by default, distribution
  strategy, etc. In the meantime, some legacy feature support might be dropped,
  e.g. the Iterator from the dataset API in v1.
  Sample usage of this class looks like:
```
x = tf.data.Dataset.range(100)
adapter_cls = [NumpyArrayDataAdapter, ..., DatasetAdapter]
applicable_adapters = [cls for cls in adapter_cls if cls.can_handle(x)]
if len(applicable_adapters) != 1:
raise ValueError("Expect only one adapter class to handle the input")
dataset = applicable_adapters[0](x).get_dataset()
for data in dataset:
# training
```
"""
@staticmethod
def can_handle(x, y=None):
"""Whether the current DataAdapter could handle the input x and y.
    Structure-wise, x and y can be a single object, a list of objects if there
    are multiple inputs/outputs, or a dictionary of objects when the
    inputs/outputs are named.
Args:
x: input features.
y: target labels. Note that y could be None in the case of prediction.
Returns:
boolean
"""
raise NotImplementedError
@abc.abstractmethod
def __init__(self, x, y=None, **kwargs):
"""Create a DataAdapter based on data inputs.
The caller must make sure to call `can_handle()` first before invoking this
    method. Providing an unsupported data type will result in unexpected
    behavior.
Args:
x: input features.
y: target labels. Note that y could be None in the case of prediction.
**kwargs: Other keyword arguments for DataAdapter during the construction
of the tf.dataset.Dataset. For example:
- Numpy data might have `sample_weights` which will be used for
weighting the loss function during training.
        - Numpy data might need to have a `batch_size` parameter when
          constructing the dataset and iterator.
- Certain input might need to be distribution strategy aware. When
          `distribution_strategy` is passed, the created dataset needs to
          respect the strategy.
DataAdapter might choose to ignore any keyword argument if it doesn't
        use it, or raise an exception if any required argument is not provided.
"""
if not self.can_handle(x, y):
raise ValueError("{} Cannot handle input {}, {}".format(
self.__class__, x, y))
@abc.abstractmethod
def get_dataset(self):
"""Get a dataset instance for the current DataAdapter.
    Note that the returned dataset does not repeat across epochs, so the caller
    might need to create a new iterator for the same dataset at the beginning
    of each epoch. This behavior might change in the future.
Returns:
      A `tf.data.Dataset`. The caller might use the dataset in different
      contexts, e.g. iter(dataset) in eager mode to get the values directly, or
      in graph mode, providing the iterator tensor to the Keras model function.
"""
raise NotImplementedError
@abc.abstractmethod
def get_size(self):
"""Return the size (number of batches) for the dataset created.
    For certain types of data input, the number of batches is known, e.g. for
    Numpy data, the size equals (number_of_elements / batch_size). Whereas for
    a dataset or a python generator, the size is unknown since it may or may
    not have an end state.
Returns:
int, the number of batches for the dataset, or None if it is unknown. The
caller could use this to control the loop of training, show progress bar,
or handle unexpected StopIteration error.
"""
raise NotImplementedError
@abc.abstractmethod
def batch_size(self):
"""Return the batch size of the dataset created.
    For certain types of data input, the batch size is known, and even
    required, e.g. for a numpy array. Whereas for a dataset, the batch size is
    unknown unless we take a peek.
Returns:
int, the batch size of the dataset, or None if it is unknown.
"""
raise NotImplementedError
def representative_batch_size(self):
"""Return a representative size for batches in the dataset.
This is not guaranteed to be the batch size for all batches in the
dataset. It just needs to be a rough approximation for batch sizes in
the dataset.
Returns:
int, a representative size for batches found in the dataset,
or None if it is unknown.
"""
return self.batch_size()
@abc.abstractmethod
def has_partial_batch(self):
"""Whether the dataset has partial batch at the end."""
raise NotImplementedError
@abc.abstractmethod
def partial_batch_size(self):
"""The size of the final partial batch for dataset.
Will return None if has_partial_batch is False or batch_size is None.
"""
raise NotImplementedError
@abc.abstractmethod
def should_recreate_iterator(self):
"""Returns whether a new iterator should be created every epoch."""
raise NotImplementedError
def get_samples(self):
"""Returns number of samples in the data, or `None`."""
if not self.get_size() or not self.batch_size():
return None
total_sample = self.get_size() * self.batch_size()
if self.has_partial_batch():
total_sample -= (self.batch_size() - self.partial_batch_size())
return total_sample
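    # For example (illustrative only): get_size() == 10, batch_size() == 32 and
    # partial_batch_size() == 5 give 10 * 32 - (32 - 5) == 293 samples.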
def on_epoch_end(self):
"""A hook called after each epoch."""
pass
class TensorLikeDataAdapter(DataAdapter):
"""Adapter that handles Tensor-like objects, e.g. EagerTensor and NumPy."""
@staticmethod
def can_handle(x, y=None):
# TODO(kaftan): Check performance implications of using a flatten
# here for other types of inputs.
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
tensor_types = (ops.Tensor, np.ndarray)
if pd:
tensor_types = (ops.Tensor, np.ndarray, pd.Series, pd.DataFrame)
def _is_tensor(v):
if isinstance(v, tensor_types):
return True
return False
return all(_is_tensor(v) for v in flat_inputs)
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
epochs=1,
steps=None,
shuffle=False,
**kwargs):
super(TensorLikeDataAdapter, self).__init__(x, y, **kwargs)
x, y, sample_weights = _process_tensorlike((x, y, sample_weights))
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
# If sample_weights are not specified for an output use 1.0 as weights.
(sample_weights, _, _) = training_utils.handle_partial_sample_weights(
y, sample_weights, sample_weight_modes, check_all_flat=True)
inputs = pack_x_y_sample_weight(x, y, sample_weights)
num_samples = set(int(i.shape[0]) for i in nest.flatten(inputs)).pop()
_check_data_cardinality(inputs)
# If batch_size is not passed but steps is, calculate from the input data.
# Default to 32 for backwards compat.
if not batch_size:
batch_size = int(math.ceil(num_samples / steps)) if steps else 32
self._size = int(math.ceil(num_samples / batch_size))
self._batch_size = batch_size
num_full_batches = int(num_samples // batch_size)
self._partial_batch_size = num_samples % batch_size
if isinstance(shuffle, str):
shuffle = shuffle.lower()
self._shuffle = shuffle
# Vectorized version of shuffle.
# This is a performance improvement over using `from_tensor_slices`.
# The indices of the data are shuffled and batched, and these indices
# are then zipped with the data and used to extract a batch of the data
# at each step. The performance improvements here come from:
# 1. vectorized batch using gather
# 2. parallelized map
# 3. pipelined permutation generation
# 4. optimized permutation batching
# 5. disabled static optimizations
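    # Conceptual sketch of (1): for a batch of indices i = [3, 0, 2] and a data
    # tensor d, `array_ops.gather(d, i, axis=0)` yields [d[3], d[0], d[2]] in a
    # single vectorized op instead of one slice per element.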
indices_dataset = dataset_ops.DatasetV2.range(1)
if shuffle != "batch":
indices_dataset = indices_dataset.repeat(epochs)
def permutation(_):
# It turns out to be more performant to make a new set of indices rather
# than reusing the same range Tensor. (presumably because of buffer
# forwarding.)
indices = math_ops.range(num_samples, dtype=dtypes.int64)
if shuffle and shuffle != "batch":
indices = random_ops.random_shuffle(indices)
return indices
# We prefetch a single element. Computing large permutations can take quite
# a while so we don't want to wait for prefetching over an epoch boundary to
# trigger the next permutation. On the other hand, too many simultaneous
# shuffles can contend on a hardware level and degrade all performance.
indices_dataset = indices_dataset.map(permutation).prefetch(1)
def slice_batch_indices(indices):
"""Convert a Tensor of indices into a dataset of batched indices.
This step can be accomplished in several ways. The most natural is to
slice the Tensor in a Dataset map. (With a condition on the upper index to
handle the partial batch.) However it turns out that coercing the Tensor
into a shape which is divisible by the batch size (and handling the last
partial batch separately) allows for a much more favorable memory access
pattern and improved performance.
Args:
indices: Tensor which determines the data order for an entire epoch.
Returns:
A Dataset of batched indices.
"""
num_in_full_batch = num_full_batches * batch_size
first_k_indices = array_ops.slice(indices, [0], [num_in_full_batch])
first_k_indices = array_ops.reshape(
first_k_indices, [num_full_batches, batch_size])
flat_dataset = dataset_ops.DatasetV2.from_tensor_slices(first_k_indices)
if self._partial_batch_size:
index_remainder = dataset_ops.DatasetV2.from_tensors(array_ops.slice(
indices, [num_in_full_batch], [self._partial_batch_size]))
flat_dataset = flat_dataset.concatenate(index_remainder)
if shuffle == "batch":
# 1024 is a magic constant that has not been properly evaluated
flat_dataset = flat_dataset.shuffle(1024).repeat(epochs)
return flat_dataset
indices_dataset = indices_dataset.flat_map(slice_batch_indices)
dataset = self.slice_inputs(indices_dataset, inputs)
if shuffle == "batch":
def shuffle_batch(*batch):
return nest.map_structure(random_ops.random_shuffle, batch)
dataset = dataset.map(shuffle_batch)
self._dataset = dataset
def slice_inputs(self, indices_dataset, inputs):
"""Slice inputs into a Dataset of batches.
Given a Dataset of batch indices and the unsliced inputs,
this step slices the inputs in a parallelized fashion
and produces a dataset of input batches.
Args:
indices_dataset: A Dataset of batched indices
inputs: A python data structure that contains the inputs, targets,
and possibly sample weights.
Returns:
A Dataset of input batches matching the batch indices.
"""
dataset = dataset_ops.DatasetV2.zip((
indices_dataset,
dataset_ops.DatasetV2.from_tensors(inputs).repeat()
))
def grab_batch(i, data):
return nest.map_structure(lambda d: array_ops.gather(d, i, axis=0), data)
dataset = dataset.map(
grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)
# Default optimizations are disabled to avoid the overhead of (unnecessary)
# input pipeline graph serialization and deserialization
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
if self._shuffle:
# See b/141490660 for more details.
options.experimental_external_state_policy = (
distribute_options.ExternalStatePolicy.IGNORE)
dataset = dataset.with_options(options)
return dataset
def get_dataset(self):
return self._dataset
def get_size(self):
return self._size
def batch_size(self):
return self._batch_size
def has_partial_batch(self):
return self._partial_batch_size > 0
def partial_batch_size(self):
return self._partial_batch_size or None
def should_recreate_iterator(self):
# An infinite dataset is always created here.
return False
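  # Illustrative usage sketch (hypothetical, for clarity only; `x` and `y` are
  # assumed to be NumPy arrays with matching first dimensions):
  #   adapter = TensorLikeDataAdapter(x, y, batch_size=32, shuffle=True)
  #   for batch in adapter.get_dataset():
  #     ...  # batches packed as (x_batch, y_batch)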
class GenericArrayLikeDataAdapter(TensorLikeDataAdapter):
"""Adapter that handles array-like data without forcing it into memory.
This adapter handles array-like datasets that may be too big to fully
fit into memory.
Specifically, this adapter handles any Python class which implements:
  `__getitem__`, `__len__`, `shape`, and `dtype` with the same meanings
as Numpy, but it ignores any case where all the inputs are Tensors or Numpy
arrays (because that case is handled by the base TensorLikeDataAdapter).
It ignores scipy sparse matrices and Composite Tensors because those are
handled by the CompositeTensorDataAdapter.
It also does not handle lists/tuples of scalars, because those are handled
by the ListsOfScalarsDataAdapter.
"""
@staticmethod
def can_handle(x, y=None):
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
def _is_array_like(v):
"""Return True if v is a Tensor, array, or is array-like."""
return (
hasattr(v, "__getitem__") and
hasattr(v, "shape") and
hasattr(v, "dtype") and
hasattr(v, "__len__")
)
if (not TensorLikeDataAdapter.can_handle(x, y) and
not CompositeTensorDataAdapter.can_handle(x, y)):
return all(_is_array_like(v) for v in flat_inputs)
else:
return False
def __init__(self, *args, **kwargs):
logging.warn(
"Keras is training/fitting/evaluating on array-like data. Keras may "
"not be optimized for this format, so if your input data format is "
"supported by TensorFlow I/O (https://github.com/tensorflow/io) we "
"recommend using that to load a Dataset instead.")
super(GenericArrayLikeDataAdapter, self).__init__(*args, **kwargs)
def slice_inputs(self, indices_dataset, inputs):
"""Slice inputs into a Dataset of batches.
Given a Dataset of batch indices and the unsliced inputs,
this step slices the inputs in a parallelized fashion
and produces a dataset of input batches.
Args:
indices_dataset: A Dataset of batched indices
inputs: A python data structure that contains the inputs, targets,
and possibly sample weights.
Returns:
A Dataset of input batches matching the batch indices.
"""
flat_inputs = nest.flatten(inputs)
def dynamic_shape_like(t):
shape = list(t.shape)
shape[0] = None
return tuple(shape)
flat_dtypes = [inp.dtype for inp in flat_inputs]
contiguous = True
if self._shuffle and self._shuffle != "batch":
contiguous = False
def grab_batch(indices):
"""Grab a batch of data from the inputs."""
# This uses a py_function to avoid converting the array-like
# into a Tensor before slicing it, because converting the array-like
      # to a Tensor may force it into memory.
def py_method(ind):
def slice_array(data):
return training_utils.slice_arrays(data, ind.numpy(),
contiguous=contiguous)
return [slice_array(inp) for inp in flat_inputs]
flat_out = script_ops.eager_py_func(py_method, [indices], flat_dtypes)
for v, original_inp in zip(flat_out, flat_inputs):
v.set_shape(dynamic_shape_like(original_inp))
return nest.pack_sequence_as(inputs, flat_out)
dataset = indices_dataset.map(
grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)
return dataset
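# Illustrative sketch of the array-like interface described in the docstring
# above (`__getitem__`, `__len__`, `shape`, `dtype`), i.e. the kind of object
# `GenericArrayLikeDataAdapter.can_handle` accepts. `_SketchArrayLike` is a
# hypothetical class for illustration only; realistic cases are memory-mapped
# or lazily loaded arrays that should not be pulled into memory up front.
class _SketchArrayLike(object):
  """Wraps a NumPy array but is neither a Tensor nor an ndarray."""

  def __init__(self, array):
    self._array = array
    self.shape = array.shape
    self.dtype = array.dtype

  def __len__(self):
    return len(self._array)

  def __getitem__(self, key):
    return self._array[key]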
class DatasetCreatorAdapter(DataAdapter):
"""Adapter that handles dataset functions."""
def __init__(self, x, *args, **kwargs):
super(DatasetCreatorAdapter, self).__init__(x, *args, **kwargs)
if not isinstance(x, dataset_creator.DatasetCreator):
raise TypeError("The input of a `DatasetCreatorAdapter` should be a "
"`DatasetCreator` but it received type {}.".format(
type(x)))
self.dataset_creator = x
self.strategy = kwargs.get("distribution_strategy", None)
@staticmethod
def can_handle(x, y=None):
if isinstance(x, dataset_creator.DatasetCreator):
assert y is None
return True
def should_recreate_iterator(self):
# We expect users to shuffle the dataset in their `dataset_fn` supplied to
# `DatasetCreator`. Since that is a buffered shuffle, we intend to not reset
# the dataset so the batches that are not shuffled can still be pulled.
return False
def get_size(self):
return None # To be inferred by `DataHandler`.
def get_dataset(self):
return self.strategy.distribute_datasets_from_function(self.dataset_creator)
def batch_size(self):
raise NotImplementedError()
def has_partial_batch(self):
raise NotImplementedError()
def partial_batch_size(self):
raise NotImplementedError()
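# Illustrative sketch of the input `DatasetCreatorAdapter` expects: a callable
# wrapped in a `DatasetCreator` that builds a shuffled, repeated, batched
# dataset per worker. `_sketch_dataset_creator` is a hypothetical helper and
# assumes the public `tf.keras.utils.experimental.DatasetCreator` API.
def _sketch_dataset_creator():
  import tensorflow as tf

  def dataset_fn(input_context):
    batch_size = input_context.get_per_replica_batch_size(64)
    ds = tf.data.Dataset.from_tensor_slices((tf.range(100.), tf.range(100.)))
    return ds.shuffle(100).repeat().batch(batch_size)

  return tf.keras.utils.experimental.DatasetCreator(dataset_fn)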
class CompositeTensorDataAdapter(DataAdapter):
"""Adapter that handles composite tensor."""
@staticmethod
def can_handle(x, y=None):
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
def _is_composite(v):
# Dataset/iterator/DistributedDataset inherits from CompositeTensor but
# should be handled by DatasetAdapter and GeneratorAdapter.
if (tf_utils.is_extension_type(v) and
not isinstance(v,
(dataset_ops.DatasetV2, iterator_ops.IteratorBase)) and
not _is_distributed_dataset(v)):
return True
# Support Scipy sparse tensors if scipy is installed
if scipy_sparse is not None and scipy_sparse.issparse(v):
return True
return False
def _is_tensor_or_composite(v):
if isinstance(v, (ops.Tensor, np.ndarray)):
return True
return _is_composite(v)
return (any(_is_composite(v) for v in flat_inputs) and
all(_is_tensor_or_composite(v) for v in flat_inputs))
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
steps=None,
shuffle=False,
**kwargs):
super(CompositeTensorDataAdapter, self).__init__(x, y, **kwargs)
x, y, sample_weights = _process_tensorlike((x, y, sample_weights))
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
# If sample_weights are not specified for an output use 1.0 as weights.
(sample_weights, _, _) = training_utils.handle_partial_sample_weights(
y, sample_weights, sample_weight_modes, check_all_flat=True)
inputs = pack_x_y_sample_weight(x, y, sample_weights)
dataset = dataset_ops.DatasetV2.from_tensor_slices(inputs)
num_samples = int(nest.flatten(x)[0].shape[0])
if shuffle:
dataset = dataset.shuffle(num_samples)
# If batch_size is not passed but steps is, calculate from the input data.
# Default to 32 for backwards compat.
if not batch_size:
batch_size = int(math.ceil(num_samples / steps)) if steps else 32
dataset = dataset.batch(batch_size)
self._size = int(math.ceil(num_samples / batch_size))
self._batch_size = batch_size
self._has_partial_batch = (self._size != (num_samples // batch_size))
self._partial_batch_size = None
if self._has_partial_batch:
self._partial_batch_size = (
num_samples - (self._size - 1) * self._batch_size)
self._dataset = dataset
def get_dataset(self):
return self._dataset
def get_size(self):
return self._size
def batch_size(self):
return self._batch_size
def has_partial_batch(self):
return self._has_partial_batch
def partial_batch_size(self):
return self._partial_batch_size
def should_recreate_iterator(self):
return True
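# Illustrative sketch of inputs `CompositeTensorDataAdapter.can_handle`
# accepts: a composite tensor (ragged or sparse) paired with dense targets.
# `_sketch_composite_inputs` is a hypothetical helper for illustration only
# and assumes the public TF 2.x API.
def _sketch_composite_inputs():
  import tensorflow as tf

  ragged_x = tf.ragged.constant([[1., 2.], [3.], [4., 5., 6.]])
  sparse_x = tf.sparse.from_dense([[0., 1.], [2., 0.], [0., 3.]])
  y = tf.constant([0., 1., 0.])
  return (ragged_x, y), (sparse_x, y)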
class ListsOfScalarsDataAdapter(DataAdapter):
"""Adapter that handles lists of scalars and lists of lists of scalars."""
@staticmethod
def can_handle(x, y=None):
handles_x = ListsOfScalarsDataAdapter._is_list_of_scalars(x)
handles_y = True
if y is not None:
handles_y = ListsOfScalarsDataAdapter._is_list_of_scalars(y)
return handles_x and handles_y
@staticmethod
def _is_list_of_scalars(inp):
if isinstance(inp, (float, int, str, bytes, bytearray)):
return True
if isinstance(inp, (list, tuple)) and inp:
return ListsOfScalarsDataAdapter._is_list_of_scalars(inp[0])
return False
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
shuffle=False,
**kwargs):
super(ListsOfScalarsDataAdapter, self).__init__(x, y, **kwargs)
x = np.asarray(x)
if y is not None:
y = np.asarray(y)
if sample_weights is not None:
sample_weights = np.asarray(sample_weights)
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
self._internal_adapter = TensorLikeDataAdapter(
x,
y=y,
sample_weights=sample_weights,
sample_weight_modes=sample_weight_modes,
batch_size=batch_size,
shuffle=shuffle,
**kwargs)
def get_dataset(self):
return self._internal_adapter.get_dataset()
def get_size(self):
return self._internal_adapter.get_size()
def batch_size(self):
return self._internal_adapter.batch_size()
def has_partial_batch(self):
return self._internal_adapter.has_partial_batch()
def partial_batch_size(self):
return self._internal_adapter.partial_batch_size()
def should_recreate_iterator(self):
return True
class DatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
@staticmethod
def can_handle(x, y=None):
return (isinstance(x, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)) or
_is_distributed_dataset(x))
def __init__(self,
x,
y=None,
sample_weights=None,
steps=None,
**kwargs):
super(DatasetAdapter, self).__init__(x, y, **kwargs)
    # Note that the dataset instance is immutable, it's fine to reuse the user
# provided dataset.
self._dataset = x
# The user-provided steps.
self._user_steps = steps
self._validate_args(y, sample_weights, steps)
def get_dataset(self):
return self._dataset
def get_size(self):
return # Inferred in `DataHandler`.
def batch_size(self):
return None
def has_partial_batch(self):
return False
def partial_batch_size(self):
return None
def should_recreate_iterator(self):
# Since DistributedDatasets have no cardinality, the user must provide
# all steps that need to be run, calling `.repeat()` as needed.
if _is_distributed_dataset(self._dataset):
return False
# If user doesn't supply `steps`, or if they supply `steps` that
# exactly equals the size of the `Dataset`, create a new iterator
# each epoch.
return (self._user_steps is None or
cardinality.cardinality(self._dataset).numpy() == self._user_steps)
def _validate_args(self, y, sample_weights, steps):
"""Validates `__init__` arguments."""
# Arguments that shouldn't be passed.
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"dataset as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"dataset as input.")
if steps is None:
if _is_distributed_dataset(self._dataset):
raise ValueError("When providing a distributed dataset, you must "
"specify the number of steps to run.")
size = cardinality.cardinality(self._dataset).numpy()
if size == cardinality.INFINITE and steps is None:
raise ValueError(
"When providing an infinite dataset, you must specify "
"the number of steps to run (if you did not intend to "
"create an infinite dataset, make sure to not call "
"`repeat()` on the dataset).")
class GeneratorDataAdapter(DataAdapter):
"""Adapter that handles python generators and iterators."""
@staticmethod
def can_handle(x, y=None):
return ((hasattr(x, "__next__") or hasattr(x, "next"))
and hasattr(x, "__iter__")
and not isinstance(x, data_utils.Sequence))
def __init__(self,
x,
y=None,
sample_weights=None,
workers=1,
use_multiprocessing=False,
max_queue_size=10,
model=None,
**kwargs):
# Generators should never shuffle as exhausting the generator in order to
# shuffle the batches is inefficient.
kwargs.pop("shuffle", None)
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"python generator as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"python generator as input.")
super(GeneratorDataAdapter, self).__init__(x, y, **kwargs)
# Since we have to know the dtype of the python generator when we build the
# dataset, we have to look at a batch to infer the structure.
peek, x = self._peek_and_restore(x)
peek = self._standardize_batch(peek)
peek = _process_tensorlike(peek)
# Need to build the Model on concrete input shapes.
if model is not None and not model.built:
concrete_x, _, _ = unpack_x_y_sample_weight(peek)
model.distribute_strategy.run(
lambda x: model(x, training=False), args=(concrete_x,))
self._first_batch_size = int(nest.flatten(peek)[0].shape[0])
def _get_dynamic_shape(t):
shape = t.shape
# Unknown number of dimensions, `as_list` cannot be called.
if shape.rank is None:
return shape
return tensor_shape.TensorShape([None for _ in shape.as_list()])
output_shapes = nest.map_structure(_get_dynamic_shape, peek)
output_types = nest.map_structure(lambda t: t.dtype, peek)
    # Note that the dataset API takes a callable that creates a generator
    # object, rather than the generator itself, which is why we define a
    # function here.
generator_fn = self._handle_multiprocessing(x, workers, use_multiprocessing,
max_queue_size)
def wrapped_generator():
for data in generator_fn():
yield self._standardize_batch(data)
dataset = dataset_ops.DatasetV2.from_generator(
wrapped_generator, output_types, output_shapes=output_shapes)
if workers == 1 and not use_multiprocessing:
dataset = dataset.prefetch(1)
self._dataset = dataset
def _standardize_batch(self, data):
"""Standardizes a batch output by a generator."""
# Removes `None`s.
x, y, sample_weight = unpack_x_y_sample_weight(data)
data = pack_x_y_sample_weight(x, y, sample_weight)
data = nest.list_to_tuple(data)
def _convert_dtype(t):
if (isinstance(t, np.ndarray) and issubclass(t.dtype.type, np.floating)):
return np.array(t, dtype=backend.floatx())
return t
data = nest.map_structure(_convert_dtype, data)
return data
@staticmethod
def _peek_and_restore(x):
peek = next(x)
return peek, itertools.chain([peek], x)
def _handle_multiprocessing(self, x, workers, use_multiprocessing,
max_queue_size):
"""Create a callable, possibly including an Enqueuer."""
if workers > 1 or (workers > 0 and use_multiprocessing):
def generator_fn():
enqueuer = data_utils.GeneratorEnqueuer(
x, use_multiprocessing=use_multiprocessing)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
return enqueuer.get()
else:
generator_fn = lambda: x
return generator_fn
def get_dataset(self):
return self._dataset
def get_size(self):
return None
def batch_size(self):
return None
def representative_batch_size(self):
return self._first_batch_size
def has_partial_batch(self):
return False
def partial_batch_size(self):
return
def should_recreate_iterator(self):
return False
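# Illustrative sketch of the peek-and-restore trick used by `_peek_and_restore`
# above: consume one element to inspect its structure and dtypes, then chain it
# back so downstream consumers still see the full stream.
# `_sketch_peek_and_restore` is a hypothetical helper in plain Python.
def _sketch_peek_and_restore():
  import itertools

  def gen():
    for i in range(3):
      yield {"x": [i, i + 1]}

  it = gen()
  first = next(it)                         # inspect structure here
  restored = itertools.chain([first], it)  # nothing is lost downstream
  return first, list(restored)             # the list still has all 3 elements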
class KerasSequenceAdapter(GeneratorDataAdapter):
"""Adapter that handles `keras.utils.Sequence`."""
@staticmethod
def can_handle(x, y=None):
return isinstance(x, data_utils.Sequence)
def __init__(self,
x,
y=None,
sample_weights=None,
shuffle=False,
workers=1,
use_multiprocessing=False,
max_queue_size=10,
model=None,
**kwargs):
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"`keras.utils.Sequence` as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"`keras.utils.Sequence` as input.")
self._size = len(x)
self._shuffle_sequence = shuffle
self._keras_sequence = x
self._enqueuer = None
super(KerasSequenceAdapter, self).__init__(
x,
        shuffle=False,  # Shuffle is handled in the _make_callable override.
workers=workers,
use_multiprocessing=use_multiprocessing,
max_queue_size=max_queue_size,
model=model,
**kwargs)
@staticmethod
def _peek_and_restore(x):
return x[0], x
def _handle_multiprocessing(self, x, workers, use_multiprocessing,
max_queue_size):
if workers > 1 or (workers > 0 and use_multiprocessing):
def generator_fn():
self._enqueuer = data_utils.OrderedEnqueuer(
x, use_multiprocessing=use_multiprocessing,
shuffle=self._shuffle_sequence)
self._enqueuer.start(workers=workers, max_queue_size=max_queue_size)
return self._enqueuer.get()
else:
def generator_fn():
order = range(len(x))
if self._shuffle_sequence:
# Match the shuffle convention in OrderedEnqueuer.
order = list(order)
random.shuffle(order)
for i in order:
yield x[i]
return generator_fn
def get_size(self):
return self._size
def should_recreate_iterator(self):
return True
def on_epoch_end(self):
if self._enqueuer:
self._enqueuer.stop()
self._keras_sequence.on_epoch_end()
ALL_ADAPTER_CLS = [
ListsOfScalarsDataAdapter, TensorLikeDataAdapter,
GenericArrayLikeDataAdapter, DatasetAdapter, GeneratorDataAdapter,
KerasSequenceAdapter, CompositeTensorDataAdapter, DatasetCreatorAdapter
]
def select_data_adapter(x, y):
"""Selects a data adapter than can handle a given x and y."""
adapter_cls = [cls for cls in ALL_ADAPTER_CLS if cls.can_handle(x, y)]
if not adapter_cls:
# TODO(scottzhu): This should be a less implementation-specific error.
raise ValueError(
"Failed to find data adapter that can handle "
"input: {}, {}".format(
_type_name(x), _type_name(y)))
elif len(adapter_cls) > 1:
raise RuntimeError(
"Data adapters should be mutually exclusive for "
"handling inputs. Found multiple adapters {} to handle "
"input: {}, {}".format(
adapter_cls, _type_name(x), _type_name(y)))
# Instrument the data adapter usage before returning it
keras_data_adapter_gauge.get_cell(adapter_cls[0].__name__).set(True)
return adapter_cls[0]
def _type_name(x):
"""Generates a description of the type of an object."""
if isinstance(x, dict):
key_types = set(_type_name(key) for key in x.keys())
    val_types = set(_type_name(val) for val in x.values())
return "({} containing {} keys and {} values)".format(
type(x), key_types, val_types)
if isinstance(x, (list, tuple)):
types = set(_type_name(val) for val in x)
return "({} containing values of types {})".format(
type(x), types)
return str(type(x))
def _process_tensorlike(inputs):
"""Process tensor-like inputs.
This function:
(1) Converts `Numpy` arrays to `Tensor`s.
(2) Converts `Scipy` sparse matrices to `SparseTensor`s.
    (3) Converts `list`s to `tuple`s (for `tf.data` support).
Args:
inputs: Structure of `Tensor`s, `NumPy` arrays, or tensor-like.
Returns:
Structure of `Tensor`s or tensor-like.
"""
def _convert_numpy_and_scipy(x):
if isinstance(x, np.ndarray):
dtype = None
if issubclass(x.dtype.type, np.floating):
dtype = backend.floatx()
return ops.convert_to_tensor_v2_with_dispatch(x, dtype=dtype)
elif scipy_sparse and scipy_sparse.issparse(x):
return _scipy_sparse_to_sparse_tensor(x)
return x
inputs = nest.map_structure(_convert_numpy_and_scipy, inputs)
return nest.list_to_tuple(inputs)
def is_none_or_empty(inputs):
  # util method to check if the input is a None or an empty list.
# the python "not" check will raise an error like below if the input is a
# numpy array
# "The truth value of an array with more than one element is ambiguous.
# Use a.any() or a.all()"
return inputs is None or not nest.flatten(inputs)
def broadcast_sample_weight_modes(target_structure, sample_weight_modes):
"""Match sample_weight_modes structure with output structure."""
if target_structure is None or not nest.flatten(target_structure):
return sample_weight_modes
if isinstance(sample_weight_modes, str):
if isinstance(target_structure, dict):
return {key: sample_weight_modes for key in target_structure.keys()}
return [sample_weight_modes for _ in target_structure]
if sample_weight_modes:
try:
nest.assert_same_structure(
training_utils.list_to_tuple(target_structure),
training_utils.list_to_tuple(sample_weight_modes))
except (ValueError, TypeError):
target_str = str(nest.map_structure(lambda _: "...", target_structure))
mode_str = str(nest.map_structure(lambda _: "...", sample_weight_modes))
# Attempt to coerce sample_weight_modes to the target structure. This
# implicitly depends on the fact that Model flattens outputs for its
# internal representation.
try:
sample_weight_modes = nest.pack_sequence_as(
target_structure, nest.flatten(sample_weight_modes))
logging.warning(
"sample_weight modes were coerced from\n {}\n to \n {}"
.format(target_str, mode_str))
except (ValueError, TypeError):
raise ValueError(
"Unable to match target structure and sample_weight_modes "
"structure:\n {}\n to \n {}".format(target_str, mode_str))
return sample_weight_modes
class DataHandler(object):
"""Handles iterating over epoch-level `tf.data.Iterator` objects."""
def __init__(self,
x,
y=None,
sample_weight=None,
batch_size=None,
steps_per_epoch=None,
initial_epoch=0,
epochs=1,
shuffle=False,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
model=None,
steps_per_execution=None,
distribute=True):
"""Initializes a `DataHandler`.
Arguments:
x: See `Model.fit`.
y: See `Model.fit`.
sample_weight: See `Model.fit`.
batch_size: See `Model.fit`.
steps_per_epoch: See `Model.fit`.
initial_epoch: See `Model.fit`.
epochs: See `Model.fit`.
shuffle: See `Model.fit`.
class_weight: See `Model.fit`.
max_queue_size: See `Model.fit`.
workers: See `Model.fit`.
use_multiprocessing: See `Model.fit`.
model: The `Model` instance. Needed in order to correctly `build` the
`Model` using generator-like inputs (see `GeneratorDataAdapter`).
steps_per_execution: See `Model.compile`.
distribute: Whether to distribute the `tf.dataset`.
`PreprocessingLayer.adapt` does not support distributed datasets,
`Model` should always set this to `True`.
"""
self._initial_epoch = initial_epoch
self._epochs = epochs
self._insufficient_data = False
self._model = model
# `steps_per_execution_value` is the cached initial value.
# `steps_per_execution` is mutable and may be changed by the DataAdapter
# to handle partial executions.
if steps_per_execution is None:
self._steps_per_execution = 1
self._steps_per_execution_value = 1
else:
self._steps_per_execution = steps_per_execution
self._steps_per_execution_value = steps_per_execution.numpy().item()
adapter_cls = select_data_adapter(x, y)
self._verify_data_adapter_compatibility(adapter_cls)
self._adapter = adapter_cls(
x,
y,
batch_size=batch_size,
steps=steps_per_epoch,
epochs=epochs - initial_epoch,
sample_weights=sample_weight,
shuffle=shuffle,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
distribution_strategy=ds_context.get_strategy(),
model=model)
strategy = ds_context.get_strategy()
self._current_step = 0
self._step_increment = self._steps_per_execution_value - 1
self._insufficient_data = False
self._configure_dataset_and_inferred_steps(strategy, x, steps_per_epoch,
class_weight, distribute)
def _verify_data_adapter_compatibility(self, adapter_cls):
pass
def _configure_dataset_and_inferred_steps(self, strategy, x, steps_per_epoch,
class_weight, distribute):
"""Configure the `_dataset` and `_inferred_steps` attributes."""
del x
dataset = self._adapter.get_dataset()
if class_weight:
dataset = dataset.map(_make_class_weight_map_fn(class_weight))
self._inferred_steps = self._infer_steps(steps_per_epoch, dataset)
# `PreprocessingLayer.adapt` does not currently support distributed
# datasets, so we pass `distribute=False` there.
if distribute and not _is_distributed_dataset(dataset):
dataset = strategy.experimental_distribute_dataset(dataset)
self._dataset = dataset
self._validate_data_handler()
def enumerate_epochs(self):
"""Yields `(epoch, tf.data.Iterator)`."""
with self._truncate_execution_to_epoch():
data_iterator = iter(self._dataset)
for epoch in range(self._initial_epoch, self._epochs):
if self._insufficient_data: # Set by `catch_stop_iteration`.
break
if self._adapter.should_recreate_iterator():
data_iterator = iter(self._dataset)
yield epoch, data_iterator
self._adapter.on_epoch_end()
@contextlib.contextmanager
def _truncate_execution_to_epoch(self):
"""Truncates steps per execution to at most one epoch."""
should_truncate = (
self._inferred_steps is not None and
self._steps_per_execution_value > self._inferred_steps)
original_value = self._steps_per_execution_value
try:
if should_truncate:
self._steps_per_execution.assign(self._inferred_steps)
self._steps_per_execution_value = self._inferred_steps
yield
finally:
if should_truncate:
self._steps_per_execution.assign(original_value)
self._steps_per_execution_value = original_value
def sync(self):
context.async_wait()
@contextlib.contextmanager
def catch_stop_iteration(self):
"""Catches errors when an iterator runs out of data."""
try:
yield
self.sync()
except (StopIteration, errors.OutOfRangeError):
if self._inferred_steps is None:
self._inferred_steps = self._current_step
else:
self._insufficient_data = True
total_epochs = self._epochs - self._initial_epoch
logging.warning(
"Your input ran out of data; interrupting training. "
"Make sure that your dataset or generator can generate at "
"least `steps_per_epoch * epochs` batches (in this case, "
"{} batches). You may need to use the repeat() function "
"when building your dataset.".format(total_epochs *
self._inferred_steps))
def steps(self):
"""Yields steps for the current epoch."""
self._current_step = 0
# `self._inferred_steps` can be changed by `catch_stop_iteration`.
while (self._inferred_steps is None or
self._current_step < self._inferred_steps):
if self._insufficient_data: # Set by `catch_stop_iteration`.
break
can_run_full_execution = (
self._steps_per_execution_value == 1 or
self._inferred_steps is None or
self._inferred_steps - self._current_step >=
self._steps_per_execution_value)
if can_run_full_execution:
self._step_increment = self._steps_per_execution_value - 1
yield self._current_step
self._current_step += self._steps_per_execution_value
else:
# Last partial execution.
steps_remaining = self._inferred_steps - self._current_step
self._steps_per_execution.assign(steps_remaining)
self._step_increment = steps_remaining - 1
yield self._current_step
self._current_step += steps_remaining
self._steps_per_execution.assign(self._steps_per_execution_value)
@property
def step_increment(self):
"""The number to increment the step for `on_batch_end` methods."""
return self._step_increment
@property
def inferred_steps(self):
"""The inferred steps per epoch of the created `Dataset`.
This will be `None` in the case where:
(1) A `Dataset` of unknown cardinality was passed to the `DataHandler`, and
(2) `steps_per_epoch` was not provided, and
(3) The first epoch of iteration has not yet completed.
Returns:
The inferred steps per epoch of the created `Dataset`.
"""
return self._inferred_steps
@property
def should_sync(self):
# Catch OutOfRangeError for Datasets of unknown size.
# This blocks until the batch has finished executing.
# TODO(b/150292341): Allow multiple async steps here.
return self._inferred_steps is None
def _infer_steps(self, steps, dataset):
"""Infers steps_per_epoch needed to loop through a dataset."""
if steps is not None:
return steps
adapter_steps = self._adapter.get_size()
if adapter_steps is not None:
return adapter_steps
size = cardinality.cardinality(dataset)
if size == cardinality.INFINITE and steps is None:
raise ValueError("When passing an infinitely repeating dataset, you "
"must specify how many steps to draw.")
if size >= 0:
return size.numpy().item()
return None
@property
def _samples(self):
return self._adapter.get_samples()
def _validate_data_handler(self):
# TODO(b/152094471): Support this with DistIter.get_next_as_optional.
if self._steps_per_execution_value > 1 and self._inferred_steps is None:
raise ValueError(
"Could not infer the size of the data. With "
"`steps_per_execution > 1`, you must specify the number of steps "
"to run.")
def resolve_logs(self, logs):
return logs
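# Illustrative sketch of how `DataHandler.steps` above partitions an epoch when
# `steps_per_execution` does not divide the epoch length evenly, e.g. 10 steps
# executed 4 at a time become executions of 4, 4 and 2 steps.
# `_sketch_step_partition` is a hypothetical helper for illustration only.
def _sketch_step_partition(total_steps=10, steps_per_execution=4):
  executions = []
  current = 0
  while current < total_steps:
    run = min(steps_per_execution, total_steps - current)
    executions.append(run)
    current += run
  return executions  # [4, 4, 2]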
class _ClusterCoordinatorDataHandler(DataHandler):
"""A `DataHandler` that is compatible with `ClusterCoordinator`."""
def _verify_data_adapter_compatibility(self, adapter_cls):
if adapter_cls != DatasetCreatorAdapter:
raise NotImplementedError("Only `DatasetCreator` input is supported in "
"`ParameterServerStrategy` at this time.")
def _configure_dataset_and_inferred_steps(self, strategy, x, steps_per_epoch,
class_weight, distribute):
if not isinstance(x, dataset_creator.DatasetCreator):
raise TypeError("When using `ParameterServerStrategy`, `x` must be a "
"`DatasetCreator`.")
def per_worker_dataset_fn():
return strategy.distribute_datasets_from_function(x)
self._dataset = self._model._cluster_coordinator.create_per_worker_dataset( # pylint: disable=protected-access
per_worker_dataset_fn)
if steps_per_epoch is None:
raise ValueError(
"`steps_per_epoch` must be specified with `ParameterServerStrategy`.")
self._inferred_steps = steps_per_epoch
def sync(self):
self._model._cluster_coordinator.join() # pylint: disable=protected-access
def resolve_logs(self, logs):
return logs.fetch()
def get_data_handler(*args, **kwargs):
if getattr(kwargs["model"], "_cluster_coordinator", None):
return _ClusterCoordinatorDataHandler(*args, **kwargs)
return DataHandler(*args, **kwargs)
def _make_class_weight_map_fn(class_weight):
"""Applies class weighting to a `Dataset`.
The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where
`y` must be a single `Tensor`.
Args:
class_weight: A map where the keys are integer class ids and values are
the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`
Returns:
A function that can be used with `tf.data.Dataset.map` to apply class
weighting.
"""
class_ids = list(sorted(class_weight.keys()))
expected_class_ids = list(range(len(class_ids)))
if class_ids != expected_class_ids:
error_msg = (
"Expected `class_weight` to be a dict with keys from 0 to one less "
"than the number of classes, found {}").format(class_weight)
raise ValueError(error_msg)
class_weight_tensor = ops.convert_to_tensor_v2_with_dispatch(
[class_weight[int(c)] for c in class_ids])
def _class_weights_map_fn(*data):
"""Convert `class_weight` to `sample_weight`."""
x, y, sw = unpack_x_y_sample_weight(data)
if nest.is_nested(y):
raise ValueError(
"`class_weight` is only supported for Models with a single output.")
if y.shape.rank > 2:
raise ValueError("`class_weight` not supported for "
"3+ dimensional targets.")
y_classes = smart_cond.smart_cond(
y.shape.rank == 2 and backend.shape(y)[1] > 1,
lambda: backend.argmax(y, axis=1),
lambda: math_ops.cast(backend.reshape(y, (-1,)), dtypes.int64))
cw = array_ops.gather_v2(class_weight_tensor, y_classes)
if sw is not None:
cw = math_ops.cast(cw, sw.dtype)
sw, cw = expand_1d((sw, cw))
# `class_weight` and `sample_weight` are multiplicative.
sw = sw * cw
else:
sw = cw
return x, y, sw
return _class_weights_map_fn
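# Illustrative sketch of what the class-weight mapping above amounts to, in
# plain NumPy: each sample's weight is looked up from its (sparse or one-hot)
# label. `_sketch_class_weight_lookup` is a hypothetical helper and assumes
# class ids 0..n_classes-1.
def _sketch_class_weight_lookup():
  import numpy as np

  class_weight = {0: 0.2, 1: 0.6, 2: 0.3}
  weights = np.asarray([class_weight[c] for c in sorted(class_weight)])
  y_sparse = np.array([0, 2, 1, 1])            # integer class ids
  y_onehot = np.eye(3)[y_sparse]               # one-hot targets
  sw_sparse = weights[y_sparse]                # [0.2, 0.3, 0.6, 0.6]
  sw_onehot = weights[np.argmax(y_onehot, 1)]  # same result
  return sw_sparse, sw_onehot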
def expand_1d(data):
"""Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""
def _expand_single_1d_tensor(t):
# Leaves `CompositeTensor`s as-is.
if (isinstance(t, ops.Tensor) and
isinstance(t.shape, tensor_shape.TensorShape) and t.shape.rank == 1):
return array_ops.expand_dims_v2(t, axis=-1)
return t
return nest.map_structure(_expand_single_1d_tensor, data)
def train_validation_split(arrays, validation_split):
"""Split arrays into train and validation subsets in deterministic order.
The last part of data will become validation data.
Args:
arrays: Tensors to split. Allowed inputs are arbitrarily nested structures
of Tensors and NumPy arrays.
validation_split: Float between 0 and 1. The proportion of the dataset to
include in the validation split. The rest of the dataset will be included
in the training split.
Returns:
`(train_arrays, validation_arrays)`
"""
def _can_split(t):
tensor_types = (ops.Tensor, np.ndarray)
if pd:
tensor_types = (ops.Tensor, np.ndarray, pd.Series, pd.DataFrame)
return isinstance(t, tensor_types) or t is None
flat_arrays = nest.flatten(arrays)
unsplitable = [type(t) for t in flat_arrays if not _can_split(t)]
if unsplitable:
raise ValueError(
"`validation_split` is only supported for Tensors or NumPy "
"arrays, found following types in the input: {}".format(unsplitable))
if all(t is None for t in flat_arrays):
return arrays, arrays
first_non_none = None
for t in flat_arrays:
if t is not None:
first_non_none = t
break
# Assumes all arrays have the same batch shape or are `None`.
batch_dim = int(first_non_none.shape[0])
split_at = int(math.floor(batch_dim * (1. - validation_split)))
if split_at == 0 or split_at == batch_dim:
raise ValueError(
"Training data contains {batch_dim} samples, which is not sufficient "
"to split it into a validation and training set as specified by "
"`validation_split={validation_split}`. Either provide more data, or a "
"different value for the `validation_split` argument." .format(
batch_dim=batch_dim, validation_split=validation_split))
def _split(t, start, end):
if t is None:
return t
return t[start:end]
train_arrays = nest.map_structure(
functools.partial(_split, start=0, end=split_at), arrays)
val_arrays = nest.map_structure(
functools.partial(_split, start=split_at, end=batch_dim), arrays)
return train_arrays, val_arrays
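# Illustrative sketch of the deterministic split above: with
# `validation_split=0.25` the *last* quarter of the samples becomes the
# validation set. `_sketch_train_validation_split` is a hypothetical helper
# for illustration only.
def _sketch_train_validation_split():
  import numpy as np

  x = np.arange(8).reshape(8, 1)
  (x_train,), (x_val,) = train_validation_split((x,), validation_split=0.25)
  return x_train, x_val  # rows 0-5 for training, rows 6-7 for validation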
@keras_export("keras.utils.unpack_x_y_sample_weight", v1=[])
def unpack_x_y_sample_weight(data):
"""Unpacks user-provided data tuple.
This is a convenience utility to be used when overriding
`Model.train_step`, `Model.test_step`, or `Model.predict_step`.
This utility makes it easy to support data of the form `(x,)`,
`(x, y)`, or `(x, y, sample_weight)`.
Standalone usage:
>>> features_batch = tf.ones((10, 5))
>>> labels_batch = tf.zeros((10, 5))
>>> data = (features_batch, labels_batch)
>>> # `y` and `sample_weight` will default to `None` if not provided.
>>> x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
>>> sample_weight is None
True
Example in overridden `Model.train_step`:
```python
class MyModel(tf.keras.Model):
def train_step(self, data):
# If `sample_weight` is not provided, all samples will be weighted
# equally.
x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
with tf.GradientTape() as tape:
y_pred = self(x, training=True)
loss = self.compiled_loss(
y, y_pred, sample_weight, regularization_losses=self.losses)
trainable_variables = self.trainable_variables
gradients = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
self.compiled_metrics.update_state(y, y_pred, sample_weight)
return {m.name: m.result() for m in self.metrics}
```
Args:
data: A tuple of the form `(x,)`, `(x, y)`, or `(x, y, sample_weight)`.
Returns:
The unpacked tuple, with `None`s for `y` and `sample_weight` if they are not
provided.
"""
if not isinstance(data, tuple):
return (data, None, None)
elif len(data) == 1:
return (data[0], None, None)
elif len(data) == 2:
return (data[0], data[1], None)
elif len(data) == 3:
return (data[0], data[1], data[2])
else:
error_msg = ("Data is expected to be in format `x`, `(x,)`, `(x, y)`, "
"or `(x, y, sample_weight)`, found: {}").format(data)
raise ValueError(error_msg)
@keras_export("keras.utils.pack_x_y_sample_weight", v1=[])
def pack_x_y_sample_weight(x, y=None, sample_weight=None):
"""Packs user-provided data into a tuple.
This is a convenience utility for packing data into the tuple formats
that `Model.fit` uses.
Standalone usage:
>>> x = tf.ones((10, 1))
>>> data = tf.keras.utils.pack_x_y_sample_weight(x)
>>> isinstance(data, tf.Tensor)
True
>>> y = tf.ones((10, 1))
>>> data = tf.keras.utils.pack_x_y_sample_weight(x, y)
>>> isinstance(data, tuple)
True
>>> x, y = data
Args:
x: Features to pass to `Model`.
y: Ground-truth targets to pass to `Model`.
sample_weight: Sample weight for each element.
Returns:
Tuple in the format used in `Model.fit`.
"""
if y is None:
# For single x-input, we do no tuple wrapping since in this case
# there is no ambiguity. This also makes NumPy and Dataset
# consistent in that the user does not have to wrap their Dataset
    # data in an unnecessary tuple.
if not nest.is_nested(x):
return x
else:
return (x,)
elif sample_weight is None:
return (x, y)
else:
return (x, y, sample_weight)
def single_batch_iterator(strategy,
x,
y=None,
sample_weight=None,
class_weight=None):
"""Creates a single-batch dataset."""
x, y, sample_weight = _process_tensorlike((x, y, sample_weight))
if y is None:
data = (x,)
elif sample_weight is None:
data = (x, y)
else:
data = (x, y, sample_weight)
_check_data_cardinality(data)
dataset = dataset_ops.DatasetV2.from_tensors(data)
if class_weight:
dataset = dataset.map(_make_class_weight_map_fn(class_weight))
dataset = strategy.experimental_distribute_dataset(dataset)
return iter(dataset)
def _check_data_cardinality(data):
num_samples = set(int(i.shape[0]) for i in nest.flatten(data))
if len(num_samples) > 1:
msg = "Data cardinality is ambiguous:\n"
for label, single_data in zip(["x", "y", "sample_weight"], data):
msg += " {} sizes: {}\n".format(
label, ", ".join(str(i.shape[0]) for i in nest.flatten(single_data)))
msg += "Make sure all arrays contain the same number of samples."
raise ValueError(msg)
def _scipy_sparse_to_sparse_tensor(t):
"""Converts a SciPy sparse matrix to a SparseTensor."""
sparse_coo = t.tocoo()
row, col = sparse_coo.row, sparse_coo.col
data, shape = sparse_coo.data, sparse_coo.shape
if issubclass(data.dtype.type, np.floating):
data = data.astype(backend.floatx())
indices = np.concatenate(
(np.expand_dims(row, axis=1), np.expand_dims(col, axis=1)), axis=1)
return sparse_tensor.SparseTensor(indices, data, shape)
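# Illustrative sketch of the COO -> SparseTensor conversion above.
# `_sketch_scipy_to_sparse_tensor` is a hypothetical helper for illustration
# only; it assumes SciPy is installed.
def _sketch_scipy_to_sparse_tensor():
  import numpy as np
  from scipy import sparse

  mat = sparse.csr_matrix(np.array([[0., 1.], [2., 0.]]))
  st = _scipy_sparse_to_sparse_tensor(mat)
  return st  # indices [[0, 1], [1, 0]], values [1., 2.], dense_shape [2, 2]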
def _is_distributed_dataset(ds):
return isinstance(ds, input_lib.DistributedDatasetInterface)
| apache-2.0 |
bsipocz/scikit-image | doc/examples/plot_label.py | 23 | 1557 | """
===================
Label image regions
===================
This example shows how to segment an image with image labelling. The following
steps are applied:
1. Thresholding with automatic Otsu method
2. Close small holes with binary closing
3. Remove artifacts touching image border
4. Measure image regions to filter small objects
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from skimage import data
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label
from skimage.morphology import closing, square
from skimage.measure import regionprops
from skimage.color import label2rgb
image = data.coins()[50:-50, 50:-50]
# apply threshold
thresh = threshold_otsu(image)
bw = closing(image > thresh, square(3))
# remove artifacts connected to image border
cleared = bw.copy()
clear_border(cleared)
# label image regions
label_image = label(cleared)
borders = np.logical_xor(bw, cleared)
label_image[borders] = -1
image_label_overlay = label2rgb(label_image, image=image)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
ax.imshow(image_label_overlay)
for region in regionprops(label_image):
# skip small images
if region.area < 100:
continue
# draw rectangle around segmented coins
minr, minc, maxr, maxc = region.bbox
rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
fill=False, edgecolor='red', linewidth=2)
ax.add_patch(rect)
plt.show()
| bsd-3-clause |
rs2/bokeh | examples/webgl/clustering.py | 6 | 2167 | """ Example inspired by an example from the scikit-learn project:
http://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_comparison.html
"""
import numpy as np
try:
from sklearn import cluster, datasets
from sklearn.preprocessing import StandardScaler
except ImportError:
raise ImportError('This example requires scikit-learn (conda install sklearn)')
from bokeh.layouts import row, column
from bokeh.plotting import figure, show, output_file
N = 50000
PLOT_SIZE = 400
# generate datasets.
np.random.seed(0)
noisy_circles = datasets.make_circles(n_samples=N, factor=.5, noise=.04)
noisy_moons = datasets.make_moons(n_samples=N, noise=.05)
centers = [(-2, 3), (2, 3), (-2, -3), (2, -3)]
blobs1 = datasets.make_blobs(centers=centers, n_samples=N, cluster_std=0.4, random_state=8)
blobs2 = datasets.make_blobs(centers=centers, n_samples=N, cluster_std=0.7, random_state=8)
colors = np.array([x for x in ('#00f', '#0f0', '#f00', '#0ff', '#f0f', '#ff0')])
colors = np.hstack([colors] * 20)
# create clustering algorithms
dbscan = cluster.DBSCAN(eps=.2)
birch = cluster.Birch(n_clusters=2)
means = cluster.MiniBatchKMeans(n_clusters=2)
spectral = cluster.SpectralClustering(n_clusters=2, eigen_solver='arpack', affinity="nearest_neighbors")
affinity = cluster.AffinityPropagation(damping=.9, preference=-200)
# change here, to select clustering algorithm (note: spectral is slow)
algorithm = dbscan # <- SELECT ALG
plots = []
for dataset in (noisy_circles, noisy_moons, blobs1, blobs2):
X, y = dataset
X = StandardScaler().fit_transform(X)
# predict cluster memberships
algorithm.fit(X)
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
p = figure(output_backend="webgl", title=algorithm.__class__.__name__,
plot_width=PLOT_SIZE, plot_height=PLOT_SIZE)
p.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), alpha=0.1,)
plots.append(p)
# generate layout for the plots
layout = column(row(plots[:2]), row(plots[2:]))
output_file("clustering.html", title="clustering with sklearn")
show(layout)
| bsd-3-clause |
JonnaStalring/AZOrange | ConfPred/conformal-master/examples/icp_classification_tree.py | 2 | 2055 | #!/usr/bin/env python3
"""
Example: inductive conformal classification using DecisionTreeClassifier
"""
import numpy as np
from sklearn.tree import DecisionTreeClassifier
import Orange
from nonconformist.acp import CrossConformalClassifier, AggregatedCp, CrossSampler
from nonconformist.icp import IcpClassifier
from nonconformist.nc import ProbEstClassifierNc, inverse_probability
# -----------------------------------------------------------------------------
# Setup training, calibration and test indices
# -----------------------------------------------------------------------------
data = Orange.data.Table('iris')
X, y = data.X, data.Y
idx = np.random.permutation(y.size)
train = idx[:idx.size // 3]
calibrate = idx[idx.size // 3:2 * idx.size // 3]
test = idx[2 * idx.size // 3:]
# -----------------------------------------------------------------------------
# Train and calibrate
# -----------------------------------------------------------------------------
icp = IcpClassifier(ProbEstClassifierNc(DecisionTreeClassifier(), inverse_probability))
icp.fit(X[train, :], y[train])
icp.calibrate(X[calibrate, :], y[calibrate])
ccp = CrossConformalClassifier(IcpClassifier(ProbEstClassifierNc(DecisionTreeClassifier(), inverse_probability)))
ccp.fit(X[train, :], y[train])
acp = AggregatedCp(IcpClassifier(ProbEstClassifierNc(DecisionTreeClassifier(), inverse_probability)), CrossSampler())
acp.fit(X[train, :], y[train])
# -----------------------------------------------------------------------------
# Predict
# -----------------------------------------------------------------------------
print('# Inductive')
prediction = icp.predict(X[test, :], significance=0.1)
for pred, actual in zip(prediction[:5], y[test]):
print(pred, actual)
print('\n# Cross')
prediction = ccp.predict(X[test, :], significance=0.1)
for pred, actual in zip(prediction[:5], y[test]):
print(pred, actual)
print('\n# Aggre')
prediction = acp.predict(X[test, :], significance=0.1)
for pred, actual in zip(prediction[:5], y[test]):
print(pred, actual) | lgpl-3.0 |
AnasGhrab/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
ml-lab/pylearn2 | pylearn2/models/tests/test_s3c_inference.py | 4 | 14275 | from pylearn2.models.s3c import S3C
from pylearn2.models.s3c import E_Step_Scan
from pylearn2.models.s3c import Grad_M_Step
from pylearn2.models.s3c import E_Step
from theano import function
import numpy as np
import theano.tensor as T
from theano import config
#from pylearn2.utils import serial
import warnings
def broadcast(mat, shape_0):
rval = mat
if mat.shape[0] != shape_0:
assert mat.shape[0] == 1
rval = np.zeros((shape_0, mat.shape[1]),dtype=mat.dtype)
for i in xrange(shape_0):
rval[i,:] = mat[0,:]
return rval
class Test_S3C_Inference:
def setUp(self):
# Temporarily change config.floatX to float64, as s3c inference
# tests currently fail due to numerical issues for float32.
self.prev_floatX = config.floatX
config.floatX = 'float64'
def tearDown(self):
# Restore previous value of floatX
config.floatX = self.prev_floatX
def __init__(self):
""" gets a small batch of data
sets up an S3C model
"""
# We also have to change the value of config.floatX in __init__.
self.prev_floatX = config.floatX
config.floatX = 'float64'
try:
self.tol = 1e-5
#dataset = serial.load('${PYLEARN2_DATA_PATH}/stl10/stl10_patches/data.pkl')
#X = dataset.get_batch_design(1000)
#X = X[:,0:5]
X = np.random.RandomState([1,2,3]).randn(1000,5)
X -= X.mean()
X /= X.std()
m, D = X.shape
N = 5
#don't give the model an e_step or learning rate so it won't spend years compiling a learn_func
self.model = S3C(nvis = D,
nhid = N,
irange = .1,
init_bias_hid = 0.,
init_B = 3.,
min_B = 1e-8,
max_B = 1000.,
init_alpha = 1., min_alpha = 1e-8, max_alpha = 1000.,
init_mu = 1., e_step = None,
m_step = Grad_M_Step(),
min_bias_hid = -1e30, max_bias_hid = 1e30,
)
self.model.make_pseudoparams()
self.h_new_coeff_schedule = [.1, .2, .3, .4, .5, .6, .7, .8, .9, 1. ]
self.e_step = E_Step_Scan(h_new_coeff_schedule = self.h_new_coeff_schedule)
self.e_step.register_model(self.model)
self.X = X
self.N = N
self.m = m
finally:
config.floatX = self.prev_floatX
def test_match_unrolled(self):
""" tests that inference with scan matches result using unrolled loops """
unrolled_e_step = E_Step(h_new_coeff_schedule = self.h_new_coeff_schedule)
unrolled_e_step.register_model(self.model)
V = T.matrix()
scan_result = self.e_step.infer(V)
unrolled_result = unrolled_e_step.infer(V)
outputs = []
for key in scan_result:
outputs.append(scan_result[key])
outputs.append(unrolled_result[key])
f = function([V], outputs)
outputs = f(self.X)
assert len(outputs) % 2 == 0
for i in xrange(0,len(outputs),2):
assert np.allclose(outputs[i],outputs[i+1])
def test_grad_s(self):
"tests that the gradients with respect to s_i are 0 after doing a mean field update of s_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
model.test_batch_size = X.shape[0]
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
S = e_step.infer_S_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
s_idx = S[:,idx]
s_i_func = function([H_var,Mu1_var,idx],s_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
grad_Mu1 = T.grad(trunc_kl.sum(), Mu1_var)
grad_Mu1_idx = grad_Mu1[:,idx]
grad_func = function([H_var, Mu1_var, idx], grad_Mu1_idx)
for i in xrange(self.N):
Mu1[:,i] = s_i_func(H, Mu1, i)
g = grad_func(H,Mu1,i)
assert not np.any(np.isnan(g))
g_abs_max = np.abs(g).max()
if g_abs_max > self.tol:
raise Exception('after mean field step, gradient of kl divergence wrt mean field parameter should be 0, but here the max magnitude of a gradient element is '+str(g_abs_max)+' after updating s_'+str(i))
def test_value_s(self):
"tests that the value of the kl divergence decreases with each update to s_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
S = e_step.infer_S_hat( V = X, H_hat = H_var, S_hat = Mu1_var)
s_idx = S[:,idx]
s_i_func = function([H_var,Mu1_var,idx],s_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
trunc_kl_func = function([H_var, Mu1_var], trunc_kl)
for i in xrange(self.N):
prev_kl = trunc_kl_func(H,Mu1)
Mu1[:,i] = s_i_func(H, Mu1, i)
new_kl = trunc_kl_func(H,Mu1)
increase = new_kl - prev_kl
mx = increase.max()
if mx > 1e-3:
raise Exception('after mean field step in s, kl divergence should decrease, but some elements increased by as much as '+str(mx)+' after updating s_'+str(i))
def test_grad_h(self):
"tests that the gradients with respect to h_i are 0 after doing a mean field update of h_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
new_H = e_step.infer_H_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
h_idx = new_H[:,idx]
updates_func = function([H_var,Mu1_var,idx], h_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0,
var_s1_hat = Sigma1)
grad_H = T.grad(trunc_kl.sum(), H_var)
assert len(grad_H.type.broadcastable) == 2
#from theano.printing import min_informative_str
#print min_informative_str(grad_H)
#grad_H = Print('grad_H')(grad_H)
#grad_H_idx = grad_H[:,idx]
grad_func = function([H_var, Mu1_var], grad_H)
failed = False
for i in xrange(self.N):
rval = updates_func(H, Mu1, i)
H[:,i] = rval
g = grad_func(H,Mu1)[:,i]
assert not np.any(np.isnan(g))
g_abs_max = np.abs(g).max()
if g_abs_max > self.tol:
#print "new values of H"
#print H[:,i]
#print "gradient on new values of H"
#print g
failed = True
print 'iteration ',i
#print 'max value of new H: ',H[:,i].max()
#print 'H for failing g: '
failing_h = H[np.abs(g) > self.tol, i]
#print failing_h
#from matplotlib import pyplot as plt
#plt.scatter(H[:,i],g)
#plt.show()
#ignore failures extremely close to h=1
high_mask = failing_h > .001
low_mask = failing_h < .999
mask = high_mask * low_mask
print 'masked failures: ',mask.shape[0],' err ',g_abs_max
if mask.sum() > 0:
print 'failing h passing the range mask'
print failing_h[ mask.astype(bool) ]
raise Exception('after mean field step, gradient of kl divergence'
' wrt freshly updated variational parameter should be 0, '
'but here the max magnitude of a gradient element is '
+str(g_abs_max)+' after updating h_'+str(i))
#assert not failed
def test_value_h(self):
"tests that the value of the kl divergence decreases with each update to h_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
newH = e_step.infer_H_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
h_idx = newH[:,idx]
h_i_func = function([H_var,Mu1_var,idx],h_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
trunc_kl_func = function([H_var, Mu1_var], trunc_kl)
for i in xrange(self.N):
prev_kl = trunc_kl_func(H,Mu1)
H[:,i] = h_i_func(H, Mu1, i)
#we don't update mu, the whole point of the split e step is we don't have to
new_kl = trunc_kl_func(H,Mu1)
increase = new_kl - prev_kl
print 'failures after iteration ',i,': ',(increase > self.tol).sum()
mx = increase.max()
if mx > 1e-4:
print 'increase amounts of failing examples:'
print increase[increase > self.tol]
print 'failing H:'
print H[increase > self.tol,:]
print 'failing Mu1:'
print Mu1[increase > self.tol,:]
print 'failing V:'
print X[increase > self.tol,:]
raise Exception('after mean field step in h, kl divergence should decrease, but some elements increased by as much as '+str(mx)+' after updating h_'+str(i))
if __name__ == '__main__':
obj = Test_S3C_Inference()
#obj.test_grad_h()
#obj.test_grad_s()
#obj.test_value_s()
obj.test_value_h()
| bsd-3-clause |
miloharper/neural-network-animation | matplotlib/tests/test_colors.py | 9 | 9307 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from nose.tools import assert_raises, assert_equal
import numpy as np
from numpy.testing.utils import assert_array_equal, assert_array_almost_equal
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison, cleanup
def test_colormap_endian():
"""
Github issue #1005: a bug in putmask caused erroneous
mapping of 1.0 when input from a non-native-byteorder
array.
"""
cmap = cm.get_cmap("jet")
# Test under, over, and invalid along with values 0 and 1.
a = [-0.5, 0, 0.5, 1, 1.5, np.nan]
for dt in ["f2", "f4", "f8"]:
anative = np.ma.masked_invalid(np.array(a, dtype=dt))
aforeign = anative.byteswap().newbyteorder()
#print(anative.dtype.isnative, aforeign.dtype.isnative)
assert_array_equal(cmap(anative), cmap(aforeign))
def test_BoundaryNorm():
"""
Github issue #1258: interpolation was failing with numpy
1.7 pre-release.
"""
# TODO: expand this into a more general test of BoundaryNorm.
boundaries = [0, 1.1, 2.2]
vals = [-1, 0, 2, 2.2, 4]
expected = [-1, 0, 2, 3, 3]
# ncolors != len(boundaries) - 1 triggers interpolation
ncolors = len(boundaries)
bn = mcolors.BoundaryNorm(boundaries, ncolors)
assert_array_equal(bn(vals), expected)
def test_LogNorm():
"""
LogNorm ignored clip, now it has the same
behavior as Normalize, e.g., values > vmax are bigger than 1
without clip, with clip they are 1.
"""
ln = mcolors.LogNorm(clip=True, vmax=5)
assert_array_equal(ln([1, 6]), [0, 1.0])
def test_PowerNorm():
a = np.array([0, 0.5, 1, 1.5], dtype=np.float)
pnorm = mcolors.PowerNorm(1)
norm = mcolors.Normalize()
assert_array_almost_equal(norm(a), pnorm(a))
a = np.array([-0.5, 0, 2, 4, 8], dtype=np.float)
expected = [0, 0, 1/16, 1/4, 1]
pnorm = mcolors.PowerNorm(2, vmin=0, vmax=8)
assert_array_almost_equal(pnorm(a), expected)
assert_equal(pnorm(a[0]), expected[0])
assert_equal(pnorm(a[2]), expected[2])
assert_array_almost_equal(a[1:], pnorm.inverse(pnorm(a))[1:])
# Clip = True
a = np.array([-0.5, 0, 1, 8, 16], dtype=np.float)
expected = [0, 0, 0, 1, 1]
pnorm = mcolors.PowerNorm(2, vmin=2, vmax=8, clip=True)
assert_array_almost_equal(pnorm(a), expected)
assert_equal(pnorm(a[0]), expected[0])
assert_equal(pnorm(a[-1]), expected[-1])
# Clip = True at call time
a = np.array([-0.5, 0, 1, 8, 16], dtype=np.float)
expected = [0, 0, 0, 1, 1]
pnorm = mcolors.PowerNorm(2, vmin=2, vmax=8, clip=False)
assert_array_almost_equal(pnorm(a, clip=True), expected)
assert_equal(pnorm(a[0], clip=True), expected[0])
assert_equal(pnorm(a[-1], clip=True), expected[-1])
def test_Normalize():
norm = mcolors.Normalize()
vals = np.arange(-10, 10, 1, dtype=np.float)
_inverse_tester(norm, vals)
_scalar_tester(norm, vals)
_mask_tester(norm, vals)
def test_SymLogNorm():
"""
Test SymLogNorm behavior
"""
norm = mcolors.SymLogNorm(3, vmax=5, linscale=1.2)
vals = np.array([-30, -1, 2, 6], dtype=np.float)
normed_vals = norm(vals)
expected = [0., 0.53980074, 0.826991, 1.02758204]
assert_array_almost_equal(normed_vals, expected)
_inverse_tester(norm, vals)
_scalar_tester(norm, vals)
_mask_tester(norm, vals)
# Ensure that specifying vmin returns the same result as above
norm = mcolors.SymLogNorm(3, vmin=-30, vmax=5, linscale=1.2)
normed_vals = norm(vals)
assert_array_almost_equal(normed_vals, expected)
def _inverse_tester(norm_instance, vals):
"""
Checks if the inverse of the given normalization is working.
"""
assert_array_almost_equal(norm_instance.inverse(norm_instance(vals)), vals)
def _scalar_tester(norm_instance, vals):
"""
Checks if scalars and arrays are handled the same way.
Tests only for float.
"""
scalar_result = [norm_instance(float(v)) for v in vals]
assert_array_almost_equal(scalar_result, norm_instance(vals))
def _mask_tester(norm_instance, vals):
"""
Checks mask handling
"""
masked_array = np.ma.array(vals)
masked_array[0] = np.ma.masked
assert_array_equal(masked_array.mask, norm_instance(masked_array).mask)
@image_comparison(baseline_images=['levels_and_colors'],
extensions=['png'])
def test_cmap_and_norm_from_levels_and_colors():
data = np.linspace(-2, 4, 49).reshape(7, 7)
levels = [-1, 2, 2.5, 3]
colors = ['red', 'green', 'blue', 'yellow', 'black']
extend = 'both'
cmap, norm = mcolors.from_levels_and_colors(levels, colors, extend=extend)
ax = plt.axes()
m = plt.pcolormesh(data, cmap=cmap, norm=norm)
plt.colorbar(m)
# Hide the axes labels (but not the colorbar ones, as they are useful)
for lab in ax.get_xticklabels() + ax.get_yticklabels():
lab.set_visible(False)
def test_cmap_and_norm_from_levels_and_colors2():
levels = [-1, 2, 2.5, 3]
colors = ['red', (0, 1, 0), 'blue', (0.5, 0.5, 0.5), (0.0, 0.0, 0.0, 1.0)]
clr = mcolors.colorConverter.to_rgba_array(colors)
bad = (0.1, 0.1, 0.1, 0.1)
no_color = (0.0, 0.0, 0.0, 0.0)
masked_value = 'masked_value'
# Define the test values which are of interest.
# Note: levels are lev[i] <= v < lev[i+1]
tests = [('both', None, {-2: clr[0],
-1: clr[1],
2: clr[2],
2.25: clr[2],
3: clr[4],
3.5: clr[4],
masked_value: bad}),
('min', -1, {-2: clr[0],
-1: clr[1],
2: clr[2],
2.25: clr[2],
3: no_color,
3.5: no_color,
masked_value: bad}),
('max', -1, {-2: no_color,
-1: clr[0],
2: clr[1],
2.25: clr[1],
3: clr[3],
3.5: clr[3],
masked_value: bad}),
('neither', -2, {-2: no_color,
-1: clr[0],
2: clr[1],
2.25: clr[1],
3: no_color,
3.5: no_color,
masked_value: bad}),
]
for extend, i1, cases in tests:
cmap, norm = mcolors.from_levels_and_colors(levels, colors[0:i1],
extend=extend)
cmap.set_bad(bad)
for d_val, expected_color in cases.items():
if d_val == masked_value:
d_val = np.ma.array([1], mask=True)
else:
d_val = [d_val]
assert_array_equal(expected_color, cmap(norm(d_val))[0],
'With extend={0!r} and data '
'value={1!r}'.format(extend, d_val))
assert_raises(ValueError, mcolors.from_levels_and_colors, levels, colors)
def test_rgb_hsv_round_trip():
for a_shape in [(500, 500, 3), (500, 3), (1, 3), (3,)]:
np.random.seed(0)
tt = np.random.random(a_shape)
assert_array_almost_equal(tt,
mcolors.hsv_to_rgb(mcolors.rgb_to_hsv(tt)))
assert_array_almost_equal(tt,
mcolors.rgb_to_hsv(mcolors.hsv_to_rgb(tt)))
@cleanup
def test_autoscale_masked():
# Test for #2336. Previously fully masked data would trigger a ValueError.
data = np.ma.masked_all((12, 20))
plt.pcolor(data)
plt.draw()
def test_colors_no_float():
# Gray must be a string to distinguish 3-4 grays from RGB or RGBA.
def gray_from_float_rgb():
return mcolors.colorConverter.to_rgb(0.4)
def gray_from_float_rgba():
return mcolors.colorConverter.to_rgba(0.4)
assert_raises(ValueError, gray_from_float_rgb)
assert_raises(ValueError, gray_from_float_rgba)
def test_light_source_shading_color_range():
# see also
#http://matplotlib.org/examples/pylab_examples/shading_example.html
from matplotlib.colors import LightSource
from matplotlib.colors import Normalize
refinput = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
norm = Normalize(vmin=0, vmax=50)
ls = LightSource(azdeg=0, altdeg=65)
testoutput = ls.shade(refinput, plt.cm.jet, norm=norm)
refoutput = np.array([
[[0., 0., 0.58912656, 1.],
[0., 0., 0.67825312, 1.],
[0., 0., 0.76737968, 1.],
[0., 0., 0.85650624, 1.]],
[[0., 0., 0.9456328, 1.],
[0., 0., 1., 1.],
[0., 0.04901961, 1., 1.],
[0., 0.12745098, 1., 1.]],
[[0., 0.22156863, 1., 1.],
[0., 0.3, 1., 1.],
[0., 0.37843137, 1., 1.],
[0., 0.45686275, 1., 1.]]
])
assert_array_almost_equal(refoutput, testoutput)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
Leotrinos/agpy | agpy/showspec.py | 6 | 51670 | """
showspec is my homegrown spectrum plotter, meant to somewhat follow STARLINK's
SPLAT and have functionality similar to GAIA, but with an emphasis on producing
publication-quality plots (which, while splat may do, it does unreproducibly)
.. todo::
-add spectrum arithmetic tools
(as is, you can use numpy.interp with sp.vind and sp.spectrum pretty
easily)
-implement other fitters
-e.g., NH3 hyperfine, Voigt
-move to object-oriented pylab/pyplot implementation (for bulk / speedup work)
-allow for non-plotting fitting work (curious... I've never needed that yet)
-Equivalent Width measurement without gaussian fitting
-probably should be part of the baseline code
-write documentation other people can read
12/21/2011 - ALL of the above to-do list IS DONE! It's now hosted at <http://pyspeckit.bitbucket.org>
"""
import math
import pylab
from pylab import *
for k,v in pylab.__dict__.iteritems():
if hasattr(v,'__module__'):
if v.__module__ is None:
locals()[k].__module__ = 'pylab'
import matplotlib
from agpy.mpfit import mpfit
from collapse_gaussfit import *
from ratosexagesimal import *
import pyfits
import gaussfitter
import numpy
from numpy import isnan
from mad import MAD,nanmedian
def steppify(arr,isX=False,interval=0,sign=+1.0):
"""
*support function*
Converts an array to double-length for step plotting
"""
if isX and interval==0:
interval = numpy.abs(arr[1]-arr[0]) / 2.0
newarr = pylab.array(zip(arr-sign*interval,arr+sign*interval)).ravel()
return newarr
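# Minimal illustration (example values only): steppify turns N axis points into
# 2N bin edges so that plot() draws flat-topped steps, e.g.
#   steppify(array([0., 1., 2.]), isX=True) -> [-0.5, 0.5, 0.5, 1.5, 1.5, 2.5]
# while steppify(yvalues) simply repeats each y value twice.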
class SpecPlotter:
"""
SpecPlotter class. Takes in a spectrum or data cube, plotting properties,
and a velocity axis determination function. Look at splat_1d for a wrapper
that might actually be useful.
Whew, needs more documentation
"""
def __init__(self, cube, axis=None, xtol=None, ytol=None, vconv=lambda
x: x, xtora=lambda x: x, ytodec=lambda x: x, specname=None,
dv=None, color='k', hdr=None, errspec=None, maskspec=None,
fig=None, fignum=1, clear=True, title=None, xunits='km/s',
erralpha=0.2, ivconv=None, autorefresh=True, reffreq=None,
gaiafignum=0, gaiafig=None, clickid=None, **kwargs ):
self.vconv = vconv
self.xtora = xtora
self.ytodec = ytodec
self.cube = cube # where(numpy.isnan(cube),0,cube)
if len(self.cube.shape) > 1:
self.spectrum = cube[:,0,0] # spectrum is what's plotted; cube is the "raw data"
else:
self.spectrum = cube # spectrum is what's plotted; cube is the "raw data"
self.specname=specname
self.dv=dv
self.reffreq=reffreq
self.scale=1.0
self.units='K'
self.xunits=xunits
self.voff=0.0
self.offset=0.0
self.continuum=0.0
self.errspec = errspec
self.erralpha=erralpha
self.plotcolor=color
self.specfit = Specfit(self)
self.fitspec = self.specfit
self.baseline = Baseline(self)
#self.fft = FFT(self)
#self.psd = self.fft.psd
self.vmin=None
self.vmax=None
self.title=title
self.ivconv=ivconv
self.autorefresh=autorefresh
self.spectrumplot=None
self.errorplot=None
self.gaiafignum = gaiafignum
self.gaiafig = gaiafig
self.clickid = clickid
self.plotkwargs = kwargs
if maskspec is not None:
self.maskspec = maskspec
else:
self.maskspec = zeros(self.cube.shape)
self.linecollections =[]
self.texts =[]
if hdr: self.header = hdr
# figure out where to put the plot
if fig is None and axis is None:
fig=figure(fignum)
if clear: fig.clf()
self.axis = fig.gca()
elif fig is not None and axis is None:
if clear: fig.clf()
self.axis = fig.gca()
elif fig is None and axis is not None:
self.axis = axis
else: # if figure and axis are both set, just use axis
self.axis = axis
if clear: self.axis.clear()
def __call__(self, event):
"""
Connects map cube to specplotter...
"""
if event.inaxes:
clickX = event.xdata
clickY = event.ydata
tb = get_current_fig_manager().toolbar
#if ((self.axis is None) or (self.axis==event.inaxes)) and tb.mode=='':
if event.button==1 and tb.mode=='':
print "OverPlotting spectrum from point %i,%i" % (clickX,clickY)
self.plotspec(clickY,clickX,button=event.button,cube=True)
elif event.button==2:
print "Plotting spectrum from point %i,%i" % (clickX,clickY)
self.plotspec(clickY,clickX,button=event.button,cube=True,clear=True)
elif event.button==3:
print "Disconnecting GAIA-like tool"
self.gaiafig.canvas.mpl_disconnect(self.clickid)
else:
print "Call failed for some reason: "
print "event: ",event
def plotspec(self, i=0, j=0, cube=False, title=None,
clear=False, color=None, continuum=None,
axis=None, offset=None, scale=None, voff=None, vmin=None,
vmax=None, units=None, xunits=None, erralpha=None, plotpix=False,
errstyle='fill', autorefresh=None, button=None, **kwargs):
"""
Plot a spectrum
Originally written to plot spectra from data cubes, hence the i,j parameter
to specify the location in the cube
Now, cube defaults to False, but you can still pass in a data cube.
Inputs:
title, color, kwargs - semi-obvious plot-related commands
axis - You can pass in a Matplotlib axis instance and it will plot on that
clear - Clear the axis before plotting?
continuum - if you've already subtracted out a continuum, you can add
it back in (only if it is a constant offset). It will be included in
the spectrum
offset - Like continuum, but ONLY for plotting purposes. Will move the
plot vertically but will NOT include values in the .spectrum
scale - multiplicative factor to scale the data by (NOT for plotting
purposes; modifies spectrum)
voff - Shift the spectrum on the velocity axis by this amount
vmin,vmax - only plot within this range
(note that these keywords passed to splat_1d MAY crop the spectrum)
units - units of the data. At the moment, no conversions are done
xunits - units of the X axis. Can affect other procedures, like showlines,
and some unit conversion (Hz to GHz) is done
erralpha - Transparency of the errorbars if plotted
errstyle - style of errorbars if plotted
plotpix - if set, will plot against a pixel (channel) axis instead of a
physical axis
autorefresh - automatically update the plot when fitting gaussians, labeling,
etc?
"""
if kwargs.has_key('fignum'): kwargs.pop('fignum') # HACK because I want __init__ to accept different kwargs
if kwargs.has_key('fig'): kwargs.pop('fig') # is there a better workaround?
if scale is not None: self.scale = scale
if units is not None: self.units = units
if xunits is not None: self.xunits= xunits
if voff is not None: self.voff = voff
if offset is not None: self.offset= offset
if continuum is not None: self.continuum= continuum
if color is not None: self.plotcolor=color
if erralpha is not None: self.erralpha= erralpha
if vmax is not None: self.vmax = vmax
if vmin is not None: self.vmin = vmin
if title is not None: self.title = title
if autorefresh is not None: self.autorefresh = autorefresh
if axis is None: axis=self.axis # allow spectrum to be plotted on other axis
if clear: axis.clear()
if plotpix:
self.vind = arange(self.cube.shape[0])
else:
self.vind = self.vconv(arange(self.cube.shape[0])) + self.voff
if kwargs.has_key('fignum'): kwargs.pop('fignum')
if kwargs.has_key('linewidth'):
linewidth = kwargs.pop('linewidth')
else:
linewidth=0.5
if cube or len(self.cube.shape) == 3:
self.spectrum = self.cube[:,i,j]*self.scale+self.continuum-self.baseline.basespec
self.spectrumplot = axis.plot(self.vind,self.spectrum+self.offset,color=self.plotcolor,
linestyle='steps-mid',linewidth=linewidth,
**kwargs)
else:
if self.maskspec.sum() > 0:
nanmask = where(self.maskspec,numpy.nan,1)
self.spectrum = self.cube*self.scale*nanmask+self.continuum-self.baseline.basespec
self.spectrumplot = axis.plot(self.vind,self.spectrum+self.offset,color=self.plotcolor,
linestyle='steps-mid',linewidth=linewidth,
**kwargs)
else:
self.spectrum = self.cube*self.scale+self.continuum-self.baseline.basespec
self.spectrumplot = axis.plot(self.vind,self.spectrum+self.offset,color=self.plotcolor,
linestyle='steps-mid',linewidth=linewidth,
**kwargs)
if self.errspec is not None:
if errstyle == 'fill':
self.errorplot = [axis.fill_between(steppify(self.vind,isX=True,sign=sign(self.dv)),
steppify(self.spectrum+self.offset-self.errspec*self.scale),
steppify(self.spectrum+self.offset+self.errspec*self.scale),
facecolor=self.plotcolor, alpha=self.erralpha, **kwargs)]
elif errstyle == 'bars':
self.errorplot = axis.errorbar(self.vind, self.spectrum+self.offset,
yerr=self.errspec*self.scale, ecolor=self.plotcolor, fmt=None,
**kwargs)
if vmin is not None: xlo = self.vmin
else: xlo=self.vind.min()
if vmax is not None: xhi = self.vmax
else: xhi=self.vind.max()
axis.set_xlim(xlo,xhi)
if self.title is not None:
axis.set_title(self.title)
elif self.xtora and self.ytodec:
axis.set_title("Spectrum at %s %s" %
(ratos(self.xtora(i)),dectos(self.ytodec(j))))
elif self.specname:
axis.set_title("Spectrum of %s" % self.specname)
if isinstance(self.xunits,str):
axis.set_xlabel(self.xunits)
else:
axis.set_xlabel("V$_{LSR}$ (km s$^{-1}$)")
self.xunits = 'km/s'
if units in ['Ta*','Tastar','K']:
axis.set_ylabel("$T_A^*$ (K)")
elif units == 'mJy':
axis.set_ylabel("$S_\\nu$ (mJy)")
elif units == 'Jy':
axis.set_ylabel("$S_\\nu$ (Jy)")
else:
axis.set_ylabel(self.units)
if self.autorefresh: self.refresh()
def save(self,fname,**kwargs):
"""
Save the current spectrum (useful for saving baselined data)
"""
newfile = pyfits.PrimaryHDU(data=self.cube,header=self.header)
newfile.writeto(fname,**kwargs)
def savefig(self,fname,bbox_inches='tight',**kwargs):
"""
simple wrapper of maplotlib's savefig.
"""
self.axis.figure.savefig(fname,bbox_inches=bbox_inches,**kwargs)
def showlines(self,linefreqs,linenames,ctype='freq',cunit='hz',yscale=0.8,vofflines=0.0,
voffunit='km/s',**kwargs):
"""
Overplot vertical lines and labels at the frequencies (or velocities) of each line
yscale - fraction of maximum at which to label
"""
self.clearlines()
if ctype != 'freq':
print "Sorry, non-frequency units not implemented yet."
return
speedoflight=2.99792458e5
if self.reffreq and self.xunits in ('km/s','m/s'):
linefreqs = -(array(linefreqs)-self.reffreq)/self.reffreq * speedoflight
if 'hz' in cunit or 'Hz' in cunit:
linefreqs *= (1.0 + vofflines / speedoflight)
else:
linefreqs += vofflines
ymax = (self.spectrum[self.spectrum==self.spectrum]).max()
for lf,ln in zip(linefreqs,linenames):
if lf < self.vind.max() and lf > self.vind.min():
self.linecollections.append(vlines(lf,0,ymax,**kwargs))
self.texts.append(text(lf,ymax*yscale,ln,rotation='vertical',**kwargs))
if self.autorefresh: self.refresh()
def clearlines(self):
if len(self.texts) > 0:
for T in self.texts:
if T in self.axis.texts:
self.axis.texts.remove(T)
if len(self.linecollections) > 0:
for LC in self.linecollections:
if LC in self.axis.collections:
self.axis.collections.remove(LC)
def refresh(self):
self.axis.figure.canvas.draw()
class FFT:
def __init__(self,specplotter,fignum=3,axis=None, color='k'):
self.specplotter=specplotter
if axis is None:
self.fignum=fignum
self.figure=figure(self.fignum)
self.axis=gca()
else:
self.axis=axis
self.figure=self.axis.figure
self.fignum=None
#self.axis.clear()
self.color=color
self.fftplot=None
self.setspec()
self.setshift()
self.clear()
def __call__(self,psd=False,shift=True):
self.setspec()
if psd:
self.psd(shift=shift)
else:
self.fft(shift=shift)
def fft(self,shift=True,logplot=False,**kwargs):
self.clear()
self.setshift(shift)
if logplot: self.axis.set_yscale('log')
else: self.axis.set_yscale('linear')
self.fftspec = fft(self.spectofft)
self.realfft = self.fftspec.real
self.imagfft = self.fftspec.imag
self.fftplot = self.axis.plot(self.shiftfunc(self.realfft),
drawstyle='steps-mid',color=self.color,**kwargs)
self.refresh()
def psd(self,logplot=True,shift=True,**kwargs):
self.clear()
if logplot: self.axis.set_yscale('log')
else: self.axis.set_yscale('linear')
self.setshift(shift)
self.psdspec = fft(self.spectofft) * fft(self.spectofft[::-1])
self.psdreal = abs(self.psdspec)
self.fftplot = self.axis.plot(self.shiftfunc(self.psdreal),
drawstyle='steps-mid',color=self.color,**kwargs)
if logplot: self.axis.set_yscale('log')
else: self.axis.set_yscale('linear')
self.refresh()
def setshift(self,shift=True):
if shift: self.shiftfunc = fftshift
else: self.shiftfunc = lambda x: x
def setspec(self):
self.spectofft = copy(self.specplotter.spectrum)
OKmask = (self.spectofft==self.spectofft)
self.spectofft[(True-OKmask)] = 0
def clear(self):
if self.fftplot is not None:
for p in self.fftplot:
p.set_visible(False)
if p in self.axis.lines: self.axis.lines.remove(p)
self.axis.clear()
self.refresh()
def refresh(self):
self.axis.figure.canvas.draw()
class PSD(FFT):
def __call__(self,shift=True):
self.setspec()
self.setshift(shift)
self.clear()
self.psd()
self.refresh()
class Baseline:
def __init__(self,specplotter):
self.baselinepars = None
self.order = None
self.basespec = zeros(specplotter.spectrum.shape[0])
self.excludemask = zeros(specplotter.spectrum.shape[0],dtype='bool')
self.OKmask = ones(specplotter.spectrum.shape[0],dtype='bool')
self.specplotter = specplotter
self.blleg = None
self.click = 0
self.nclicks_b1 = 0
self.nclicks_b2 = 0
self.fitregion=[]
self.excludevelo = []
self.excludepix = []
def __call__(self, order=1, annotate=False, excludefit=False, save=True,
exclude=None, exclusionlevel=0.01,
interactive=False, **kwargs):
"""
Fit and remove a polynomial from the spectrum.
It will be saved in the variable "self.basespec"
and the fit parameters will be saved in "self.order"
function baseline(spectrum,xarr=None,xmin=None,xmax=None,order=1,quiet=True,exclude=None):
Subtract a baseline from a spectrum
If xmin,xmax are not specified, defaults to ignoring first and last 10% of spectrum
exclude is a set of start/end indices to ignore when baseline fitting
(ignored by setting error to infinite in fitting procedure)
excludefit creates a mask based on the fitted gaussian model (assuming
that it has a zero-height) using an exclusion level of (exclusionlevel)
* the smallest gaussian peak that was fit
"basespec" is added back to the spectrum before fitting so you can run this
procedure multiple times without losing information
"""
specfit = self.specplotter.specfit
self.order = order
fitp = zeros(self.order+1)
self.spectofit = self.specplotter.spectrum+self.basespec
self.OKmask = (self.spectofit==self.spectofit)
if exclude == 'interactive' or interactive:
self.excludemask[:] = True
self.excludevelo = []
self.excludepix = []
self.click = self.specplotter.axis.figure.canvas.mpl_connect('button_press_event',self.selectregion)
else:
if excludefit and specfit.modelpars is not None:
#vlo = self.specplotter.specfit.modelpars[1] - 2*self.specplotter.specfit.modelpars[2]
#vhi = self.specplotter.specfit.modelpars[1] + 2*self.specplotter.specfit.modelpars[2]
#exclude = [argmin(abs(self.specplotter.vind-vlo)),argmin(abs(self.specplotter.vind-vhi))]
specfit.fullsizemodel() # make sure the spectrum is the right size
self.excludemask = abs(specfit.model) > exclusionlevel*abs(min(specfit.modelpars[0::3]))
else:
self.excludemask[:] = False
self.dofit(exclude=exclude,annotate=annotate,**kwargs)
if save: self.savefit()
def dofit(self, exclude=None, excludeunits='velo', annotate=False,
**kwargs):
"""
Do the baseline fitting and save and plot the results.
Can specify a region to exclude using velocity units or pixel units
"""
if exclude is not None and excludeunits in ['velo','km/s']:
if len(exclude) % 2 == 0:
self.excludevelo = exclude
self.excludepix = []
for vl,vu in zip(exclude[::2],exclude[1::2]):
xl = argmin(abs(self.specplotter.vind-vl))
xu = argmin(abs(self.specplotter.vind-vu))
if xl > xu: xl,xu=xu,xl
self.excludemask[xl:xu] = True
self.excludepix += [xl,xu]
elif excludeunits in ['pix','pixel','chan','channel']:
if len(exclude) % 2 == 0:
self.excludepix = []
for xl,xu in zip(exclude[::2],exclude[1::2]):
if xl > xu: xl,xu=xu,xl
self.excludemask[xl:xu] = True
self.excludepix += [xl,xu]
self.specplotter.spectrum, self.baselinepars = baseline(
self.spectofit,
xarr=self.specplotter.vind,
order=self.order, exclude=None,
mask=(True-self.OKmask)+self.excludemask,
**kwargs)
self.basespec = poly1d(self.baselinepars)(self.specplotter.vind)
if self.specplotter.spectrumplot is not None:
[self.specplotter.axis.lines.remove(p) for p in self.specplotter.spectrumplot]
if self.specplotter.errorplot is not None:
[self.specplotter.axis.collections.remove(p) for p in self.specplotter.errorplot if isinstance(p,matplotlib.collections.PolyCollection)]
[self.specplotter.axis.lines.remove(p) for p in self.specplotter.errorplot if isinstance(p,matplotlib.lines.Line2D)]
self.specplotter.plotspec(**self.specplotter.plotkwargs)
self.specplotter.axis.set_ylim(
abs(self.specplotter.spectrum[self.OKmask].min())*1.1*sign(self.specplotter.spectrum[self.OKmask].min()),
abs(self.specplotter.spectrum[self.OKmask].max())*1.1*sign(self.specplotter.spectrum[self.OKmask].max()))
if annotate: self.annotate() # refreshes automatically
elif self.specplotter.autorefresh: self.specplotter.refresh()
def selectregion(self,event,annotate=False):
"""
select regions for baseline fitting
"""
if event.button == 1:
if self.nclicks_b1 == 0:
self.bx1 = argmin(abs(event.xdata-self.specplotter.vind))
self.excludevelo += [self.specplotter.vind]
self.excludepix += [self.bx1]
self.nclicks_b1 += 1
elif self.nclicks_b1 == 1:
self.bx2 = argmin(abs(event.xdata-self.specplotter.vind))
self.nclicks_b1 -= 1
if self.bx1 > self.bx2: self.bx1,self.bx2 = self.bx2,self.bx1
self.fitregion += self.specplotter.axis.plot(
self.specplotter.vind[self.bx1:self.bx2],
self.specplotter.spectrum[self.bx1:self.bx2]+self.specplotter.offset,
drawstyle='steps-mid',
color='g',alpha=0.5)
self.specplotter.refresh()
self.excludemask[self.bx1:self.bx2] = False
self.excludevelo += [self.specplotter.vind]
self.excludepix += [self.bx2]
if event.button in [2,3]:
disconnect(self.click)
self.dofit(exclude=None,annotate=annotate)
for p in self.fitregion:
p.set_visible(False)
self.specplotter.axis.lines.remove(p)
self.fitregion=[] # I should be able to just remove from the list... but it breaks the loop...
self.specplotter.refresh()
def annotate(self,loc='upper left'):
bltext = "bl: $y=$"+"".join(["$%+6.3gx^{%i}$" % (f,self.order-i)
for i,f in enumerate(self.baselinepars)])
#self.blleg = text(xloc,yloc ,bltext,transform = self.specplotter.axis.transAxes)
self.clearlegend()
pl = matplotlib.collections.CircleCollection([0],edgecolors=['k'])
self.blleg = self.specplotter.axis.legend(
(pl,),
(bltext,),loc=loc,markerscale=0.001,
borderpad=0.1, handlelength=0.1, handletextpad=0.1
)
self.specplotter.axis.add_artist(self.blleg)
if self.specplotter.autorefresh: self.specplotter.refresh()
def clearlegend(self):
if self.blleg is not None:
self.blleg.set_visible(False)
if self.blleg in self.specplotter.axis.artists:
self.specplotter.axis.artists.remove(self.blleg)
if self.specplotter.autorefresh: self.specplotter.refresh()
def savefit(self):
if self.baselinepars is not None:
for ii,p in enumerate(self.baselinepars):
self.specplotter.header.update('BLCOEF%0.2i' % (ii),p,comment="Baseline polynomial best-fit coefficient x^%i" % (self.order-ii))
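def _baseline_usage_sketch(sp):
    """
    Hedged usage sketch only; the numbers are placeholders, not defaults, and
    `sp` is assumed to be a SpecPlotter built from a file with a FITS header.
    Fits and subtracts a 2nd-order polynomial, excluding -10..10 km/s where the
    line is assumed to sit.
    """
    sp.baseline(order=2, exclude=[-10, 10], annotate=True)
    return sp.baseline.baselinepars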
class Specfit:
def __init__(self,specplotter):
self.model = None
self.modelpars = None
self.modelerrs = None
self.modelplot = None
self.guessplot = []
self.fitregion = []
self.ngauss = 0
self.nclicks_b1 = 0
self.nclicks_b2 = 0
self.gx1 = 0
self.gx2 = specplotter.spectrum.shape[0]
self.guesses = []
self.click = 0
self.fitkwargs = {}
self.auto = False
self.autoannotate = True
self.specplotter = specplotter
self.gaussleg=None
self.residuals=None
self.setfitspec()
#self.seterrspec()
def __call__(self, interactive=False, usemoments=True, fitcolor='r',
multifit=False, guesses=None, annotate=True, save=True,
**kwargs):
"""
Fit gaussians to a spectrum
guesses = [height,amplitude,center,width] for the default single-gaussian fit;
with multifit=True, pass flat [amplitude,center,width] triplets, one per component
"""
self.fitcolor = fitcolor
self.clear()
self.ngauss = 0
self.fitkwargs = kwargs
if interactive:
print "Left-click twice to select a fitting range, then middle-click twice to select a peak and width"
self.nclicks_b1 = 0
self.nclicks_b2 = 0
self.guesses = []
self.click = self.specplotter.axis.figure.canvas.mpl_connect('button_press_event',self.makeguess)
self.autoannotate = annotate
elif multifit:
if guesses is None:
print "You must input guesses when using multifit. Also, baseline first!"
else:
self.guesses = guesses
self.multifit()
self.autoannotate = annotate
else:
#print "Non-interactive, 1D fit with automatic guessing"
if self.specplotter.baseline.order is None:
self.specplotter.baseline.order=0
self.onedfit(usemoments=usemoments,annotate=annotate,**kwargs)
else:
self.onedfit(usemoments=usemoments,annotate=annotate,
vheight=False,height=0.0,**kwargs)
if self.specplotter.autorefresh: self.specplotter.refresh()
if save: self.savefit()
def seterrspec(self,usestd=None,useresiduals=True):
if self.specplotter.errspec is not None and not usestd:
self.errspec = self.specplotter.errspec
elif self.residuals is not None and useresiduals:
self.errspec = ones(self.spectofit.shape[0]) * self.residuals.std()
else: self.errspec = ones(self.spectofit.shape[0]) * self.spectofit.std()
def setfitspec(self):
self.spectofit = copy(self.specplotter.spectrum)
OKmask = (self.spectofit==self.spectofit)
self.spectofit[(True-OKmask)] = 0
self.seterrspec()
self.errspec[(True-OKmask)] = 1e10
def multifit(self):
self.ngauss = len(self.guesses)/3
self.setfitspec()
if self.fitkwargs.has_key('negamp'): self.fitkwargs.pop('negamp')
mpp,model,mpperr,chi2 = gaussfitter.multigaussfit(
self.specplotter.vind[self.gx1:self.gx2],
self.spectofit[self.gx1:self.gx2],
err=self.errspec[self.gx1:self.gx2],
ngauss=self.ngauss,
params=self.guesses,
**self.fitkwargs)
self.chi2 = chi2
self.dof = self.gx2-self.gx1-self.ngauss*3
self.model = model
self.modelpars = mpp.tolist()
self.modelerrs = mpperr.tolist()
self.modelplot = self.specplotter.axis.plot(
self.specplotter.vind[self.gx1:self.gx2],
self.model+self.specplotter.offset, color=self.fitcolor, linewidth=0.5)
self.residuals = self.spectofit[self.gx1:self.gx2] - self.model
if self.autoannotate:
self.annotate()
def onedfit(self, usemoments=True, annotate=True, vheight=True, height=0, negamp=None,**kwargs):
self.ngauss = 1
self.auto = True
self.setfitspec()
if usemoments: # this can be done within gaussfit but I want to save them
self.guesses = gaussfitter.onedmoments(
self.specplotter.vind[self.gx1:self.gx2],
self.spectofit[self.gx1:self.gx2],
vheight=vheight,negamp=negamp,**kwargs)
if vheight is False: self.guesses = [height]+self.guesses
else:
if negamp: self.guesses = [height,-1,0,1]
else: self.guesses = [height,1,0,1]
mpp,model,mpperr,chi2 = gaussfitter.onedgaussfit(
self.specplotter.vind[self.gx1:self.gx2],
self.spectofit[self.gx1:self.gx2],
err=self.errspec[self.gx1:self.gx2],
vheight=vheight,
params=self.guesses,
**self.fitkwargs)
self.chi2 = chi2
self.dof = self.gx2-self.gx1-self.ngauss*3-vheight
if vheight:
self.specplotter.baseline.baselinepars = mpp[:1] # first item in list form
self.model = model - mpp[0]
else: self.model = model
self.residuals = self.spectofit[self.gx1:self.gx2] - self.model
self.modelpars = mpp[1:].tolist()
self.modelerrs = mpperr[1:].tolist()
self.modelplot = self.specplotter.axis.plot(
self.specplotter.vind[self.gx1:self.gx2],
self.model+self.specplotter.offset, color=self.fitcolor, linewidth=0.5)
if annotate:
self.annotate()
if vheight: self.specplotter.baseline.annotate()
def fullsizemodel(self):
"""
If the gaussian was fit to a sub-region of the spectrum,
expand it (with zeros) to fill the spectrum. You can
always recover the original by:
origmodel = model[gx1:gx2]
"""
if self.model.shape != self.specplotter.spectrum.shape:
temp = zeros(self.specplotter.spectrum.shape)
temp[self.gx1:self.gx2] = self.model
self.model = temp
self.residuals = self.spectofit - self.model
def plotresiduals(self,fig=None,axis=None,clear=True,**kwargs):
"""
Plot residuals of the fit. Specify a figure or
axis; defaults to figure(2).
kwargs are passed to matplotlib plot
"""
if axis is None:
fig=figure(2)
self.residualaxis = gca()
if clear: self.residualaxis.clear()
else:
self.residualaxis = axis
if clear: self.residualaxis.clear()
self.residualplot = self.residualaxis.plot(self.specplotter.vind[self.gx1:self.gx2],
self.residuals,drawstyle='steps-mid',
linewidth=0.5, color='k', **kwargs)
if self.specplotter.vmin is not None and self.specplotter.vmax is not None:
self.residualaxis.set_xlim(self.specplotter.vmin,self.specplotter.vmax)
self.residualaxis.figure.canvas.draw()
def annotate(self,loc='upper right'):
#text(xloc,yloc ,"c=%g" % self.modelpars[1],transform = self.specplotter.axis.transAxes)
#text(xloc,yloc-0.05,"w=%g" % self.modelpars[2],transform = self.specplotter.axis.transAxes)
#text(xloc,yloc-0.10,"a=%g" % self.modelpars[0],transform = self.specplotter.axis.transAxes)
self.clearlegend()
pl = matplotlib.collections.CircleCollection([0],edgecolors=['k'])
self.gaussleg = self.specplotter.axis.legend(
tuple([pl]*3*self.ngauss),
tuple(flatten(
[("c%i=%6.4g $\\pm$ %6.4g" % (jj,self.modelpars[1+jj*3],self.modelerrs[1+jj*3]),
"w%i=%6.4g $\\pm$ %6.4g" % (jj,self.modelpars[2+jj*3],self.modelerrs[2+jj*3]),
"a%i=%6.4g $\\pm$ %6.4g" % (jj,self.modelpars[0+jj*3],self.modelerrs[0+jj*3]))
for jj in range(self.ngauss)])),
loc=loc,markerscale=0.01,
borderpad=0.1, handlelength=0.1, handletextpad=0.1
)
self.gaussleg.draggable(True)
self.specplotter.axis.add_artist(self.gaussleg)
if self.specplotter.autorefresh: self.specplotter.refresh()
def selectregion(self,event):
if self.nclicks_b1 == 0:
self.gx1 = argmin(abs(event.xdata-self.specplotter.vind))
self.nclicks_b1 += 1
elif self.nclicks_b1 == 1:
self.gx2 = argmin(abs(event.xdata-self.specplotter.vind))
self.nclicks_b1 -= 1
if self.gx1 > self.gx2: self.gx1,self.gx2 = self.gx2,self.gx1
if abs(self.gx1-self.gx2) > 3: # can't fit w/ fewer data than pars
self.fitregion = self.specplotter.axis.plot(
self.specplotter.vind[self.gx1:self.gx2],
self.specplotter.spectrum[self.gx1:self.gx2]+self.specplotter.offset,
drawstyle='steps-mid',
color='c')
if self.guesses == []:
self.guesses = gaussfitter.onedmoments(
self.specplotter.vind[self.gx1:self.gx2],
self.spectofit[self.gx1:self.gx2],
vheight=0)
self.ngauss = 1
self.auto = True
else:
print "Fitting region is too small (channels %i:%i). Try again." % (self.gx1,self.gx2)
def guesspeakwidth(self,event):
if self.nclicks_b2 % 2 == 0:
if self.auto:
self.guesses[:2] = [event.ydata,event.xdata]
else:
self.guesses += [event.ydata,event.xdata,1]
self.ngauss += 1
self.nclicks_b2 += 1
self.guessplot += [self.specplotter.axis.scatter(event.xdata,event.ydata,marker='x',c='r')]
elif self.nclicks_b2 % 2 == 1:
self.guesses[-1] = abs(event.xdata-self.guesses[-2])
self.nclicks_b2 += 1
self.guessplot += self.specplotter.axis.plot([event.xdata,
2*self.guesses[-2]-event.xdata],[event.ydata]*2,
color='r')
if self.auto:
self.auto = False
if self.nclicks_b2 / 2 > self.ngauss:
print "There have been %i middle-clicks but there are only %i gaussians" % (self.nclicks_b2,self.ngauss)
self.ngauss += 1
def clear(self,legend=True):
if self.modelplot is not None:
for p in self.modelplot:
p.set_visible(False)
if legend: self.clearlegend()
def makeguess(self,event):
if event.button == 1:
self.selectregion(event)
elif event.button == 2:
self.guesspeakwidth(event)
elif event.button == 3:
disconnect(self.click)
if self.ngauss > 0:
print len(self.guesses)/3," Guesses: ",self.guesses," X channel range: ",self.gx1,self.gx2
if len(self.guesses) % 3 == 0:
self.multifit()
for p in self.guessplot + self.fitregion:
p.set_visible(False)
else:
print "error, wrong # of pars"
if self.specplotter.autorefresh: self.specplotter.refresh()
def clearlegend(self):
if self.gaussleg is not None:
self.gaussleg.set_visible(False)
if self.gaussleg in self.specplotter.axis.artists:
self.specplotter.axis.artists.remove(self.gaussleg)
if self.specplotter.autorefresh: self.specplotter.refresh()
def savefit(self):
if self.modelpars is not None:
for ii,p in enumerate(self.modelpars):
if ii % 3 == 0: self.specplotter.header.update('AMP%1i' % (ii/3),p,comment="Gaussian best fit amplitude #%i" % (ii/3))
if ii % 3 == 1: self.specplotter.header.update('CEN%1i' % (ii/3),p,comment="Gaussian best fit center #%i" % (ii/3))
if ii % 3 == 2: self.specplotter.header.update('WID%1i' % (ii/3),p,comment="Gaussian best fit width #%i" % (ii/3))
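def _specfit_multifit_sketch(sp):
    """
    Hedged sketch of a non-interactive two-component fit; every number is a
    placeholder and `sp` is assumed to be a baselined SpecPlotter with a FITS
    header.  With multifit=True the guesses are flat [amplitude, center, width]
    triplets, one per gaussian.
    """
    sp.specfit(multifit=True, guesses=[1.0, -5.0, 2.0, 0.5, 10.0, 3.0])
    return sp.specfit.modelpars, sp.specfit.modelerrs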
def mapplot(plane,cube,vconv=lambda x: x,xtora=lambda x: x,ytodec=lambda x: x, gaiafignum=0, specfignum=1):
gaiafig = figure(gaiafignum)
gaiafig.clf()
gaiaax = gaiafig.add_subplot(111)
gaiaax.imshow(plane)
sp = SpecPlotter(cube, vconv=vconv, xtora=xtora, ytodec=ytodec,
gaiafignum=gaiafignum, fignum=specfignum, gaiafig=gaiafig)
sp.clickid = gaiafig.canvas.mpl_connect('button_press_event',sp)
#connect('button_press_event',sp)
def splat_3d(filename,xi=0,yi=0,vmin=None,vmax=None,button=1,dobaseline=False,exclude=None,
smooth=None,smoothto=None,smoothtype='gaussian',order=1,savepre=None,**kwargs):
"""
Inputs:
vmin,vmax - range over which to baseline and plot
exclude - (internal) range to exclude from baseline fit
"""
dv,v0,p3,hdr,cube,xtora,ytodec,vconv,xunits,conversion_factor,units = open_3d(filename)
if units is None: units="UNITS"
if xunits is None: xunits="km/s"
if conversion_factor == 0 or conversion_factor is None: conversion_factor=1.0
sp = splat_1d(vpars=[dv, v0, p3], hdr=hdr, spec=cube[:, yi, xi],
xtora=xtora, ytodec=ytodec, vconv=vconv, units=units,
conversion_factor=conversion_factor, xunits=xunits, **kwargs)
sp.cube = cube
return sp
def gaia(filename,estimator='max',axis=0):
f = pyfits.open(filename)
hdr = f[0].header
cube = f[0].data
dv,v0,p3 = hdr.get('CD3_3'),hdr.get('CRVAL3'),hdr.get('CRPIX3')
dr,r0,p1 = hdr.get('CD1_1'),hdr.get('CRVAL1'),hdr.get('CRPIX1')
dd,d0,p2 = hdr.get('CD2_2'),hdr.get('CRVAL2'),hdr.get('CRPIX2')
if dv is None: dv = hdr.get('CDELT3')
if dr is None: dr = hdr.get('CDELT1')
if dd is None: dd = hdr.get('CDELT2')
xtora = lambda x: (x-p1+1)*dr+r0 # convert pixel coordinates to RA/Dec/Velocity
ytodec = lambda y: (y-p2+1)*dd+d0
vconv = lambda v: (v-p3+1)*dv+v0
if axis > 0:
cube = cube.swapaxes(0,axis)
if estimator == 'max':
p = where(isnan(cube),0,cube).max(axis=0)
elif estimator == 'int':
p = where(isnan(cube),0,cube).sum(axis=0) * dv
elif estimator == 'intdivmax':
cut = MAD(cube.ravel()) + nanmedian(cube.ravel())
if cut < 0:
cut = 0
m = where(isnan(cube),0,cube).max(axis=0)
i = where(isnan(cube),0,cube).sum(axis=0) * dv
p = where(i<0,0,i)/where(m<=cut,numpy.inf,m)
elif estimator[-5:] == ".fits":
p = pyfits.open(estimator)[0].data
mapplot(p,cube,vconv,xtora,ytodec)
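# Illustrative only (filenames are placeholders): gaia('cube.fits') displays the
# peak-intensity plane and connects mouse clicks to spectra; estimator='int'
# uses an integrated-intensity plane instead, and estimator='plane.fits' loads a
# precomputed plane from disk.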
def baseline_file(filename,outfilename,vmin=None,vmax=None,order=1,crop=False):
f = pyfits.open(filename)
hdr = f[0].header
cube = f[0].data.squeeze()
dv,v0,p3 = hdr.get('CD3_3'),hdr.get('CRVAL3'),hdr.get('CRPIX3')
dr,r0,p1 = hdr.get('CD1_1'),hdr.get('CRVAL1'),hdr.get('CRPIX1')
dd,d0,p2 = hdr.get('CD2_2'),hdr.get('CRVAL2'),hdr.get('CRPIX2')
if dv is None: dv = hdr.get('CDELT3')
if dr is None: dr = hdr.get('CDELT1')
if dd is None: dd = hdr.get('CDELT2')
vconv = lambda v: (v-p3+1)*dv+v0
varr = vconv(arange(cube.shape[-1]))
if vmin is None: argvmin = None
else: argvmin = argmin(abs(varr-vmin))
if vmax is None: argvmax = None
else: argvmax = argmin(abs(varr-vmax))
bspec,bfit = baseline(cube,xmin=argvmin,xmax=argvmax,order=order)
def baseline(spectrum,xarr=None,xmin='default',xmax='default',order=1,quiet=True,exclude=None,
mask=None):
"""
Subtract a baseline from a spectrum
If xmin,xmax are not specified, defaults to ignoring first and last 10% of spectrum
*unless* order > 1, in which case ignoring the ends tends to cause strange effects
exclude is a set of start/end indices to ignore when baseline fitting
(ignored by setting error to infinite in fitting procedure)
"""
if xmin == 'default':
if order <= 1: xmin = floor( spectrum.shape[-1]*0.1 )
else: xmin = 0
elif xmin is None:
xmin = 0
if xmax == 'default':
if order <= 1: xmax = ceil( spectrum.shape[-1]*0.9 )
else: xmax = spectrum.shape[-1]
elif xmax is None:
xmax = spectrum.shape[-1]
pguess = [1]*(order+1)
if xarr is None:
xarr = indices(spectrum.shape).squeeze()
subxarr = xarr[xmin:xmax]
def mpfitfun(data,err):
def f(p,fjac=None): return [0,numpy.ravel((poly1d(p)(subxarr)-data)/err)]
return f
err = ones(spectrum.shape)
if exclude is not None:
err[exclude[0]:exclude[1]] = 1e10
if mask is not None:
if mask.dtype.name != 'bool': mask = mask.astype('bool')
err[mask] = 1e10
spectrum[mask] = 0
if (spectrum!=spectrum).sum() > 0:
print "There is an error in baseline: some values are NaN"
import pdb; pdb.set_trace()
mp = mpfit(mpfitfun(spectrum[xmin:xmax],err[xmin:xmax]),xall=pguess,quiet=quiet)
fitp = mp.params
bestfit = poly1d(fitp)(xarr).squeeze()
return (spectrum-bestfit),fitp
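def _baseline_function_sketch():
    """
    Hedged, self-contained sketch of calling baseline() on a synthetic spectrum
    (a linear slope plus a gaussian line); all numbers are placeholders.
    """
    x = linspace(-50.0, 50.0, 200)
    spec = 0.01 * x + 3.0 + exp(-x ** 2 / (2 * 5.0 ** 2))
    # keep the channels containing the line out of the polynomial fit
    cleaned, coeffs = baseline(spec, xarr=x, order=1, exclude=[80, 120])
    return cleaned, coeffs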
def open_3d(filename):
f = pyfits.open(filename)
hdr = f[0].header
cube = f[0].data
if len(cube.shape) == 4: cube=cube[0,:,:,:]
#cube = reshape(cube.mean(axis=2).mean(axis=1),[cube.shape[0],1,1])
dv,v0,p3 = hdr.get('CD3_3'),hdr.get('CRVAL3'),hdr.get('CRPIX3')
dr,r0,p1 = hdr.get('CD1_1'),hdr.get('CRVAL1'),hdr.get('CRPIX1')
dd,d0,p2 = hdr.get('CD2_2'),hdr.get('CRVAL2'),hdr.get('CRPIX2')
if dv is None: dv = hdr.get('CDELT3')
if dr is None: dr = hdr.get('CDELT1')
if dd is None: dd = hdr.get('CDELT2')
xtora = lambda x: (x-p1+1)*dr+r0 # convert pixel coordinates to RA/Dec/Velocity
ytodec = lambda y: (y-p2+1)*dd+d0
vconv = lambda v: (v-p3+1)*dv+v0
if hdr.get('CUNIT3') in ['m/s','M/S']:
conversion_factor = 1000.0
xunits = 'km/s' # change to km/s because you're converting units
else:
xunits = hdr.get('CUNIT3')
if xunits in ("hz","Hz"):
print "Converting from Hz to GHz"
xunits = "GHz"
conversion_factor = 1.0e9
else:
conversion_factor = 1.0
units = hdr.get('BUNIT')
return dv,v0,p3,hdr,cube,xtora,ytodec,vconv,xunits,conversion_factor,units
def open_1d(filename,specnum=0,wcstype='',errspecnum=None,maskspecnum=None):
"""
Grabs all the relevant pieces of a 1d spectrum for plotting
wcstype is the suffix on the WCS type to get to velocity/frequency/whatever
"""
f = pyfits.open(filename)
hdr = f[0].header
spec = f[0].data
errspec = None
maskspec = None
if hdr.get('NAXIS') == 2:
if errspecnum is not None:
errspec = spec[errspecnum,:]
if maskspecnum is not None:
maskspec = spec[maskspecnum,:]
if isinstance(specnum,list):
spec = spec[specnum,:].mean(axis=0)
elif isinstance(specnum,int):
spec = spec[specnum,:]
else:
raise TypeError("Specnum is of wrong type (not a list of integers or an integer). Type: %s" %
str(type(specnum)))
elif hdr.get('NAXIS') > 2:
raise ValueError("Too many axes for open_1d (splat_1d) - use cube instead")
if hdr.get('CD1_1'+wcstype):
dv,v0,p3 = hdr['CD1_1'+wcstype],hdr['CRVAL1'+wcstype],hdr['CRPIX1'+wcstype]
else:
dv,v0,p3 = hdr['CDELT1'+wcstype],hdr['CRVAL1'+wcstype],hdr['CRPIX1'+wcstype]
if hdr.get('OBJECT'):
specname = hdr.get('OBJECT')
elif hdr.get('GLON') and hdr.get('GLAT'):
specname = "%s %s" % (hdr.get('GLON'),hdr.get('GLAT'))
else:
specname = filename[:-5] if filename.endswith(".fits") else filename
if hdr.get('CUNIT1'+wcstype) in ['m/s','M/S']:
conversion_factor = 1000.0
xunits = 'km/s' # change to km/s because you're converting units
else:
xunits = hdr.get('CUNIT1'+wcstype)
if xunits in ("hz","Hz"):
print "Converting from Hz to GHz"
xunits = "GHz"
conversion_factor = 1.0e9
else:
conversion_factor = 1.0
vconv = lambda v: ((v-p3+1)*dv+v0)/conversion_factor
xtora=None
ytodec=None
units = hdr.get('BUNIT').strip()
if hdr.get('CTYPE1'+wcstype):
xtype = hdr.get('CTYPE1'+wcstype)
else:
xtype = 'VLSR'
if hdr.get('REFFREQ'+wcstype):
reffreq = hdr.get('REFFREQ'+wcstype)
else:
reffreq = None
return dv,v0,p3,conversion_factor,hdr,spec,vconv,xtora,ytodec,specname,units,xunits,errspec,maskspec,reffreq
def splat_1d(filename=None,vmin=None,vmax=None,button=1,dobaseline=False,
exclude=None,smooth=None,order=1,savepre=None,vcrop=True,
vconv=None,vpars=None,hdr=None,spec=None,xtora=None,ytodec=None,
specname=None,quiet=True,specnum=0,errspecnum=None,wcstype='',
offset=0.0, continuum=0.0, annotatebaseline=False, plotspectrum=True,
smoothto=None, xunits=None, units=None, conversion_factor=None,
smoothtype='gaussian',convmode='valid',maskspecnum=None,**kwargs):
"""
Wrapper for specplotter creation. Works nicely with 1D spectra with well-defined
FITS headers (i.e., CRVAL1, CRPIX1, CDELT1, and optionally CUNIT1 and CTYPE1)
This documentation needs to be updated a lot... I implemented a lot of features
without documenting them, which was a mistake
Inputs:
vmin,vmax - range over which to baseline and plot
exclude - (internal) range to exclude from baseline fit
vcrop - will vmin/vmax crop out data, or just set the plot limits?
"""
if (vpars and vconv and hdr and spec is not None and xtora and ytodec
and units and xunits and conversion_factor):
dv,v0,p3 = vpars
errspec = None
maskspec = None
reffreq = None
if units is None and kwargs.has_key('units'): units = kwargs.pop('units')
else:
dv,v0,p3,conversion_factor,hdr,spec,vconv,xtora,ytodec,specname_file,units,xunits,errspec,maskspec,reffreq = \
open_1d(filename,specnum=specnum,wcstype=wcstype,errspecnum=errspecnum,maskspecnum=maskspecnum)
if specname is None: specname=specname_file
if units is None and kwargs.has_key('units'): units = kwargs.pop('units')
if type(continuum)==type('str'):
if hdr.get(continuum) is not None:
continuum = hdr.get(continuum)
else:
raise ValueError("Continuum specified but none present.")
varr = vconv(arange(spec.shape[0]))
if vmin is None or vcrop==False: argvmin = 0
else:
argvmin = argmin(abs(varr-vmin))
if dv > 0:
hdr.update('CRPIX1'+wcstype,p3-argvmin)
if vmax is None or vcrop==False: argvmax = spec.shape[0]
else:
argvmax = argmin(abs(varr-vmax))
if dv < 0:
hdr.update('CRPIX1'+wcstype,p3-argvmax)
if argvmin > argvmax:
argvmin,argvmax = argvmax,argvmin
#if exclude is not None: exclude = exclude[::-1]
elif argvmin == argvmax:
raise Exception("Error: no data in velocity range %g:%g for source %s."
% (vmin,vmax,filename))
# these lines were meant to automatically put "exclude" into velocity
# units; this is now done in the baseline code
#if exclude is not None:
# exclude[0] = argmin(abs(varr-exclude[0]))
# exclude[1] = argmin(abs(varr-exclude[1]))
# exclude = array(exclude) - argvmin
vconv = lambda v: ((v-p3+argvmin+1)*dv+v0) / conversion_factor
ivconv = lambda V: p3-1-argvmin+(V*conversion_factor-v0)/dv
specplot = spec[argvmin:argvmax]
if errspec is not None: errspec=errspec[argvmin:argvmax]
if maskspec is not None: maskspec=maskspec[argvmin:argvmax]
if smoothto:
smooth = abs(smoothto/dv)
if smooth:
roundsmooth = round(smooth) # can only downsample by integers
# change fitter first
if smoothtype == 'hanning':
specplot = convolve(specplot,hanning(2+roundsmooth)/hanning(2+roundsmooth).sum(),convmode)[::roundsmooth]
kernsize = smooth
ones_sameshape = zeros(smooth+2)
ones_sameshape[1:-1] = 1
elif smoothtype == 'boxcar':
specplot = convolve(specplot,ones(roundsmooth)/float(roundsmooth),convmode)[::roundsmooth]
kernsize = roundsmooth
ones_sameshape = ones(roundsmooth)
elif smoothtype == 'gaussian':
speclen = specplot.shape[0]
xkern = linspace(-1*smooth,smooth,smooth*3)
kernel = exp(-xkern**2/(2*(smooth/sqrt(8*log(2)))**2))
kernel /= kernel.sum()
kernsize = len(kernel)
specplot = convolve(specplot,kernel,convmode)[::roundsmooth]
ones_sameshape = zeros(roundsmooth*3)
ones_sameshape[roundsmooth:-roundsmooth] = 1
if errspec is not None:
errspec = sqrt(convolve(errspec**2,ones_sameshape,convmode)[::roundsmooth]) / float(roundsmooth)
if maskspec is not None:
maskspec = array(convolve(maskspec,ones_sameshape,convmode)[::roundsmooth],dtype='bool')
if maskspec.shape != specplot.shape: import pdb; pdb.set_trace()
# this bit of code may also make sense, but I'm shifting the center pixel instead
# b/c it's easier (?) to deal with velocity range
#v0 += (abs(dv)*smooth - abs(dv))/2.0 # pixel center moves by half the original pixel size
dv *= roundsmooth
if convmode == 'same':
newrefpix = (p3-argvmin)/roundsmooth
elif convmode == 'full':
newrefpix = (p3-0.5-argvmin+kernsize/2.0)/roundsmooth
elif convmode == 'valid':
newrefpix = (p3-0.5-argvmin-kernsize/2.0)/roundsmooth
# this was resolved by advanced guess-and check
# but also, sort of makes sense: FITS refers to the *center* of a pixel. You want to
# shift 1/2 pixel to the right so that the first pixel goes from 0 to 1
vconv = lambda v: ((v-newrefpix)*dv+v0)/conversion_factor
ivconv = lambda V: newrefpix+(V*conversion_factor-v0)/dv
hdr.update('CRPIX1'+wcstype,newrefpix+1)
hdr.update('CDELT1'+wcstype,dv)
sp = SpecPlotter(specplot, vconv=vconv, xtora=xtora, ytodec=ytodec,
specname=specname, dv=dv/conversion_factor, hdr=hdr, reffreq=reffreq,
errspec=errspec, maskspec=maskspec, xunits=xunits, **kwargs)
if plotspectrum:
sp.plotspec(button=button, cube=False, vmin=vmin, vmax=vmax,
units=units, offset=offset, continuum=continuum,
**kwargs)
if dobaseline:
sp.baseline(exclude=exclude,order=order,quiet=quiet,annotate=annotatebaseline)
if plotspectrum: sp.refresh()
if hdr.get('GLON') and hdr.get('GLAT'):
sp.glon = hdr.get('GLON')
sp.glat = hdr.get('GLAT')
if savepre is not None:
glon,glat = sp.glon,sp.glat
if glat < 0: pm=""
else: pm = "+"
savename = savepre + "G%07.3f%0s%07.3f_" % (glon,pm,glat) + hdr.get('MOLECULE').replace(' ','') + hdr.get('TRANSITI').replace(' ','')
savefig(savename+'.png')
return sp
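def _splat_1d_usage_sketch():
    """
    Hedged usage sketch; 'spectrum.fits' is a placeholder filename and every
    keyword value below is illustrative rather than a recommended default.
    """
    sp = splat_1d('spectrum.fits', vmin=-50, vmax=50, dobaseline=True, order=1,
                  smooth=2, smoothtype='boxcar')
    sp.specfit(interactive=False)  # single-gaussian fit seeded from moments
    sp.savefig('spectrum_fit.png')
    return sp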
def splat_tspec(filename,specnum=0,**kwargs):
"""
Same as splat_1d for tspec data
"""
tdata = pyfits.getdata(filename)
theader = pyfits.getheader(filename)
if len(tdata.shape) == 3:
tdata = tdata[specnum,:,:]
wavelength = tdata[0,:]
spectrum = tdata[1,:]
error = tdata[2,:]
vconv = lambda x: wavelength[x]
ivconv = lambda x: argmin(abs(wavelength-x))
specname='TSPEC'
dv = median(wavelength[1:] - wavelength[:-1])
sp = SpecPlotter(spectrum,vconv=vconv,specname=specname,dv=dv,hdr=theader)
sp.plotspec(cube=False,units=theader.get('YUNITS'),xunits=theader.get('XUNITS'),**kwargs)
return sp
| mit |
MycChiu/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/tensorflow_dataframe_test.py | 7 | 12865 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.dataframe.tensorflow_dataframe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import math
import tempfile
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import densify
from tensorflow.core.example import example_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def _assert_df_equals_dict(expected_df, actual_dict):
for col in expected_df:
if expected_df[col].dtype in [np.float32, np.float64]:
assertion = np.testing.assert_allclose
else:
assertion = np.testing.assert_array_equal
if expected_df[col].dtype.kind in ["O", "S", "U"]:
# Python 2/3 compatibility
# TensorFlow always returns bytes, so we just convert the unicode
# expectations to bytes also before comparing.
expected_values = [x.encode("utf-8") for x in expected_df[col].values]
else:
expected_values = expected_df[col].values
assertion(
expected_values,
actual_dict[col],
err_msg="Expected {} in column '{}'; got {}.".format(expected_values,
col,
actual_dict[col]))
def _make_test_csv():
f = tempfile.NamedTemporaryFile(
dir=test.get_temp_dir(), delete=False, mode="w")
w = csv.writer(f)
w.writerow(["int", "float", "bool", "string"])
for _ in range(100):
intvalue = np.random.randint(-10, 10)
floatvalue = np.random.rand()
boolvalue = int(np.random.rand() > 0.3)
stringvalue = "S: %.4f" % np.random.rand()
row = [intvalue, floatvalue, boolvalue, stringvalue]
w.writerow(row)
f.close()
return f.name
def _make_test_csv_sparse():
f = tempfile.NamedTemporaryFile(
dir=test.get_temp_dir(), delete=False, mode="w")
w = csv.writer(f)
w.writerow(["int", "float", "bool", "string"])
for _ in range(100):
# leave columns empty; these will be read as default value (e.g. 0 or NaN)
intvalue = np.random.randint(-10, 10) if np.random.rand() > 0.5 else ""
floatvalue = np.random.rand() if np.random.rand() > 0.5 else ""
boolvalue = int(np.random.rand() > 0.3) if np.random.rand() > 0.5 else ""
stringvalue = (("S: %.4f" % np.random.rand()) if np.random.rand() > 0.5 else
"")
row = [intvalue, floatvalue, boolvalue, stringvalue]
w.writerow(row)
f.close()
return f.name
def _make_test_tfrecord():
f = tempfile.NamedTemporaryFile(dir=test.get_temp_dir(), delete=False)
w = tf_record.TFRecordWriter(f.name)
for i in range(100):
ex = example_pb2.Example()
ex.features.feature["var_len_int"].int64_list.value.extend(range((i % 3)))
ex.features.feature["fixed_len_float"].float_list.value.extend(
[float(i), 2 * float(i)])
w.write(ex.SerializeToString())
return f.name
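# Illustrative only (not used by the tests below): the records written above can
# be inspected directly with the TFRecord reader, roughly like
#   for rec in tf_record.tf_record_iterator(_make_test_tfrecord()):
#       print(example_pb2.Example.FromString(rec))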
class TensorFlowDataFrameTestCase(test.TestCase):
"""Tests for `TensorFlowDataFrame`."""
def _assert_pandas_equals_tensorflow(self, pandas_df, tensorflow_df,
num_batches, batch_size):
self.assertItemsEqual(
list(pandas_df.columns) + ["index"], tensorflow_df.columns())
for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
row_numbers = [
total_row_num % pandas_df.shape[0]
for total_row_num in range(batch_size * batch_num, batch_size * (
batch_num + 1))
]
expected_df = pandas_df.iloc[row_numbers]
_assert_df_equals_dict(expected_df, batch)
def testInitFromPandas(self):
"""Test construction from Pandas DataFrame."""
if not HAS_PANDAS:
return
pandas_df = pd.DataFrame({"sparrow": range(10), "ostrich": 1})
tensorflow_df = df.TensorFlowDataFrame.from_pandas(
pandas_df, batch_size=10, shuffle=False)
batch = tensorflow_df.run_one_batch()
np.testing.assert_array_equal(pandas_df.index.values, batch["index"],
"Expected index {}; got {}".format(
pandas_df.index.values, batch["index"]))
_assert_df_equals_dict(pandas_df, batch)
def testBatch(self):
"""Tests `batch` method.
`DataFrame.batch()` should iterate through the rows of the
`pandas.DataFrame`, and should "wrap around" when it reaches the last row.
"""
if not HAS_PANDAS:
return
pandas_df = pd.DataFrame({
"albatross": range(10),
"bluejay": 1,
"cockatoo": range(0, 20, 2),
"penguin": list("abcdefghij")
})
tensorflow_df = df.TensorFlowDataFrame.from_pandas(pandas_df, shuffle=False)
# Rebatch `df` into the following sizes successively.
batch_sizes = [4, 7]
num_batches = 3
final_batch_size = batch_sizes[-1]
for batch_size in batch_sizes:
tensorflow_df = tensorflow_df.batch(batch_size, shuffle=False)
self._assert_pandas_equals_tensorflow(
pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=final_batch_size)
def testFromNumpy(self):
x = np.eye(20)
tensorflow_df = df.TensorFlowDataFrame.from_numpy(x, batch_size=10)
for batch in tensorflow_df.run(30):
for ind, val in zip(batch["index"], batch["value"]):
expected_val = np.zeros_like(val)
expected_val[ind] = 1
np.testing.assert_array_equal(expected_val, val)
def testFromCSV(self):
if not HAS_PANDAS:
return
num_batches = 100
batch_size = 8
enqueue_size = 7
data_path = _make_test_csv()
default_values = [0, 0.0, 0, ""]
pandas_df = pd.read_csv(data_path)
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
enqueue_size=enqueue_size,
batch_size=batch_size,
shuffle=False,
default_values=default_values)
self._assert_pandas_equals_tensorflow(
pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=batch_size)
def testFromCSVLimitEpoch(self):
batch_size = 8
num_epochs = 17
expected_num_batches = (num_epochs * 100) // batch_size
data_path = _make_test_csv()
default_values = [0, 0.0, 0, ""]
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
batch_size=batch_size,
shuffle=False,
default_values=default_values)
result_batches = list(tensorflow_df.run(num_epochs=num_epochs))
actual_num_batches = len(result_batches)
self.assertEqual(expected_num_batches, actual_num_batches)
# TODO(soergel): figure out how to dequeue the final small batch
expected_rows = 1696 # 212 batches of 8; the final partial batch is not dequeued (see TODO above)
actual_rows = sum([len(x["int"]) for x in result_batches])
self.assertEqual(expected_rows, actual_rows)
def testFromCSVWithFeatureSpec(self):
if not HAS_PANDAS:
return
num_batches = 100
batch_size = 8
data_path = _make_test_csv_sparse()
feature_spec = {
"int": parsing_ops.FixedLenFeature(None, dtypes.int16, np.nan),
"float": parsing_ops.VarLenFeature(dtypes.float16),
"bool": parsing_ops.VarLenFeature(dtypes.bool),
"string": parsing_ops.FixedLenFeature(None, dtypes.string, "")
}
pandas_df = pd.read_csv(data_path, dtype={"string": object})
# Pandas insanely uses NaN for empty cells in a string column.
# And, we can't use Pandas replace() to fix them because nan != nan
s = pandas_df["string"]
for i in range(0, len(s)):
if isinstance(s[i], float) and math.isnan(s[i]):
pandas_df.set_value(i, "string", "")
tensorflow_df = df.TensorFlowDataFrame.from_csv_with_feature_spec(
[data_path],
batch_size=batch_size,
shuffle=False,
feature_spec=feature_spec)
# These columns were sparse; re-densify them for comparison
tensorflow_df["float"] = densify.Densify(np.nan)(tensorflow_df["float"])
tensorflow_df["bool"] = densify.Densify(np.nan)(tensorflow_df["bool"])
self._assert_pandas_equals_tensorflow(
pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=batch_size)
def testFromExamples(self):
num_batches = 77
enqueue_size = 11
batch_size = 13
data_path = _make_test_tfrecord()
features = {
"fixed_len_float":
parsing_ops.FixedLenFeature(
shape=[2], dtype=dtypes.float32, default_value=[0.0, 0.0]),
"var_len_int":
parsing_ops.VarLenFeature(dtype=dtypes.int64)
}
tensorflow_df = df.TensorFlowDataFrame.from_examples(
data_path,
enqueue_size=enqueue_size,
batch_size=batch_size,
features=features,
shuffle=False)
# `test.tfrecord` contains 100 records with two features: var_len_int and
# fixed_len_float. Entry n contains `range(n % 3)` and
# `float(n)` for var_len_int and fixed_len_float,
# respectively.
num_records = 100
def _expected_fixed_len_float(n):
return np.array([float(n), 2 * float(n)])
def _expected_var_len_int(n):
return np.arange(n % 3)
for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
record_numbers = [
n % num_records
for n in range(batch_num * batch_size, (batch_num + 1) * batch_size)
]
for i, j in enumerate(record_numbers):
np.testing.assert_allclose(
_expected_fixed_len_float(j), batch["fixed_len_float"][i])
var_len_int = batch["var_len_int"]
for i, ind in enumerate(var_len_int.indices):
val = var_len_int.values[i]
expected_row = _expected_var_len_int(record_numbers[ind[0]])
expected_value = expected_row[ind[1]]
np.testing.assert_array_equal(expected_value, val)
def testSplitString(self):
batch_size = 8
num_epochs = 17
expected_num_batches = (num_epochs * 100) // batch_size
data_path = _make_test_csv()
default_values = [0, 0.0, 0, ""]
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
batch_size=batch_size,
shuffle=False,
default_values=default_values)
a, b = tensorflow_df.split("string", 0.7) # no rebatching
total_result_batches = list(tensorflow_df.run(num_epochs=num_epochs))
a_result_batches = list(a.run(num_epochs=num_epochs))
b_result_batches = list(b.run(num_epochs=num_epochs))
self.assertEqual(expected_num_batches, len(total_result_batches))
self.assertEqual(expected_num_batches, len(a_result_batches))
self.assertEqual(expected_num_batches, len(b_result_batches))
total_rows = sum([len(x["int"]) for x in total_result_batches])
a_total_rows = sum([len(x["int"]) for x in a_result_batches])
b_total_rows = sum([len(x["int"]) for x in b_result_batches])
print("Split rows: %s => %s, %s" % (total_rows, a_total_rows, b_total_rows))
# TODO(soergel): figure out how to dequeue the final small batch
expected_total_rows = 1696 # 212 batches of 8; the final partial batch is not dequeued
self.assertEqual(expected_total_rows, total_rows)
    # The hash-based split is not exactly 70/30, but it is reproducible.
    self.assertEqual(1087, a_total_rows)
    # self.assertEqual(int(total_rows * 0.7), a_total_rows)
    self.assertEqual(609, b_total_rows)
    # self.assertEqual(int(total_rows * 0.3), b_total_rows)
# The strings used for hashing were all unique in the original data, but
# we ran 17 epochs, so each one should appear 17 times. Each copy should
# be hashed into the same partition, so there should be no overlap of the
# keys.
a_strings = set([s for x in a_result_batches for s in x["string"]])
b_strings = set([s for x in b_result_batches for s in x["string"]])
self.assertEqual(frozenset(), a_strings & b_strings)
if __name__ == "__main__":
test.main()
| apache-2.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/core/strings.py | 1 | 105891 | import codecs
from functools import wraps
import re
import textwrap
from typing import Dict, List
import warnings
import numpy as np
import pandas._libs.lib as lib
import pandas._libs.ops as libops
from pandas.util._decorators import Appender, deprecate_kwarg
from pandas.core.dtypes.common import (
ensure_object,
is_bool_dtype,
is_categorical_dtype,
is_integer,
is_list_like,
is_re,
is_scalar,
is_string_like,
)
from pandas.core.dtypes.generic import ABCIndexClass, ABCMultiIndex, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core.algorithms import take_1d
from pandas.core.base import NoNewAttributesMixin
import pandas.core.common as com
_cpython_optimized_encoders = (
"utf-8",
"utf8",
"latin-1",
"latin1",
"iso-8859-1",
"mbcs",
"ascii",
)
_cpython_optimized_decoders = _cpython_optimized_encoders + ("utf-16", "utf-32")
_shared_docs = dict() # type: Dict[str, str]
def cat_core(list_of_columns: List, sep: str):
"""
Auxiliary function for :meth:`str.cat`
Parameters
----------
list_of_columns : list of numpy arrays
List of arrays to be concatenated with sep;
these arrays may not contain NaNs!
sep : string
The separator string for concatenating the columns
Returns
-------
nd.array
The concatenation of list_of_columns with sep
"""
list_with_sep = [sep] * (2 * len(list_of_columns) - 1)
list_with_sep[::2] = list_of_columns
return np.sum(list_with_sep, axis=0)
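# A minimal usage sketch for cat_core, assuming 1-d object ndarrays of strings
# without NaNs (as required by the docstring above):
#
#   >>> left = np.array(["a", "b"], dtype=object)
#   >>> right = np.array(["x", "y"], dtype=object)
#   >>> cat_core([left, right], "-")
#   array(['a-x', 'b-y'], dtype=object)
#
# The separator is interleaved between the columns and np.sum performs the
# elementwise string concatenation.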
def cat_safe(list_of_columns: List, sep: str):
"""
Auxiliary function for :meth:`str.cat`.
Same signature as cat_core, but handles TypeErrors in concatenation, which
happen if the arrays in list_of columns have the wrong dtypes or content.
Parameters
----------
list_of_columns : list of numpy arrays
List of arrays to be concatenated with sep;
these arrays may not contain NaNs!
sep : string
The separator string for concatenating the columns
Returns
-------
nd.array
The concatenation of list_of_columns with sep
"""
try:
result = cat_core(list_of_columns, sep)
except TypeError:
        # if there are any non-string values (wrong dtype or hidden behind
        # object dtype), np.sum will fail; catch and re-raise with a better message
for column in list_of_columns:
dtype = lib.infer_dtype(column, skipna=True)
if dtype not in ["string", "empty"]:
raise TypeError(
"Concatenation requires list-likes containing only "
"strings (or missing values). Offending values found in "
"column {}".format(dtype)
) from None
return result
def _na_map(f, arr, na_result=np.nan, dtype=object):
# should really _check_ for NA
return _map(f, arr, na_mask=True, na_value=na_result, dtype=dtype)
def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):
if not len(arr):
return np.ndarray(0, dtype=dtype)
if isinstance(arr, ABCSeries):
arr = arr.values
if not isinstance(arr, np.ndarray):
arr = np.asarray(arr, dtype=object)
if na_mask:
mask = isna(arr)
try:
convert = not all(mask)
result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert)
except (TypeError, AttributeError) as e:
# Reraise the exception if callable `f` got wrong number of args.
# The user may want to be warned by this, instead of getting NaN
p_err = (
r"((takes)|(missing)) (?(2)from \d+ to )?\d+ "
r"(?(3)required )positional arguments?"
)
if len(e.args) >= 1 and re.search(p_err, e.args[0]):
raise e
def g(x):
try:
return f(x)
except (TypeError, AttributeError):
return na_value
return _map(g, arr, dtype=dtype)
if na_value is not np.nan:
np.putmask(result, mask, na_value)
if result.dtype == object:
result = lib.maybe_convert_objects(result)
return result
else:
return lib.map_infer(arr, f)
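# A minimal sketch of how _na_map behaves, assuming an object ndarray holding
# a missing value: the function is applied only to non-NA entries and NaN is
# kept in place.
#
#   >>> _na_map(str.upper, np.array(["a", np.nan, "b"], dtype=object))
#   array(['A', nan, 'B'], dtype=object)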
def str_count(arr, pat, flags=0):
"""
Count occurrences of pattern in each string of the Series/Index.
This function is used to count the number of times a particular regex
pattern is repeated in each of the string elements of the
:class:`~pandas.Series`.
Parameters
----------
pat : str
Valid regular expression.
flags : int, default 0, meaning no flags
Flags for the `re` module. For a complete list, `see here
<https://docs.python.org/3/howto/regex.html#compilation-flags>`_.
**kwargs
For compatibility with other string methods. Not used.
Returns
-------
Series or Index
Same type as the calling object containing the integer counts.
See Also
--------
re : Standard library module for regular expressions.
str.count : Standard library version, without regular expression support.
Notes
-----
    Some characters need to be escaped when passing in `pat`. For example,
    ``'$'`` has a special meaning in regex and must be escaped when finding
    this literal character.
Examples
--------
>>> s = pd.Series(['A', 'B', 'Aaba', 'Baca', np.nan, 'CABA', 'cat'])
>>> s.str.count('a')
0 0.0
1 0.0
2 2.0
3 2.0
4 NaN
5 0.0
6 1.0
dtype: float64
Escape ``'$'`` to find the literal dollar sign.
>>> s = pd.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat'])
>>> s.str.count('\\$')
0 1
1 0
2 1
3 2
4 2
5 0
dtype: int64
This is also available on Index
>>> pd.Index(['A', 'A', 'Aaba', 'cat']).str.count('a')
Int64Index([0, 0, 2, 1], dtype='int64')
"""
regex = re.compile(pat, flags=flags)
f = lambda x: len(regex.findall(x))
return _na_map(f, arr, dtype=int)
def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
"""
Test if pattern or regex is contained within a string of a Series or Index.
Return boolean Series or Index based on whether a given pattern or regex is
contained within a string of a Series or Index.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Flags to pass through to the re module, e.g. re.IGNORECASE.
na : default NaN
Fill value for missing values.
regex : bool, default True
If True, assumes the pat is a regular expression.
If False, treats the pat as a literal string.
Returns
-------
Series or Index of boolean values
A Series or Index of boolean values indicating whether the
given pattern is contained within the string of each element
of the Series or Index.
See Also
--------
match : Analogous, but stricter, relying on re.match instead of re.search.
Series.str.startswith : Test if the start of each string element matches a
pattern.
Series.str.endswith : Same as startswith, but tests the end of string.
Examples
--------
Returning a Series of booleans using only a literal pattern.
>>> s1 = pd.Series(['Mouse', 'dog', 'house and parrot', '23', np.NaN])
>>> s1.str.contains('og', regex=False)
0 False
1 True
2 False
3 False
4 NaN
dtype: object
Returning an Index of booleans using only a literal pattern.
>>> ind = pd.Index(['Mouse', 'dog', 'house and parrot', '23.0', np.NaN])
>>> ind.str.contains('23', regex=False)
Index([False, False, False, True, nan], dtype='object')
Specifying case sensitivity using `case`.
>>> s1.str.contains('oG', case=True, regex=True)
0 False
1 False
2 False
3 False
4 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN` replaces NaN values
with `False`. If Series or Index does not contain NaN values
the resultant dtype will be `bool`, otherwise, an `object` dtype.
>>> s1.str.contains('og', na=False, regex=True)
0 False
1 True
2 False
3 False
4 False
dtype: bool
Returning 'house' or 'dog' when either expression occurs in a string.
>>> s1.str.contains('house|dog', regex=True)
0 False
1 True
2 True
3 False
4 NaN
dtype: object
Ignoring case sensitivity using `flags` with regex.
>>> import re
>>> s1.str.contains('PARROT', flags=re.IGNORECASE, regex=True)
0 False
1 False
2 True
3 False
4 NaN
dtype: object
Returning any digit using regular expression.
>>> s1.str.contains('\\d', regex=True)
0 False
1 False
2 False
3 True
4 NaN
dtype: object
    Be aware that `pat` is not treated as a literal pattern when `regex` is set
    to True.
Note in the following example one might expect only `s2[1]` and `s2[3]` to
return `True`. However, '.0' as a regex matches any character
followed by a 0.
>>> s2 = pd.Series(['40', '40.0', '41', '41.0', '35'])
>>> s2.str.contains('.0', regex=True)
0 True
1 True
2 False
3 True
4 False
dtype: bool
"""
if regex:
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
if regex.groups > 0:
warnings.warn(
"This pattern has match groups. To actually get the"
" groups, use str.extract.",
UserWarning,
stacklevel=3,
)
f = lambda x: bool(regex.search(x))
else:
if case:
f = lambda x: pat in x
else:
upper_pat = pat.upper()
f = lambda x: upper_pat in x
uppered = _na_map(lambda x: x.upper(), arr)
return _na_map(f, uppered, na, dtype=bool)
return _na_map(f, arr, na, dtype=bool)
def str_startswith(arr, pat, na=np.nan):
"""
Test if the start of each string element matches a pattern.
Equivalent to :meth:`str.startswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the start of each string element.
See Also
--------
str.startswith : Python standard library string method.
Series.str.endswith : Same as startswith, but tests the end of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'Bear', 'cat', np.nan])
>>> s
0 bat
1 Bear
2 cat
3 NaN
dtype: object
>>> s.str.startswith('b')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.startswith('b', na=False)
0 True
1 False
2 False
3 False
dtype: bool
"""
f = lambda x: x.startswith(pat)
return _na_map(f, arr, na, dtype=bool)
def str_endswith(arr, pat, na=np.nan):
"""
Test if the end of each string element matches a pattern.
Equivalent to :meth:`str.endswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the end of each string element.
See Also
--------
str.endswith : Python standard library string method.
Series.str.startswith : Same as endswith, but tests the start of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'bear', 'caT', np.nan])
>>> s
0 bat
1 bear
2 caT
3 NaN
dtype: object
>>> s.str.endswith('t')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.endswith('t', na=False)
0 True
1 False
2 False
3 False
dtype: bool
"""
f = lambda x: x.endswith(pat)
return _na_map(f, arr, na, dtype=bool)
def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True):
r"""
Replace occurrences of pattern/regex in the Series/Index with
some other string. Equivalent to :meth:`str.replace` or
:func:`re.sub`.
Parameters
----------
pat : str or compiled regex
String can be a character sequence or regular expression.
.. versionadded:: 0.20.0
`pat` also accepts a compiled regex.
repl : str or callable
Replacement string or a callable. The callable is passed the regex
match object and must return a replacement string to be used.
See :func:`re.sub`.
.. versionadded:: 0.20.0
`repl` also accepts a callable.
n : int, default -1 (all)
Number of replacements to make from start.
case : bool, default None
- If True, case sensitive (the default if `pat` is a string)
- Set to False for case insensitive
- Cannot be set if `pat` is a compiled regex
flags : int, default 0 (no flags)
- re module flags, e.g. re.IGNORECASE
- Cannot be set if `pat` is a compiled regex
regex : bool, default True
- If True, assumes the passed-in pattern is a regular expression.
- If False, treats the pattern as a literal string
- Cannot be set to False if `pat` is a compiled regex or `repl` is
a callable.
.. versionadded:: 0.23.0
Returns
-------
Series or Index of object
A copy of the object with all matching occurrences of `pat` replaced by
`repl`.
Raises
------
ValueError
* if `regex` is False and `repl` is a callable or `pat` is a compiled
regex
* if `pat` is a compiled regex and `case` or `flags` is set
Notes
-----
When `pat` is a compiled regex, all flags should be included in the
compiled regex. Use of `case`, `flags`, or `regex=False` with a compiled
regex will raise an error.
Examples
--------
When `pat` is a string and `regex` is True (the default), the given `pat`
is compiled as a regex. When `repl` is a string, it replaces matching
regex patterns as with :meth:`re.sub`. NaN value(s) in the Series are
left as is:
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True)
0 bao
1 baz
2 NaN
dtype: object
When `pat` is a string and `regex` is False, every `pat` is replaced with
`repl` as with :meth:`str.replace`:
>>> pd.Series(['f.o', 'fuz', np.nan]).str.replace('f.', 'ba', regex=False)
0 bao
1 fuz
2 NaN
dtype: object
When `repl` is a callable, it is called on every `pat` using
:func:`re.sub`. The callable should expect one positional argument
(a regex object) and return a string.
To get the idea:
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)
0 <_sre.SRE_Match object; span=(0, 1), match='f'>oo
1 <_sre.SRE_Match object; span=(0, 1), match='f'>uz
2 NaN
dtype: object
Reverse every lowercase alphabetic word:
>>> repl = lambda m: m.group(0)[::-1]
>>> pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl)
0 oof 123
1 rab zab
2 NaN
dtype: object
Using regex groups (extract second group and swap case):
>>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)"
>>> repl = lambda m: m.group('two').swapcase()
>>> pd.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl)
0 tWO
1 bAR
dtype: object
Using a compiled regex with flags
>>> import re
>>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE)
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar')
0 foo
1 bar
2 NaN
dtype: object
"""
# Check whether repl is valid (GH 13438, GH 15055)
if not (is_string_like(repl) or callable(repl)):
raise TypeError("repl must be a string or callable")
is_compiled_re = is_re(pat)
if regex:
if is_compiled_re:
if (case is not None) or (flags != 0):
raise ValueError(
"case and flags cannot be set" " when pat is a compiled regex"
)
else:
# not a compiled regex
# set default case
if case is None:
case = True
# add case flag, if provided
if case is False:
flags |= re.IGNORECASE
if is_compiled_re or len(pat) > 1 or flags or callable(repl):
n = n if n >= 0 else 0
compiled = re.compile(pat, flags=flags)
f = lambda x: compiled.sub(repl=repl, string=x, count=n)
else:
f = lambda x: x.replace(pat, repl, n)
else:
if is_compiled_re:
raise ValueError(
"Cannot use a compiled regex as replacement " "pattern with regex=False"
)
if callable(repl):
raise ValueError("Cannot use a callable replacement when " "regex=False")
f = lambda x: x.replace(pat, repl, n)
return _na_map(f, arr)
def str_repeat(arr, repeats):
"""
Duplicate each string in the Series or Index.
Parameters
----------
repeats : int or sequence of int
Same value for all (int) or different value per (sequence).
Returns
-------
Series or Index of object
Series or Index of repeated string objects specified by
input parameter repeats.
Examples
--------
>>> s = pd.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
Single int repeats string in Series
>>> s.str.repeat(repeats=2)
0 aa
1 bb
2 cc
dtype: object
Sequence of int repeats corresponding string in Series
>>> s.str.repeat(repeats=[1, 2, 3])
0 a
1 bb
2 ccc
dtype: object
"""
if is_scalar(repeats):
def scalar_rep(x):
try:
return bytes.__mul__(x, repeats)
except TypeError:
return str.__mul__(x, repeats)
return _na_map(scalar_rep, arr)
else:
def rep(x, r):
try:
return bytes.__mul__(x, r)
except TypeError:
return str.__mul__(x, r)
repeats = np.asarray(repeats, dtype=object)
result = libops.vec_binop(com.values_from_object(arr), repeats, rep)
return result
def str_match(arr, pat, case=True, flags=0, na=np.nan):
"""
Determine if each string matches a regular expression.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE.
na : default NaN
Fill value for missing values.
Returns
-------
Series/array of boolean values
See Also
--------
contains : Analogous, but less strict, relying on re.search instead of
re.match.
extract : Extract matched groups.
"""
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
dtype = bool
f = lambda x: bool(regex.match(x))
return _na_map(f, arr, na, dtype=dtype)
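# A minimal usage sketch for the public accessor backed by str_match; the
# pattern is anchored at the start of each string (re.match semantics):
#
#   >>> import pandas as pd
#   >>> pd.Series(["cat", "dog", np.nan]).str.match(r"c.")
#   0     True
#   1    False
#   2      NaN
#   dtype: object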
def _get_single_group_name(rx):
try:
return list(rx.groupindex.keys()).pop()
except IndexError:
return None
def _groups_or_na_fun(regex):
"""Used in both extract_noexpand and extract_frame"""
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
empty_row = [np.nan] * regex.groups
def f(x):
if not isinstance(x, str):
return empty_row
m = regex.search(x)
if m:
return [np.nan if item is None else item for item in m.groups()]
else:
return empty_row
return f
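# A minimal sketch of the helper returned by _groups_or_na_fun: it maps one
# subject string to a list with one entry per capture group, using NaN for
# non-matches and non-strings.
#
#   >>> f = _groups_or_na_fun(re.compile(r"([ab])(\d)"))
#   >>> f("a1"), f("xyz"), f(np.nan)
#   (['a', '1'], [nan, nan], [nan, nan])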
def _str_extract_noexpand(arr, pat, flags=0):
"""
Find groups in each string in the Series using passed regular
expression. This function is called from
str_extract(expand=False), and can return Series, DataFrame, or
Index.
"""
from pandas import DataFrame, Index
regex = re.compile(pat, flags=flags)
groups_or_na = _groups_or_na_fun(regex)
if regex.groups == 1:
result = np.array([groups_or_na(val)[0] for val in arr], dtype=object)
name = _get_single_group_name(regex)
else:
if isinstance(arr, Index):
raise ValueError("only one regex group is supported with Index")
name = None
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
if arr.empty:
result = DataFrame(columns=columns, dtype=object)
else:
result = DataFrame(
[groups_or_na(val) for val in arr],
columns=columns,
index=arr.index,
dtype=object,
)
return result, name
def _str_extract_frame(arr, pat, flags=0):
"""
For each subject string in the Series, extract groups from the
first match of regular expression pat. This function is called from
str_extract(expand=True), and always returns a DataFrame.
"""
from pandas import DataFrame
regex = re.compile(pat, flags=flags)
groups_or_na = _groups_or_na_fun(regex)
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
if len(arr) == 0:
return DataFrame(columns=columns, dtype=object)
try:
result_index = arr.index
except AttributeError:
result_index = None
return DataFrame(
[groups_or_na(val) for val in arr],
columns=columns,
index=result_index,
dtype=object,
)
def str_extract(arr, pat, flags=0, expand=True):
r"""
Extract capture groups in the regex `pat` as columns in a DataFrame.
For each subject string in the Series, extract groups from the
first match of regular expression `pat`.
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that
modify regular expression matching for things like case,
spaces, etc. For more details, see :mod:`re`.
expand : bool, default True
If True, return DataFrame with one column per capture group.
If False, return a Series/Index if there is one capture group
or DataFrame if there are multiple capture groups.
.. versionadded:: 0.18.0
Returns
-------
DataFrame or Series or Index
A DataFrame with one row for each subject string, and one
column for each group. Any capture group names in regular
expression pat will be used for column names; otherwise
capture group numbers will be used. The dtype of each result
column is always object, even when no match is found. If
``expand=False`` and pat has only one capture group, then
return a Series (if subject is a Series) or Index (if subject
is an Index).
See Also
--------
extractall : Returns all matches (not just the first match).
Examples
--------
A pattern with two groups will return a DataFrame with two columns.
Non-matches will be NaN.
>>> s = pd.Series(['a1', 'b2', 'c3'])
>>> s.str.extract(r'([ab])(\d)')
0 1
0 a 1
1 b 2
2 NaN NaN
A pattern may contain optional groups.
>>> s.str.extract(r'([ab])?(\d)')
0 1
0 a 1
1 b 2
2 NaN 3
Named groups will become column names in the result.
>>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)')
letter digit
0 a 1
1 b 2
2 NaN NaN
A pattern with one group will return a DataFrame with one column
if expand=True.
>>> s.str.extract(r'[ab](\d)', expand=True)
0
0 1
1 2
2 NaN
A pattern with one group will return a Series if expand=False.
>>> s.str.extract(r'[ab](\d)', expand=False)
0 1
1 2
2 NaN
dtype: object
"""
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand:
return _str_extract_frame(arr._orig, pat, flags=flags)
else:
result, name = _str_extract_noexpand(arr._parent, pat, flags=flags)
return arr._wrap_result(result, name=name, expand=expand)
def str_extractall(arr, pat, flags=0):
r"""
For each subject string in the Series, extract groups from all
matches of regular expression pat. When each subject string in the
Series has exactly one match, extractall(pat).xs(0, level='match')
is the same as extract(pat).
.. versionadded:: 0.18.0
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
A ``re`` module flag, for example ``re.IGNORECASE``. These allow
to modify regular expression matching for things like case, spaces,
etc. Multiple flags can be combined with the bitwise OR operator,
for example ``re.IGNORECASE | re.MULTILINE``.
Returns
-------
DataFrame
A ``DataFrame`` with one row for each match, and one column for each
group. Its rows have a ``MultiIndex`` with first levels that come from
the subject ``Series``. The last level is named 'match' and indexes the
matches in each item of the ``Series``. Any capture group names in
regular expression pat will be used for column names; otherwise capture
group numbers will be used.
See Also
--------
extract : Returns first match only (not all matches).
Examples
--------
A pattern with one group will return a DataFrame with one column.
Indices with no matches will not appear in the result.
>>> s = pd.Series(["a1a2", "b1", "c1"], index=["A", "B", "C"])
>>> s.str.extractall(r"[ab](\d)")
0
match
A 0 1
1 2
B 0 1
Capture group names are used for column names of the result.
>>> s.str.extractall(r"[ab](?P<digit>\d)")
digit
match
A 0 1
1 2
B 0 1
A pattern with two groups will return a DataFrame with two columns.
>>> s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
Optional groups that do not match are NaN in the result.
>>> s.str.extractall(r"(?P<letter>[ab])?(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
C 0 NaN 1
"""
regex = re.compile(pat, flags=flags)
# the regex must contain capture groups.
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
if isinstance(arr, ABCIndexClass):
arr = arr.to_series().reset_index(drop=True)
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
match_list = []
index_list = []
is_mi = arr.index.nlevels > 1
for subject_key, subject in arr.items():
if isinstance(subject, str):
if not is_mi:
subject_key = (subject_key,)
for match_i, match_tuple in enumerate(regex.findall(subject)):
if isinstance(match_tuple, str):
match_tuple = (match_tuple,)
na_tuple = [np.NaN if group == "" else group for group in match_tuple]
match_list.append(na_tuple)
result_key = tuple(subject_key + (match_i,))
index_list.append(result_key)
from pandas import MultiIndex
index = MultiIndex.from_tuples(index_list, names=arr.index.names + ["match"])
result = arr._constructor_expanddim(match_list, index=index, columns=columns)
return result
def str_get_dummies(arr, sep="|"):
"""
Split each string in the Series by sep and return a DataFrame
of dummy/indicator variables.
Parameters
----------
sep : str, default "|"
String to split on.
Returns
-------
DataFrame
Dummy variables corresponding to values of the Series.
See Also
--------
get_dummies : Convert categorical variable into dummy/indicator
variables.
Examples
--------
>>> pd.Series(['a|b', 'a', 'a|c']).str.get_dummies()
a b c
0 1 1 0
1 1 0 0
2 1 0 1
>>> pd.Series(['a|b', np.nan, 'a|c']).str.get_dummies()
a b c
0 1 1 0
1 0 0 0
2 1 0 1
"""
arr = arr.fillna("")
try:
arr = sep + arr + sep
except TypeError:
arr = sep + arr.astype(str) + sep
tags = set()
for ts in arr.str.split(sep):
tags.update(ts)
tags = sorted(tags - {""})
dummies = np.empty((len(arr), len(tags)), dtype=np.int64)
for i, t in enumerate(tags):
pat = sep + t + sep
dummies[:, i] = lib.map_infer(arr.values, lambda x: pat in x)
return dummies, tags
def str_join(arr, sep):
"""
Join lists contained as elements in the Series/Index with passed delimiter.
If the elements of a Series are lists themselves, join the content of these
lists using the delimiter passed to the function.
This function is an equivalent to :meth:`str.join`.
Parameters
----------
sep : str
Delimiter to use between list entries.
Returns
-------
Series/Index: object
The list entries concatenated by intervening occurrences of the
delimiter.
Raises
------
AttributeError
If the supplied Series contains neither strings nor lists.
See Also
--------
str.join : Standard library version of this method.
Series.str.split : Split strings around given separator/delimiter.
Notes
-----
If any of the list items is not a string object, the result of the join
will be `NaN`.
Examples
--------
Example with a list that contains non-string elements.
>>> s = pd.Series([['lion', 'elephant', 'zebra'],
... [1.1, 2.2, 3.3],
... ['cat', np.nan, 'dog'],
... ['cow', 4.5, 'goat'],
... ['duck', ['swan', 'fish'], 'guppy']])
>>> s
0 [lion, elephant, zebra]
1 [1.1, 2.2, 3.3]
2 [cat, nan, dog]
3 [cow, 4.5, goat]
4 [duck, [swan, fish], guppy]
dtype: object
Join all lists using a '-'. The lists containing object(s) of types other
than str will produce a NaN.
>>> s.str.join('-')
0 lion-elephant-zebra
1 NaN
2 NaN
3 NaN
4 NaN
dtype: object
"""
return _na_map(sep.join, arr)
def str_findall(arr, pat, flags=0):
"""
Find all occurrences of pattern or regular expression in the Series/Index.
Equivalent to applying :func:`re.findall` to all the elements in the
Series/Index.
Parameters
----------
pat : str
Pattern or regular expression.
flags : int, default 0
Flags from ``re`` module, e.g. `re.IGNORECASE` (default is 0, which
means no flags).
Returns
-------
Series/Index of lists of strings
All non-overlapping matches of pattern or regular expression in each
string of this Series/Index.
See Also
--------
count : Count occurrences of pattern or regular expression in each string
of the Series/Index.
extractall : For each string in the Series, extract groups from all matches
of regular expression and return a DataFrame with one row for each
match and one column for each group.
re.findall : The equivalent ``re`` function to all non-overlapping matches
of pattern or regular expression in string, as a list of strings.
Examples
--------
>>> s = pd.Series(['Lion', 'Monkey', 'Rabbit'])
The search for the pattern 'Monkey' returns one match:
>>> s.str.findall('Monkey')
0 []
1 [Monkey]
2 []
dtype: object
On the other hand, the search for the pattern 'MONKEY' doesn't return any
match:
>>> s.str.findall('MONKEY')
0 []
1 []
2 []
dtype: object
Flags can be added to the pattern or regular expression. For instance,
to find the pattern 'MONKEY' ignoring the case:
>>> import re
>>> s.str.findall('MONKEY', flags=re.IGNORECASE)
0 []
1 [Monkey]
2 []
dtype: object
When the pattern matches more than one string in the Series, all matches
are returned:
>>> s.str.findall('on')
0 [on]
1 [on]
2 []
dtype: object
Regular expressions are supported too. For instance, the search for all the
strings ending with the word 'on' is shown next:
>>> s.str.findall('on$')
0 [on]
1 []
2 []
dtype: object
If the pattern is found more than once in the same string, then a list of
multiple strings is returned:
>>> s.str.findall('b')
0 []
1 []
2 [b, b]
dtype: object
"""
regex = re.compile(pat, flags=flags)
return _na_map(regex.findall, arr)
def str_find(arr, sub, start=0, end=None, side="left"):
"""
Return indexes in each strings in the Series/Index where the
substring is fully contained between [start:end]. Return -1 on failure.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
side : {'left', 'right'}, default 'left'
Specifies a starting side, equivalent to ``find`` or ``rfind``.
Returns
-------
Series or Index
Indexes where substring is found.
"""
if not isinstance(sub, str):
msg = "expected a string object, not {0}"
raise TypeError(msg.format(type(sub).__name__))
if side == "left":
method = "find"
elif side == "right":
method = "rfind"
else: # pragma: no cover
raise ValueError("Invalid side")
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return _na_map(f, arr, dtype=int)
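# A minimal usage sketch for the public accessors backed by str_find; find and
# rfind return -1 when the substring is absent:
#
#   >>> import pandas as pd
#   >>> pd.Series(["abcab", "xyz"]).str.find("ab")
#   0    0
#   1   -1
#   dtype: int64
#   >>> pd.Series(["abcab", "xyz"]).str.rfind("ab")
#   0    3
#   1   -1
#   dtype: int64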
def str_index(arr, sub, start=0, end=None, side="left"):
if not isinstance(sub, str):
msg = "expected a string object, not {0}"
raise TypeError(msg.format(type(sub).__name__))
if side == "left":
method = "index"
elif side == "right":
method = "rindex"
else: # pragma: no cover
raise ValueError("Invalid side")
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return _na_map(f, arr, dtype=int)
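# A minimal usage sketch for the accessors backed by str_index; unlike find,
# index/rindex propagate the ValueError raised by str.index when the substring
# is missing:
#
#   >>> import pandas as pd
#   >>> pd.Series(["abc"]).str.index("b")
#   0    1
#   dtype: int64
#   >>> pd.Series(["abc"]).str.index("z")
#   Traceback (most recent call last):
#       ...
#   ValueError: substring not found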
def str_pad(arr, width, side="left", fillchar=" "):
"""
Pad strings in the Series/Index up to width.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with character defined in `fillchar`.
side : {'left', 'right', 'both'}, default 'left'
Side from which to fill resulting string.
fillchar : str, default ' '
Additional character for filling, default is whitespace.
Returns
-------
Series or Index of object
Returns Series or Index with minimum number of char in object.
See Also
--------
Series.str.rjust : Fills the left side of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='left')``.
Series.str.ljust : Fills the right side of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='right')``.
    Series.str.center : Fills both sides of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='both')``.
Series.str.zfill : Pad strings in the Series/Index by prepending '0'
character. Equivalent to ``Series.str.pad(side='left', fillchar='0')``.
Examples
--------
>>> s = pd.Series(["caribou", "tiger"])
>>> s
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10)
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10, side='right', fillchar='-')
0 caribou---
1 tiger-----
dtype: object
>>> s.str.pad(width=10, side='both', fillchar='-')
0 -caribou--
1 --tiger---
dtype: object
"""
if not isinstance(fillchar, str):
msg = "fillchar must be a character, not {0}"
raise TypeError(msg.format(type(fillchar).__name__))
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not is_integer(width):
msg = "width must be of integer type, not {0}"
raise TypeError(msg.format(type(width).__name__))
if side == "left":
f = lambda x: x.rjust(width, fillchar)
elif side == "right":
f = lambda x: x.ljust(width, fillchar)
elif side == "both":
f = lambda x: x.center(width, fillchar)
else: # pragma: no cover
raise ValueError("Invalid side")
return _na_map(f, arr)
def str_split(arr, pat=None, n=None):
if pat is None:
if n is None or n == 0:
n = -1
f = lambda x: x.split(pat, n)
else:
if len(pat) == 1:
if n is None or n == 0:
n = -1
f = lambda x: x.split(pat, n)
else:
if n is None or n == -1:
n = 0
regex = re.compile(pat)
f = lambda x: regex.split(x, maxsplit=n)
res = _na_map(f, arr)
return res
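# A minimal usage sketch for the accessor backed by str_split: a
# single-character pat uses str.split directly, while a longer pat is compiled
# as a regular expression.
#
#   >>> import pandas as pd
#   >>> pd.Series(["a_b_c"]).str.split("_")
#   0    [a, b, c]
#   dtype: object
#   >>> pd.Series(["a1b22c"]).str.split(r"\d+")
#   0    [a, b, c]
#   dtype: object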
def str_rsplit(arr, pat=None, n=None):
if n is None or n == 0:
n = -1
f = lambda x: x.rsplit(pat, n)
res = _na_map(f, arr)
return res
def str_slice(arr, start=None, stop=None, step=None):
"""
Slice substrings from each element in the Series or Index.
Parameters
----------
start : int, optional
Start position for slice operation.
stop : int, optional
Stop position for slice operation.
step : int, optional
Step size for slice operation.
Returns
-------
Series or Index of object
Series or Index from sliced substring from original string object.
See Also
--------
Series.str.slice_replace : Replace a slice with a string.
Series.str.get : Return element at position.
Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i`
being the position.
Examples
--------
>>> s = pd.Series(["koala", "fox", "chameleon"])
>>> s
0 koala
1 fox
2 chameleon
dtype: object
>>> s.str.slice(start=1)
0 oala
1 ox
2 hameleon
dtype: object
>>> s.str.slice(stop=2)
0 ko
1 fo
2 ch
dtype: object
>>> s.str.slice(step=2)
0 kaa
1 fx
2 caeen
dtype: object
>>> s.str.slice(start=0, stop=5, step=3)
0 kl
1 f
2 cm
dtype: object
Equivalent behaviour to:
>>> s.str[0:5:3]
0 kl
1 f
2 cm
dtype: object
"""
obj = slice(start, stop, step)
f = lambda x: x[obj]
return _na_map(f, arr)
def str_slice_replace(arr, start=None, stop=None, repl=None):
"""
Replace a positional slice of a string with another value.
Parameters
----------
start : int, optional
Left index position to use for the slice. If not specified (None),
the slice is unbounded on the left, i.e. slice from the start
of the string.
stop : int, optional
Right index position to use for the slice. If not specified (None),
the slice is unbounded on the right, i.e. slice until the
end of the string.
repl : str, optional
String for replacement. If not specified (None), the sliced region
is replaced with an empty string.
Returns
-------
Series or Index
Same type as the original object.
See Also
--------
Series.str.slice : Just slicing without replacement.
Examples
--------
>>> s = pd.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])
>>> s
0 a
1 ab
2 abc
3 abdc
4 abcde
dtype: object
Specify just `start`, meaning replace `start` until the end of the
string with `repl`.
>>> s.str.slice_replace(1, repl='X')
0 aX
1 aX
2 aX
3 aX
4 aX
dtype: object
Specify just `stop`, meaning the start of the string to `stop` is replaced
with `repl`, and the rest of the string is included.
>>> s.str.slice_replace(stop=2, repl='X')
0 X
1 X
2 Xc
3 Xdc
4 Xcde
dtype: object
Specify `start` and `stop`, meaning the slice from `start` to `stop` is
replaced with `repl`. Everything before or after `start` and `stop` is
included as is.
>>> s.str.slice_replace(start=1, stop=3, repl='X')
0 aX
1 aX
2 aX
3 aXc
4 aXde
dtype: object
"""
if repl is None:
repl = ""
def f(x):
if x[start:stop] == "":
local_stop = start
else:
local_stop = stop
y = ""
if start is not None:
y += x[:start]
y += repl
if stop is not None:
y += x[local_stop:]
return y
return _na_map(f, arr)
def str_strip(arr, to_strip=None, side="both"):
"""
Strip whitespace (including newlines) from each string in the
Series/Index.
Parameters
----------
to_strip : str or unicode
side : {'left', 'right', 'both'}, default 'both'
Returns
-------
Series or Index
"""
if side == "both":
f = lambda x: x.strip(to_strip)
elif side == "left":
f = lambda x: x.lstrip(to_strip)
elif side == "right":
f = lambda x: x.rstrip(to_strip)
else: # pragma: no cover
raise ValueError("Invalid side")
return _na_map(f, arr)
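# A minimal usage sketch for the accessors backed by str_strip (strip, lstrip
# and rstrip correspond to side='both', 'left' and 'right'):
#
#   >>> import pandas as pd
#   >>> pd.Series(["  ab  ", "xy\n"]).str.strip()
#   0    ab
#   1    xy
#   dtype: object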
def str_wrap(arr, width, **kwargs):
r"""
Wrap long strings in the Series/Index to be formatted in
paragraphs with length less than a given width.
This method has the same keyword parameters and defaults as
:class:`textwrap.TextWrapper`.
Parameters
----------
width : int
Maximum line width.
expand_tabs : bool, optional
If True, tab characters will be expanded to spaces (default: True).
replace_whitespace : bool, optional
If True, each whitespace character (as defined by string.whitespace)
remaining after tab expansion will be replaced by a single space
(default: True).
drop_whitespace : bool, optional
If True, whitespace that, after wrapping, happens to end up at the
beginning or end of a line is dropped (default: True).
break_long_words : bool, optional
If True, then words longer than width will be broken in order to ensure
that no lines are longer than width. If it is false, long words will
not be broken, and some lines may be longer than width (default: True).
break_on_hyphens : bool, optional
If True, wrapping will occur preferably on whitespace and right after
hyphens in compound words, as it is customary in English. If false,
only whitespaces will be considered as potentially good places for line
breaks, but you need to set break_long_words to false if you want truly
insecable words (default: True).
Returns
-------
Series or Index
Notes
-----
Internally, this method uses a :class:`textwrap.TextWrapper` instance with
default settings. To achieve behavior matching R's stringr library str_wrap
function, use the arguments:
- expand_tabs = False
- replace_whitespace = True
- drop_whitespace = True
- break_long_words = False
- break_on_hyphens = False
Examples
--------
>>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
>>> s.str.wrap(12)
0 line to be\nwrapped
1 another line\nto be\nwrapped
dtype: object
"""
kwargs["width"] = width
tw = textwrap.TextWrapper(**kwargs)
return _na_map(lambda s: "\n".join(tw.wrap(s)), arr)
def str_translate(arr, table):
"""
Map all characters in the string through the given mapping table.
Equivalent to standard :meth:`str.translate`.
Parameters
----------
table : dict
table is a mapping of Unicode ordinals to Unicode ordinals, strings, or
None. Unmapped characters are left untouched.
Characters mapped to None are deleted. :meth:`str.maketrans` is a
helper function for making translation tables.
Returns
-------
Series or Index
"""
return _na_map(lambda x: x.translate(table), arr)
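# A minimal usage sketch for the accessor backed by str_translate, using
# str.maketrans to build the table (here 'a' -> 'b' and 'c' is deleted):
#
#   >>> import pandas as pd
#   >>> table = str.maketrans({"a": "b", "c": None})
#   >>> pd.Series(["abca"]).str.translate(table)
#   0    bbb
#   dtype: object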
def str_get(arr, i):
"""
Extract element from each component at specified position.
Extract element from lists, tuples, or strings in each element in the
Series/Index.
Parameters
----------
i : int
Position of element to extract.
Returns
-------
Series or Index
Examples
--------
>>> s = pd.Series(["String",
... (1, 2, 3),
... ["a", "b", "c"],
... 123,
... -456,
... {1: "Hello", "2": "World"}])
>>> s
0 String
1 (1, 2, 3)
2 [a, b, c]
3 123
4 -456
5 {1: 'Hello', '2': 'World'}
dtype: object
>>> s.str.get(1)
0 t
1 2
2 b
3 NaN
4 NaN
5 Hello
dtype: object
>>> s.str.get(-1)
0 g
1 3
2 c
3 NaN
4 NaN
5 None
dtype: object
"""
def f(x):
if isinstance(x, dict):
return x.get(i)
elif len(x) > i >= -len(x):
return x[i]
return np.nan
return _na_map(f, arr)
def str_decode(arr, encoding, errors="strict"):
"""
Decode character string in the Series/Index using indicated encoding.
Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode` in
python3.
Parameters
----------
encoding : str
errors : str, optional
Returns
-------
Series or Index
"""
if encoding in _cpython_optimized_decoders:
# CPython optimized implementation
f = lambda x: x.decode(encoding, errors)
else:
decoder = codecs.getdecoder(encoding)
f = lambda x: decoder(x, errors)[0]
return _na_map(f, arr)
def str_encode(arr, encoding, errors="strict"):
"""
Encode character string in the Series/Index using indicated encoding.
Equivalent to :meth:`str.encode`.
Parameters
----------
encoding : str
errors : str, optional
Returns
-------
encoded : Series/Index of objects
"""
if encoding in _cpython_optimized_encoders:
# CPython optimized implementation
f = lambda x: x.encode(encoding, errors)
else:
encoder = codecs.getencoder(encoding)
f = lambda x: encoder(x, errors)[0]
return _na_map(f, arr)
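# A minimal sketch showing that encode and decode round-trip between str and
# bytes values:
#
#   >>> import pandas as pd
#   >>> s = pd.Series(["a", "b"])
#   >>> s.str.encode("utf-8").str.decode("utf-8").equals(s)
#   True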
def forbid_nonstring_types(forbidden, name=None):
"""
Decorator to forbid specific types for a method of StringMethods.
For calling `.str.{method}` on a Series or Index, it is necessary to first
initialize the :class:`StringMethods` object, and then call the method.
However, different methods allow different input types, and so this can not
be checked during :meth:`StringMethods.__init__`, but must be done on a
per-method basis. This decorator exists to facilitate this process, and
make it explicit which (inferred) types are disallowed by the method.
:meth:`StringMethods.__init__` allows the *union* of types its different
methods allow (after skipping NaNs; see :meth:`StringMethods._validate`),
namely: ['string', 'empty', 'bytes', 'mixed', 'mixed-integer'].
The default string types ['string', 'empty'] are allowed for all methods.
For the additional types ['bytes', 'mixed', 'mixed-integer'], each method
then needs to forbid the types it is not intended for.
Parameters
----------
forbidden : list-of-str or None
List of forbidden non-string types, may be one or more of
`['bytes', 'mixed', 'mixed-integer']`.
name : str, default None
Name of the method to use in the error message. By default, this is
None, in which case the name from the method being wrapped will be
copied. However, for working with further wrappers (like _pat_wrapper
and _noarg_wrapper), it is necessary to specify the name.
Returns
-------
func : wrapper
The method to which the decorator is applied, with an added check that
enforces the inferred type to not be in the list of forbidden types.
Raises
------
TypeError
If the inferred type of the underlying data is in `forbidden`.
"""
# deal with None
forbidden = [] if forbidden is None else forbidden
allowed_types = {"string", "empty", "bytes", "mixed", "mixed-integer"} - set(
forbidden
)
def _forbid_nonstring_types(func):
func_name = func.__name__ if name is None else name
@wraps(func)
def wrapper(self, *args, **kwargs):
if self._inferred_dtype not in allowed_types:
msg = (
"Cannot use .str.{name} with values of inferred dtype "
"{inf_type!r}.".format(
name=func_name, inf_type=self._inferred_dtype
)
)
raise TypeError(msg)
return func(self, *args, **kwargs)
wrapper.__name__ = func_name
return wrapper
return _forbid_nonstring_types
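# A minimal sketch of the error raised by methods decorated with
# forbid_nonstring_types when the underlying values have a forbidden inferred
# dtype (here, bytes):
#
#   >>> import pandas as pd
#   >>> pd.Series([b"a", b"b"]).str.upper()
#   Traceback (most recent call last):
#       ...
#   TypeError: Cannot use .str.upper with values of inferred dtype 'bytes'.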
def _noarg_wrapper(f, name=None, docstring=None, forbidden_types=["bytes"], **kargs):
@forbid_nonstring_types(forbidden_types, name=name)
def wrapper(self):
result = _na_map(f, self._parent, **kargs)
return self._wrap_result(result)
wrapper.__name__ = f.__name__ if name is None else name
if docstring is not None:
wrapper.__doc__ = docstring
else:
raise ValueError("Provide docstring")
return wrapper
def _pat_wrapper(
f, flags=False, na=False, name=None, forbidden_types=["bytes"], **kwargs
):
@forbid_nonstring_types(forbidden_types, name=name)
def wrapper1(self, pat):
result = f(self._parent, pat)
return self._wrap_result(result)
@forbid_nonstring_types(forbidden_types, name=name)
def wrapper2(self, pat, flags=0, **kwargs):
result = f(self._parent, pat, flags=flags, **kwargs)
return self._wrap_result(result)
@forbid_nonstring_types(forbidden_types, name=name)
def wrapper3(self, pat, na=np.nan):
result = f(self._parent, pat, na=na)
return self._wrap_result(result)
wrapper = wrapper3 if na else wrapper2 if flags else wrapper1
wrapper.__name__ = f.__name__ if name is None else name
if f.__doc__:
wrapper.__doc__ = f.__doc__
return wrapper
def copy(source):
"Copy a docstring from another source function (if present)"
def do_copy(target):
if source.__doc__:
target.__doc__ = source.__doc__
return target
return do_copy
class StringMethods(NoNewAttributesMixin):
"""
Vectorized string functions for Series and Index. NAs stay NA unless
handled otherwise by a particular method. Patterned after Python's string
methods, with some inspiration from R's stringr package.
Examples
--------
    >>> s = pd.Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
    >>> s.str.split('_')
    >>> s.str.replace('_', '')
"""
def __init__(self, data):
self._inferred_dtype = self._validate(data)
self._is_categorical = is_categorical_dtype(data)
# .values.categories works for both Series/Index
self._parent = data.values.categories if self._is_categorical else data
# save orig to blow up categoricals to the right type
self._orig = data
self._freeze()
@staticmethod
def _validate(data):
"""
Auxiliary function for StringMethods, infers and checks dtype of data.
This is a "first line of defence" at the creation of the StringMethods-
object (see _make_accessor), and just checks that the dtype is in the
*union* of the allowed types over all string methods below; this
restriction is then refined on a per-method basis using the decorator
@forbid_nonstring_types (more info in the corresponding docstring).
This really should exclude all series/index with any non-string values,
but that isn't practical for performance reasons until we have a str
dtype (GH 9343 / 13877)
Parameters
----------
data : The content of the Series
Returns
-------
dtype : inferred dtype of data
"""
if isinstance(data, ABCMultiIndex):
raise AttributeError(
"Can only use .str accessor with Index, " "not MultiIndex"
)
# see _libs/lib.pyx for list of inferred types
allowed_types = ["string", "empty", "bytes", "mixed", "mixed-integer"]
values = getattr(data, "values", data) # Series / Index
values = getattr(values, "categories", values) # categorical / normal
try:
inferred_dtype = lib.infer_dtype(values, skipna=True)
except ValueError:
# GH#27571 mostly occurs with ExtensionArray
inferred_dtype = None
if inferred_dtype not in allowed_types:
raise AttributeError("Can only use .str accessor with string " "values!")
return inferred_dtype
def __getitem__(self, key):
if isinstance(key, slice):
return self.slice(start=key.start, stop=key.stop, step=key.step)
else:
return self.get(key)
def __iter__(self):
i = 0
g = self.get(i)
while g.notna().any():
yield g
i += 1
g = self.get(i)
def _wrap_result(
self, result, use_codes=True, name=None, expand=None, fill_value=np.nan
):
from pandas import Index, Series, MultiIndex
        # For categorical data, the operation was applied to the categories, so
        # expand the result back to the full series here.
        # Some operations must run on the full values instead; those methods
        # pass use_codes=False to skip this step because they already handled
        # it before the transformation.
if use_codes and self._is_categorical:
# if self._orig is a CategoricalIndex, there is no .cat-accessor
result = take_1d(
result, Series(self._orig, copy=False).cat.codes, fill_value=fill_value
)
if not hasattr(result, "ndim") or not hasattr(result, "dtype"):
return result
assert result.ndim < 3
if expand is None:
# infer from ndim if expand is not specified
expand = result.ndim != 1
elif expand is True and not isinstance(self._orig, Index):
# required when expand=True is explicitly specified
# not needed when inferred
def cons_row(x):
if is_list_like(x):
return x
else:
return [x]
result = [cons_row(x) for x in result]
if result:
# propagate nan values to match longest sequence (GH 18450)
max_len = max(len(x) for x in result)
result = [
x * max_len if len(x) == 0 or x[0] is np.nan else x for x in result
]
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand is False:
# if expand is False, result should have the same name
# as the original otherwise specified
if name is None:
name = getattr(result, "name", None)
if name is None:
# do not use logical or, _orig may be a DataFrame
# which has "name" column
name = self._orig.name
# Wait until we are sure result is a Series or Index before
# checking attributes (GH 12180)
if isinstance(self._orig, Index):
# if result is a boolean np.array, return the np.array
# instead of wrapping it into a boolean Index (GH 8875)
if is_bool_dtype(result):
return result
if expand:
result = list(result)
out = MultiIndex.from_tuples(result, names=name)
if out.nlevels == 1:
# We had all tuples of length-one, which are
# better represented as a regular Index.
out = out.get_level_values(0)
return out
else:
return Index(result, name=name)
else:
index = self._orig.index
if expand:
cons = self._orig._constructor_expanddim
return cons(result, columns=name, index=index)
else:
# Must be a Series
cons = self._orig._constructor
return cons(result, name=name, index=index)
def _get_series_list(self, others, ignore_index=False):
"""
Auxiliary function for :meth:`str.cat`. Turn potentially mixed input
into a list of Series (elements without an index must match the length
of the calling Series/Index).
Parameters
----------
others : Series, Index, DataFrame, np.ndarray, list-like or list-like
of objects that are Series, Index or np.ndarray (1-dim)
ignore_index : boolean, default False
Determines whether to forcefully align others with index of caller
Returns
-------
tuple : (others transformed into list of Series,
boolean whether FutureWarning should be raised)
"""
# Once str.cat defaults to alignment, this function can be simplified;
# will not need `ignore_index` and the second boolean output anymore
from pandas import Index, Series, DataFrame
# self._orig is either Series or Index
idx = self._orig if isinstance(self._orig, Index) else self._orig.index
err_msg = (
"others must be Series, Index, DataFrame, np.ndarray or "
"list-like (either containing only strings or containing "
"only objects of type Series/Index/list-like/np.ndarray)"
)
# Generally speaking, all objects without an index inherit the index
# `idx` of the calling Series/Index - i.e. must have matching length.
# Objects with an index (i.e. Series/Index/DataFrame) keep their own
# index, *unless* ignore_index is set to True.
if isinstance(others, Series):
warn = not others.index.equals(idx)
# only reconstruct Series when absolutely necessary
los = [
Series(others.values, index=idx) if ignore_index and warn else others
]
return (los, warn)
elif isinstance(others, Index):
warn = not others.equals(idx)
los = [Series(others.values, index=(idx if ignore_index else others))]
return (los, warn)
elif isinstance(others, DataFrame):
warn = not others.index.equals(idx)
if ignore_index and warn:
# without copy, this could change "others"
# that was passed to str.cat
others = others.copy()
others.index = idx
return ([others[x] for x in others], warn)
elif isinstance(others, np.ndarray) and others.ndim == 2:
others = DataFrame(others, index=idx)
return ([others[x] for x in others], False)
elif is_list_like(others, allow_sets=False):
others = list(others) # ensure iterators do not get read twice etc
# in case of list-like `others`, all elements must be
# either one-dimensional list-likes or scalars
if all(is_list_like(x, allow_sets=False) for x in others):
los = []
join_warn = False
depr_warn = False
# iterate through list and append list of series for each
# element (which we check to be one-dimensional and non-nested)
while others:
nxt = others.pop(0) # nxt is guaranteed list-like by above
# GH 21950 - DeprecationWarning
# only allowing Series/Index/np.ndarray[1-dim] will greatly
                    # simplify this function post-deprecation.
if not (
isinstance(nxt, (Series, Index))
or (isinstance(nxt, np.ndarray) and nxt.ndim == 1)
):
depr_warn = True
if not isinstance(nxt, (DataFrame, Series, Index, np.ndarray)):
# safety for non-persistent list-likes (e.g. iterators)
# do not map indexed/typed objects; info needed below
nxt = list(nxt)
# known types for which we can avoid deep inspection
no_deep = (
isinstance(nxt, np.ndarray) and nxt.ndim == 1
) or isinstance(nxt, (Series, Index))
# nested list-likes are forbidden:
# -> elements of nxt must not be list-like
is_legal = (no_deep and nxt.dtype == object) or all(
not is_list_like(x) for x in nxt
)
# DataFrame is false positive of is_legal
# because "x in df" returns column names
if not is_legal or isinstance(nxt, DataFrame):
raise TypeError(err_msg)
nxt, wnx = self._get_series_list(nxt, ignore_index=ignore_index)
los = los + nxt
join_warn = join_warn or wnx
if depr_warn:
warnings.warn(
"list-likes other than Series, Index, or "
"np.ndarray WITHIN another list-like are "
"deprecated and will be removed in a future "
"version.",
FutureWarning,
stacklevel=4,
)
return (los, join_warn)
elif all(not is_list_like(x) for x in others):
return ([Series(others, index=idx)], False)
raise TypeError(err_msg)
@forbid_nonstring_types(["bytes", "mixed", "mixed-integer"])
def cat(self, others=None, sep=None, na_rep=None, join=None):
"""
Concatenate strings in the Series/Index with given separator.
If `others` is specified, this function concatenates the Series/Index
and elements of `others` element-wise.
If `others` is not passed, then all values in the Series/Index are
concatenated into a single string with a given `sep`.
Parameters
----------
others : Series, Index, DataFrame, np.ndarray or list-like
Series, Index, DataFrame, np.ndarray (one- or two-dimensional) and
other list-likes of strings must have the same length as the
calling Series/Index, with the exception of indexed objects (i.e.
Series/Index/DataFrame) if `join` is not None.
If others is a list-like that contains a combination of Series,
Index or np.ndarray (1-dim), then all elements will be unpacked and
must satisfy the above criteria individually.
If others is None, the method returns the concatenation of all
strings in the calling Series/Index.
sep : str, default ''
The separator between the different elements/columns. By default
the empty string `''` is used.
na_rep : str or None, default None
Representation that is inserted for all missing values:
- If `na_rep` is None, and `others` is None, missing values in the
Series/Index are omitted from the result.
- If `na_rep` is None, and `others` is not None, a row containing a
missing value in any of the columns (before concatenation) will
have a missing value in the result.
join : {'left', 'right', 'outer', 'inner'}, default None
Determines the join-style between the calling Series/Index and any
Series/Index/DataFrame in `others` (objects without an index need
to match the length of the calling Series/Index). If None,
alignment is disabled, but this option will be removed in a future
version of pandas and replaced with a default of `'left'`. To
disable alignment, use `.values` on any Series/Index/DataFrame in
`others`.
.. versionadded:: 0.23.0
Returns
-------
str, Series or Index
If `others` is None, `str` is returned, otherwise a `Series/Index`
(same type as caller) of objects is returned.
See Also
--------
split : Split each string in the Series/Index.
join : Join lists contained as elements in the Series/Index.
Examples
--------
When not passing `others`, all values are concatenated into a single
string:
>>> s = pd.Series(['a', 'b', np.nan, 'd'])
>>> s.str.cat(sep=' ')
'a b d'
By default, NA values in the Series are ignored. Using `na_rep`, they
can be given a representation:
>>> s.str.cat(sep=' ', na_rep='?')
'a b ? d'
If `others` is specified, corresponding values are concatenated with
the separator. Result will be a Series of strings.
>>> s.str.cat(['A', 'B', 'C', 'D'], sep=',')
0 a,A
1 b,B
2 NaN
3 d,D
dtype: object
Missing values will remain missing in the result, but can again be
represented using `na_rep`
>>> s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')
0 a,A
1 b,B
2 -,C
3 d,D
dtype: object
If `sep` is not specified, the values are concatenated without
separation.
>>> s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')
0 aA
1 bB
2 -C
3 dD
dtype: object
Series with different indexes can be aligned before concatenation. The
`join`-keyword works as in other methods.
>>> t = pd.Series(['d', 'a', 'e', 'c'], index=[3, 0, 4, 2])
>>> s.str.cat(t, join='left', na_rep='-')
0 aa
1 b-
2 -c
3 dd
dtype: object
>>>
>>> s.str.cat(t, join='outer', na_rep='-')
0 aa
1 b-
2 -c
3 dd
4 -e
dtype: object
>>>
>>> s.str.cat(t, join='inner', na_rep='-')
0 aa
2 -c
3 dd
dtype: object
>>>
>>> s.str.cat(t, join='right', na_rep='-')
3 dd
0 aa
4 -e
2 -c
dtype: object
For more examples, see :ref:`here <text.concatenate>`.
"""
from pandas import Index, Series, concat
if isinstance(others, str):
raise ValueError("Did you mean to supply a `sep` keyword?")
if sep is None:
sep = ""
if isinstance(self._orig, Index):
data = Series(self._orig, index=self._orig)
else: # Series
data = self._orig
# concatenate Series/Index with itself if no "others"
if others is None:
data = ensure_object(data)
na_mask = isna(data)
if na_rep is None and na_mask.any():
data = data[~na_mask]
elif na_rep is not None and na_mask.any():
data = np.where(na_mask, na_rep, data)
return sep.join(data)
try:
# turn anything in "others" into lists of Series
others, warn = self._get_series_list(others, ignore_index=(join is None))
except ValueError: # do not catch TypeError raised by _get_series_list
if join is None:
raise ValueError(
"All arrays must be same length, except "
"those having an index if `join` is not None"
)
else:
raise ValueError(
"If `others` contains arrays or lists (or "
"other list-likes without an index), these "
"must all be of the same length as the "
"calling Series/Index."
)
if join is None and warn:
warnings.warn(
"A future version of pandas will perform index "
"alignment when `others` is a Series/Index/"
"DataFrame (or a list-like containing one). To "
"disable alignment (the behavior before v.0.23) and "
"silence this warning, use `.values` on any Series/"
"Index/DataFrame in `others`. To enable alignment "
"and silence this warning, pass `join='left'|"
"'outer'|'inner'|'right'`. The future default will "
"be `join='left'`.",
FutureWarning,
stacklevel=3,
)
# if join is None, _get_series_list already force-aligned indexes
join = "left" if join is None else join
# align if required
if any(not data.index.equals(x.index) for x in others):
# Need to add keys for uniqueness in case of duplicate columns
others = concat(
others,
axis=1,
join=(join if join == "inner" else "outer"),
keys=range(len(others)),
sort=False,
copy=False,
)
data, others = data.align(others, join=join)
others = [others[x] for x in others] # again list of Series
all_cols = [ensure_object(x) for x in [data] + others]
na_masks = np.array([isna(x) for x in all_cols])
union_mask = np.logical_or.reduce(na_masks, axis=0)
if na_rep is None and union_mask.any():
# no na_rep means NaNs for all rows where any column has a NaN
# only necessary if there are actually any NaNs
result = np.empty(len(data), dtype=object)
np.putmask(result, union_mask, np.nan)
not_masked = ~union_mask
result[not_masked] = cat_safe([x[not_masked] for x in all_cols], sep)
elif na_rep is not None and union_mask.any():
# fill NaNs with na_rep in case there are actually any NaNs
all_cols = [
np.where(nm, na_rep, col) for nm, col in zip(na_masks, all_cols)
]
result = cat_safe(all_cols, sep)
else:
# no NaNs - can just concatenate
result = cat_safe(all_cols, sep)
if isinstance(self._orig, Index):
# add dtype for case that result is all-NA
result = Index(result, dtype=object, name=self._orig.name)
else: # Series
result = Series(
result, dtype=object, index=data.index, name=self._orig.name
)
return result
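    # Illustrative sketch (not from the original source): the aligned branch above
    # also accepts a DataFrame in ``others``, concatenating column by column, e.g.
    #   s = pd.Series(['a', 'b'])
    #   s.str.cat(pd.DataFrame({'x': ['1', '2'], 'y': ['3', '4']}), sep='-', join='left')
    # which would yield one string per row, such as 'a-1-3' and 'b-2-4'.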
_shared_docs[
"str_split"
] = r"""
Split strings around given separator/delimiter.
Splits the string in the Series/Index from the %(side)s,
at the specified delimiter string. Equivalent to :meth:`str.%(method)s`.
Parameters
----------
pat : str, optional
String or regular expression to split on.
If not specified, split on whitespace.
n : int, default -1 (all)
Limit number of splits in output.
``None``, 0 and -1 will be interpreted as return all splits.
expand : bool, default False
        Expand the split strings into separate columns.
* If ``True``, return DataFrame/MultiIndex expanding dimensionality.
* If ``False``, return Series/Index, containing lists of strings.
Returns
-------
Series, Index, DataFrame or MultiIndex
Type matches caller unless ``expand=True`` (see Notes).
See Also
--------
Series.str.split : Split strings around given separator/delimiter.
Series.str.rsplit : Splits string around given separator/delimiter,
starting from the right.
Series.str.join : Join lists contained as elements in the Series/Index
with passed delimiter.
str.split : Standard library version for split.
str.rsplit : Standard library version for rsplit.
Notes
-----
The handling of the `n` keyword depends on the number of found splits:
- If found splits > `n`, make first `n` splits only
- If found splits <= `n`, make all splits
- If for a certain row the number of found splits < `n`,
append `None` for padding up to `n` if ``expand=True``
If using ``expand=True``, Series and Index callers return DataFrame and
MultiIndex objects, respectively.
Examples
--------
>>> s = pd.Series(["this is a regular sentence",
... "https://docs.python.org/3/tutorial/index.html",
... np.nan])
0 this is a regular sentence
1 https://docs.python.org/3/tutorial/index.html
2 NaN
dtype: object
In the default setting, the string is split by whitespace.
>>> s.str.split()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
Without the `n` parameter, the outputs of `rsplit` and `split`
are identical.
>>> s.str.rsplit()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `n` parameter can be used to limit the number of splits on the
delimiter. The outputs of `split` and `rsplit` are different.
>>> s.str.split(n=2)
0 [this, is, a regular sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
>>> s.str.rsplit(n=2)
0 [this is a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `pat` parameter can be used to split by other characters.
>>> s.str.split(pat = "/")
0 [this is a regular sentence]
1 [https:, , docs.python.org, 3, tutorial, index...
2 NaN
dtype: object
When using ``expand=True``, the split elements will expand out into
separate columns. If NaN is present, it is propagated throughout
the columns during the split.
>>> s.str.split(expand=True)
0 1 2 3
0 this is a regular
1 https://docs.python.org/3/tutorial/index.html None None None
2 NaN NaN NaN NaN \
4
0 sentence
1 None
2 NaN
For slightly more complex use cases like splitting the html document name
from a url, a combination of parameter settings can be used.
>>> s.str.rsplit("/", n=1, expand=True)
0 1
0 this is a regular sentence None
1 https://docs.python.org/3/tutorial index.html
2 NaN NaN
Remember to escape special characters when explicitly using regular
expressions.
>>> s = pd.Series(["1+1=2"])
>>> s.str.split(r"\+|=", expand=True)
0 1 2
0 1 1 2
"""
@Appender(_shared_docs["str_split"] % {"side": "beginning", "method": "split"})
@forbid_nonstring_types(["bytes"])
def split(self, pat=None, n=-1, expand=False):
result = str_split(self._parent, pat, n=n)
return self._wrap_result(result, expand=expand)
@Appender(_shared_docs["str_split"] % {"side": "end", "method": "rsplit"})
@forbid_nonstring_types(["bytes"])
def rsplit(self, pat=None, n=-1, expand=False):
result = str_rsplit(self._parent, pat, n=n)
return self._wrap_result(result, expand=expand)
_shared_docs[
"str_partition"
] = """
Split the string at the %(side)s occurrence of `sep`.
This method splits the string at the %(side)s occurrence of `sep`,
and returns 3 elements containing the part before the separator,
the separator itself, and the part after the separator.
If the separator is not found, return %(return)s.
Parameters
----------
sep : str, default whitespace
String to split on.
pat : str, default whitespace
.. deprecated:: 0.24.0
Use ``sep`` instead
expand : bool, default True
If True, return DataFrame/MultiIndex expanding dimensionality.
If False, return Series/Index.
Returns
-------
DataFrame/MultiIndex or Series/Index of objects
See Also
--------
%(also)s
Series.str.split : Split strings around given separators.
str.partition : Standard library version.
Examples
--------
>>> s = pd.Series(['Linda van der Berg', 'George Pitt-Rivers'])
>>> s
0 Linda van der Berg
1 George Pitt-Rivers
dtype: object
>>> s.str.partition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by the last space instead of the first one:
>>> s.str.rpartition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by something different than a space:
>>> s.str.partition('-')
0 1 2
0 Linda van der Berg
1 George Pitt - Rivers
To return a Series containing tuples instead of a DataFrame:
>>> s.str.partition('-', expand=False)
0 (Linda van der Berg, , )
1 (George Pitt, -, Rivers)
dtype: object
Also available on indices:
>>> idx = pd.Index(['X 123', 'Y 999'])
>>> idx
Index(['X 123', 'Y 999'], dtype='object')
Which will create a MultiIndex:
>>> idx.str.partition()
MultiIndex([('X', ' ', '123'),
('Y', ' ', '999')],
dtype='object')
Or an index with tuples with ``expand=False``:
>>> idx.str.partition(expand=False)
Index([('X', ' ', '123'), ('Y', ' ', '999')], dtype='object')
"""
@Appender(
_shared_docs["str_partition"]
% {
"side": "first",
"return": "3 elements containing the string itself, followed by two "
"empty strings",
"also": "rpartition : Split the string at the last occurrence of " "`sep`.",
}
)
@deprecate_kwarg(old_arg_name="pat", new_arg_name="sep")
@forbid_nonstring_types(["bytes"])
def partition(self, sep=" ", expand=True):
f = lambda x: x.partition(sep)
result = _na_map(f, self._parent)
return self._wrap_result(result, expand=expand)
@Appender(
_shared_docs["str_partition"]
% {
"side": "last",
"return": "3 elements containing two empty strings, followed by the "
"string itself",
"also": "partition : Split the string at the first occurrence of " "`sep`.",
}
)
@deprecate_kwarg(old_arg_name="pat", new_arg_name="sep")
@forbid_nonstring_types(["bytes"])
def rpartition(self, sep=" ", expand=True):
f = lambda x: x.rpartition(sep)
result = _na_map(f, self._parent)
return self._wrap_result(result, expand=expand)
@copy(str_get)
def get(self, i):
result = str_get(self._parent, i)
return self._wrap_result(result)
@copy(str_join)
@forbid_nonstring_types(["bytes"])
def join(self, sep):
result = str_join(self._parent, sep)
return self._wrap_result(result)
@copy(str_contains)
@forbid_nonstring_types(["bytes"])
def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):
result = str_contains(
self._parent, pat, case=case, flags=flags, na=na, regex=regex
)
return self._wrap_result(result, fill_value=na)
@copy(str_match)
@forbid_nonstring_types(["bytes"])
def match(self, pat, case=True, flags=0, na=np.nan):
result = str_match(self._parent, pat, case=case, flags=flags, na=na)
return self._wrap_result(result, fill_value=na)
@copy(str_replace)
@forbid_nonstring_types(["bytes"])
def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):
result = str_replace(
self._parent, pat, repl, n=n, case=case, flags=flags, regex=regex
)
return self._wrap_result(result)
@copy(str_repeat)
@forbid_nonstring_types(["bytes"])
def repeat(self, repeats):
result = str_repeat(self._parent, repeats)
return self._wrap_result(result)
@copy(str_pad)
@forbid_nonstring_types(["bytes"])
def pad(self, width, side="left", fillchar=" "):
result = str_pad(self._parent, width, side=side, fillchar=fillchar)
return self._wrap_result(result)
_shared_docs[
"str_pad"
] = """
Filling %(side)s side of strings in the Series/Index with an
additional character. Equivalent to :meth:`str.%(method)s`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with ``fillchar``
fillchar : str
Additional character for filling, default is whitespace
Returns
-------
filled : Series/Index of objects
"""
@Appender(_shared_docs["str_pad"] % dict(side="left and right", method="center"))
@forbid_nonstring_types(["bytes"])
def center(self, width, fillchar=" "):
return self.pad(width, side="both", fillchar=fillchar)
@Appender(_shared_docs["str_pad"] % dict(side="right", method="ljust"))
@forbid_nonstring_types(["bytes"])
def ljust(self, width, fillchar=" "):
return self.pad(width, side="right", fillchar=fillchar)
@Appender(_shared_docs["str_pad"] % dict(side="left", method="rjust"))
@forbid_nonstring_types(["bytes"])
def rjust(self, width, fillchar=" "):
return self.pad(width, side="left", fillchar=fillchar)
@forbid_nonstring_types(["bytes"])
def zfill(self, width):
"""
Pad strings in the Series/Index by prepending '0' characters.
Strings in the Series/Index are padded with '0' characters on the
left of the string to reach a total string length `width`. Strings
in the Series/Index with length greater or equal to `width` are
unchanged.
Parameters
----------
width : int
Minimum length of resulting string; strings with length less
            than `width` will be prepended with '0' characters.
Returns
-------
Series/Index of objects
See Also
--------
Series.str.rjust : Fills the left side of strings with an arbitrary
character.
Series.str.ljust : Fills the right side of strings with an arbitrary
character.
Series.str.pad : Fills the specified sides of strings with an arbitrary
character.
        Series.str.center : Fills both sides of strings with an arbitrary
character.
Notes
-----
Differs from :meth:`str.zfill` which has special handling
for '+'/'-' in the string.
Examples
--------
>>> s = pd.Series(['-1', '1', '1000', 10, np.nan])
>>> s
0 -1
1 1
2 1000
3 10
4 NaN
dtype: object
Note that ``10`` and ``NaN`` are not strings, therefore they are
converted to ``NaN``. The minus sign in ``'-1'`` is treated as a
regular character and the zero is added to the left of it
(:meth:`str.zfill` would have moved it to the left). ``1000``
remains unchanged as it is longer than `width`.
>>> s.str.zfill(3)
0 0-1
1 001
2 1000
3 NaN
4 NaN
dtype: object
"""
result = str_pad(self._parent, width, side="left", fillchar="0")
return self._wrap_result(result)
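    # Contrast with the built-in behaviour noted in the docstring above
    # (illustrative): '-1'.zfill(3) == '-01', whereas Series.str.zfill pads
    # strictly on the left and yields '0-1'.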
@copy(str_slice)
def slice(self, start=None, stop=None, step=None):
result = str_slice(self._parent, start, stop, step)
return self._wrap_result(result)
@copy(str_slice_replace)
@forbid_nonstring_types(["bytes"])
def slice_replace(self, start=None, stop=None, repl=None):
result = str_slice_replace(self._parent, start, stop, repl)
return self._wrap_result(result)
@copy(str_decode)
def decode(self, encoding, errors="strict"):
# need to allow bytes here
result = str_decode(self._parent, encoding, errors)
return self._wrap_result(result)
@copy(str_encode)
@forbid_nonstring_types(["bytes"])
def encode(self, encoding, errors="strict"):
result = str_encode(self._parent, encoding, errors)
return self._wrap_result(result)
_shared_docs[
"str_strip"
] = r"""
Remove leading and trailing characters.
Strip whitespaces (including newlines) or a set of specified characters
from each string in the Series/Index from %(side)s.
Equivalent to :meth:`str.%(method)s`.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will be stripped.
If None then whitespaces are removed.
Returns
-------
Series/Index of objects
See Also
--------
Series.str.strip : Remove leading and trailing characters in Series/Index.
Series.str.lstrip : Remove leading characters in Series/Index.
Series.str.rstrip : Remove trailing characters in Series/Index.
Examples
--------
>>> s = pd.Series(['1. Ant. ', '2. Bee!\n', '3. Cat?\t', np.nan])
>>> s
0 1. Ant.
1 2. Bee!\n
2 3. Cat?\t
3 NaN
dtype: object
>>> s.str.strip()
0 1. Ant.
1 2. Bee!
2 3. Cat?
3 NaN
dtype: object
>>> s.str.lstrip('123.')
0 Ant.
1 Bee!\n
2 Cat?\t
3 NaN
dtype: object
>>> s.str.rstrip('.!? \n\t')
0 1. Ant
1 2. Bee
2 3. Cat
3 NaN
dtype: object
>>> s.str.strip('123.!? \n\t')
0 Ant
1 Bee
2 Cat
3 NaN
dtype: object
"""
@Appender(
_shared_docs["str_strip"] % dict(side="left and right sides", method="strip")
)
@forbid_nonstring_types(["bytes"])
def strip(self, to_strip=None):
result = str_strip(self._parent, to_strip, side="both")
return self._wrap_result(result)
@Appender(_shared_docs["str_strip"] % dict(side="left side", method="lstrip"))
@forbid_nonstring_types(["bytes"])
def lstrip(self, to_strip=None):
result = str_strip(self._parent, to_strip, side="left")
return self._wrap_result(result)
@Appender(_shared_docs["str_strip"] % dict(side="right side", method="rstrip"))
@forbid_nonstring_types(["bytes"])
def rstrip(self, to_strip=None):
result = str_strip(self._parent, to_strip, side="right")
return self._wrap_result(result)
@copy(str_wrap)
@forbid_nonstring_types(["bytes"])
def wrap(self, width, **kwargs):
result = str_wrap(self._parent, width, **kwargs)
return self._wrap_result(result)
@copy(str_get_dummies)
@forbid_nonstring_types(["bytes"])
def get_dummies(self, sep="|"):
# we need to cast to Series of strings as only that has all
# methods available for making the dummies...
data = self._orig.astype(str) if self._is_categorical else self._parent
result, name = str_get_dummies(data, sep)
return self._wrap_result(
result, use_codes=(not self._is_categorical), name=name, expand=True
)
@copy(str_translate)
@forbid_nonstring_types(["bytes"])
def translate(self, table):
result = str_translate(self._parent, table)
return self._wrap_result(result)
count = _pat_wrapper(str_count, flags=True, name="count")
startswith = _pat_wrapper(str_startswith, na=True, name="startswith")
endswith = _pat_wrapper(str_endswith, na=True, name="endswith")
findall = _pat_wrapper(str_findall, flags=True, name="findall")
@copy(str_extract)
@forbid_nonstring_types(["bytes"])
def extract(self, pat, flags=0, expand=True):
return str_extract(self, pat, flags=flags, expand=expand)
@copy(str_extractall)
@forbid_nonstring_types(["bytes"])
def extractall(self, pat, flags=0):
return str_extractall(self._orig, pat, flags=flags)
_shared_docs[
"find"
] = """
    Return %(side)s indexes in each string in the Series/Index
where the substring is fully contained between [start:end].
Return -1 on failure. Equivalent to standard :meth:`str.%(method)s`.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : Series/Index of integer values
See Also
--------
%(also)s
"""
@Appender(
_shared_docs["find"]
% dict(
side="lowest",
method="find",
also="rfind : Return highest indexes in each strings.",
)
)
@forbid_nonstring_types(["bytes"])
def find(self, sub, start=0, end=None):
result = str_find(self._parent, sub, start=start, end=end, side="left")
return self._wrap_result(result)
@Appender(
_shared_docs["find"]
% dict(
side="highest",
method="rfind",
also="find : Return lowest indexes in each strings.",
)
)
@forbid_nonstring_types(["bytes"])
def rfind(self, sub, start=0, end=None):
result = str_find(self._parent, sub, start=start, end=end, side="right")
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def normalize(self, form):
"""
Return the Unicode normal form for the strings in the Series/Index.
For more information on the forms, see the
:func:`unicodedata.normalize`.
Parameters
----------
form : {'NFC', 'NFKC', 'NFD', 'NFKD'}
Unicode form
Returns
-------
normalized : Series/Index of objects
"""
import unicodedata
f = lambda x: unicodedata.normalize(form, x)
result = _na_map(f, self._parent)
return self._wrap_result(result)
_shared_docs[
"index"
] = """
    Return %(side)s indexes in each string where the substring is
fully contained between [start:end]. This is the same as
``str.%(similar)s`` except instead of returning -1, it raises a ValueError
when the substring is not found. Equivalent to standard ``str.%(method)s``.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : Series/Index of objects
See Also
--------
%(also)s
"""
@Appender(
_shared_docs["index"]
% dict(
side="lowest",
similar="find",
method="index",
also="rindex : Return highest indexes in each strings.",
)
)
@forbid_nonstring_types(["bytes"])
def index(self, sub, start=0, end=None):
result = str_index(self._parent, sub, start=start, end=end, side="left")
return self._wrap_result(result)
@Appender(
_shared_docs["index"]
% dict(
side="highest",
similar="rfind",
method="rindex",
also="index : Return lowest indexes in each strings.",
)
)
@forbid_nonstring_types(["bytes"])
def rindex(self, sub, start=0, end=None):
result = str_index(self._parent, sub, start=start, end=end, side="right")
return self._wrap_result(result)
_shared_docs[
"len"
] = """
Compute the length of each element in the Series/Index. The element may be
a sequence (such as a string, tuple or list) or a collection
(such as a dictionary).
Returns
-------
Series or Index of int
A Series or Index of integer values indicating the length of each
element in the Series or Index.
See Also
--------
str.len : Python built-in function returning the length of an object.
Series.size : Returns the length of the Series.
Examples
--------
Returns the length (number of characters) in a string. Returns the
number of entries for dictionaries, lists or tuples.
>>> s = pd.Series(['dog',
... '',
... 5,
... {'foo' : 'bar'},
... [2, 3, 5, 7],
... ('one', 'two', 'three')])
>>> s
0 dog
1
2 5
3 {'foo': 'bar'}
4 [2, 3, 5, 7]
5 (one, two, three)
dtype: object
>>> s.str.len()
0 3.0
1 0.0
2 NaN
3 1.0
4 4.0
5 3.0
dtype: float64
"""
len = _noarg_wrapper(
len, docstring=_shared_docs["len"], forbidden_types=None, dtype=int
)
_shared_docs[
"casemethods"
] = """
Convert strings in the Series/Index to %(type)s.
%(version)s
Equivalent to :meth:`str.%(method)s`.
Returns
-------
Series/Index of objects
See Also
--------
Series.str.lower : Converts all characters to lowercase.
Series.str.upper : Converts all characters to uppercase.
Series.str.title : Converts first character of each word to uppercase and
remaining to lowercase.
Series.str.capitalize : Converts first character to uppercase and
remaining to lowercase.
Series.str.swapcase : Converts uppercase to lowercase and lowercase to
uppercase.
Series.str.casefold: Removes all case distinctions in the string.
Examples
--------
>>> s = pd.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.lower()
0 lower
1 capitals
2 this is a sentence
3 swapcase
dtype: object
>>> s.str.upper()
0 LOWER
1 CAPITALS
2 THIS IS A SENTENCE
3 SWAPCASE
dtype: object
>>> s.str.title()
0 Lower
1 Capitals
2 This Is A Sentence
3 Swapcase
dtype: object
>>> s.str.capitalize()
0 Lower
1 Capitals
2 This is a sentence
3 Swapcase
dtype: object
>>> s.str.swapcase()
0 LOWER
1 capitals
2 THIS IS A SENTENCE
3 sWaPcAsE
dtype: object
"""
# _doc_args holds dict of strings to use in substituting casemethod docs
_doc_args = {} # type: Dict[str, Dict[str, str]]
_doc_args["lower"] = dict(type="lowercase", method="lower", version="")
_doc_args["upper"] = dict(type="uppercase", method="upper", version="")
_doc_args["title"] = dict(type="titlecase", method="title", version="")
_doc_args["capitalize"] = dict(
type="be capitalized", method="capitalize", version=""
)
_doc_args["swapcase"] = dict(type="be swapcased", method="swapcase", version="")
_doc_args["casefold"] = dict(
type="be casefolded",
method="casefold",
version="\n .. versionadded:: 0.25.0\n",
)
lower = _noarg_wrapper(
lambda x: x.lower(),
name="lower",
docstring=_shared_docs["casemethods"] % _doc_args["lower"],
)
upper = _noarg_wrapper(
lambda x: x.upper(),
name="upper",
docstring=_shared_docs["casemethods"] % _doc_args["upper"],
)
title = _noarg_wrapper(
lambda x: x.title(),
name="title",
docstring=_shared_docs["casemethods"] % _doc_args["title"],
)
capitalize = _noarg_wrapper(
lambda x: x.capitalize(),
name="capitalize",
docstring=_shared_docs["casemethods"] % _doc_args["capitalize"],
)
swapcase = _noarg_wrapper(
lambda x: x.swapcase(),
name="swapcase",
docstring=_shared_docs["casemethods"] % _doc_args["swapcase"],
)
casefold = _noarg_wrapper(
lambda x: x.casefold(),
name="casefold",
docstring=_shared_docs["casemethods"] % _doc_args["casefold"],
)
_shared_docs[
"ismethods"
] = """
Check whether all characters in each string are %(type)s.
This is equivalent to running the Python string method
:meth:`str.%(method)s` for each element of the Series/Index. If a string
has zero characters, ``False`` is returned for that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same length as the original
Series/Index.
See Also
--------
Series.str.isalpha : Check whether all characters are alphabetic.
Series.str.isnumeric : Check whether all characters are numeric.
Series.str.isalnum : Check whether all characters are alphanumeric.
Series.str.isdigit : Check whether all characters are digits.
Series.str.isdecimal : Check whether all characters are decimal.
Series.str.isspace : Check whether all characters are whitespace.
Series.str.islower : Check whether all characters are lowercase.
Series.str.isupper : Check whether all characters are uppercase.
Series.str.istitle : Check whether all characters are titlecase.
Examples
--------
**Checks for Alphabetic and Numeric Characters**
>>> s1 = pd.Series(['one', 'one1', '1', ''])
>>> s1.str.isalpha()
0 True
1 False
2 False
3 False
dtype: bool
>>> s1.str.isnumeric()
0 False
1 False
2 True
3 False
dtype: bool
>>> s1.str.isalnum()
0 True
1 True
2 True
3 False
dtype: bool
Note that checks against characters mixed with any additional punctuation
or whitespace will evaluate to false for an alphanumeric check.
>>> s2 = pd.Series(['A B', '1.5', '3,000'])
>>> s2.str.isalnum()
0 False
1 False
2 False
dtype: bool
**More Detailed Checks for Numeric Characters**
There are several different but overlapping sets of numeric characters that
can be checked for.
>>> s3 = pd.Series(['23', '³', '⅕', ''])
The ``s3.str.isdecimal`` method checks for characters used to form numbers
in base 10.
>>> s3.str.isdecimal()
0 True
1 False
2 False
3 False
dtype: bool
    The ``s3.str.isdigit`` method is the same as ``s3.str.isdecimal`` but also
includes special digits, like superscripted and subscripted digits in
unicode.
>>> s3.str.isdigit()
0 True
1 True
2 False
3 False
dtype: bool
    The ``s3.str.isnumeric`` method is the same as ``s3.str.isdigit`` but also
includes other characters that can represent quantities such as unicode
fractions.
>>> s3.str.isnumeric()
0 True
1 True
2 True
3 False
dtype: bool
**Checks for Whitespace**
>>> s4 = pd.Series([' ', '\\t\\r\\n ', ''])
>>> s4.str.isspace()
0 True
1 True
2 False
dtype: bool
**Checks for Character Case**
>>> s5 = pd.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s5.str.islower()
0 True
1 False
2 False
3 False
dtype: bool
>>> s5.str.isupper()
0 False
1 False
2 True
3 False
dtype: bool
The ``s5.str.istitle`` method checks for whether all words are in title
case (whether only the first letter of each word is capitalized). Words are
    assumed to be any sequence of non-numeric characters separated by
whitespace characters.
>>> s5.str.istitle()
0 False
1 True
2 False
3 False
dtype: bool
"""
_doc_args["isalnum"] = dict(type="alphanumeric", method="isalnum")
_doc_args["isalpha"] = dict(type="alphabetic", method="isalpha")
_doc_args["isdigit"] = dict(type="digits", method="isdigit")
_doc_args["isspace"] = dict(type="whitespace", method="isspace")
_doc_args["islower"] = dict(type="lowercase", method="islower")
_doc_args["isupper"] = dict(type="uppercase", method="isupper")
_doc_args["istitle"] = dict(type="titlecase", method="istitle")
_doc_args["isnumeric"] = dict(type="numeric", method="isnumeric")
_doc_args["isdecimal"] = dict(type="decimal", method="isdecimal")
isalnum = _noarg_wrapper(
lambda x: x.isalnum(),
name="isalnum",
docstring=_shared_docs["ismethods"] % _doc_args["isalnum"],
)
isalpha = _noarg_wrapper(
lambda x: x.isalpha(),
name="isalpha",
docstring=_shared_docs["ismethods"] % _doc_args["isalpha"],
)
isdigit = _noarg_wrapper(
lambda x: x.isdigit(),
name="isdigit",
docstring=_shared_docs["ismethods"] % _doc_args["isdigit"],
)
isspace = _noarg_wrapper(
lambda x: x.isspace(),
name="isspace",
docstring=_shared_docs["ismethods"] % _doc_args["isspace"],
)
islower = _noarg_wrapper(
lambda x: x.islower(),
name="islower",
docstring=_shared_docs["ismethods"] % _doc_args["islower"],
)
isupper = _noarg_wrapper(
lambda x: x.isupper(),
name="isupper",
docstring=_shared_docs["ismethods"] % _doc_args["isupper"],
)
istitle = _noarg_wrapper(
lambda x: x.istitle(),
name="istitle",
docstring=_shared_docs["ismethods"] % _doc_args["istitle"],
)
isnumeric = _noarg_wrapper(
lambda x: x.isnumeric(),
name="isnumeric",
docstring=_shared_docs["ismethods"] % _doc_args["isnumeric"],
)
isdecimal = _noarg_wrapper(
lambda x: x.isdecimal(),
name="isdecimal",
docstring=_shared_docs["ismethods"] % _doc_args["isdecimal"],
)
@classmethod
def _make_accessor(cls, data):
cls._validate(data)
return cls(data)
| apache-2.0 |
mzl9039/spark | python/pyspark/sql/session.py | 14 | 25557 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
else:
from itertools import imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.catalog import Catalog
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, _verify_type, \
_infer_schema, _has_nulltype, _merge_type, _create_converter, _parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
yes, return that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
# This SparkContext may be an existing one.
for key, value in self._options.items():
# we need to propagate the confs
# before we create the SparkSession. Otherwise, confs like
# warehouse path and metastore url will not be set correctly (
# these confs cannot be changed once the SparkSession is created).
sc._conf.set(key, value)
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
for key, value in self._options.items():
session.sparkContext._conf.set(key, value)
return session
builder = Builder()
_instantiatedSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
"""
        Returns a new SparkSession as a new session that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
"""
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.context import UDFRegistration
return UDFRegistration(self._wrapped)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, map(_infer_schema, data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(_infer_schema).reduce(_merge_type)
return schema
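    # Illustrative sketch (not part of the original source): schema inference is
    # exercised indirectly through createDataFrame, e.g.
    #   spark.createDataFrame(sc.parallelize([Row(a=1, b=None), Row(a=2, b='x')]),
    #                         samplingRatio=0.5)
    # samples roughly half of the rows before merging the per-row inferred types.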
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data could consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
if schema is None:
schema = [str(x) for x in data.columns]
data = [r.tolist() for r in data.to_records(index=False)]
verify_func = _verify_type if verifySchema else lambda _, t: True
if isinstance(schema, StructType):
def prepare(obj):
verify_func(obj, schema)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
def prepare(obj):
verify_func(obj, dataType)
return obj,
else:
if isinstance(schema, list):
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
        :class:`StreamingQuery` instances active on `this` context.
.. note:: Evolving.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
SparkSession._instantiatedSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
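    # Illustrative (hypothetical) use of the context-manager support above:
    #   with SparkSession.builder.master("local[2]").appName("demo").getOrCreate() as spark:
    #       spark.range(3).show()
    # the underlying SparkContext is stopped automatically when the block exits.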
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
lazywei/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes the data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
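# A rough, optional check of the separability claim in the module docstring above.
# Not part of the original example; LogisticRegression is only used here as a
# convenient linear classifier on the first two components of each projection.
from sklearn.linear_model import LogisticRegression
for name, Z in (("PCA", X_pca[:, :2]), ("KernelPCA", X_kpca[:, :2])):
    # training accuracy of a linear decision boundary in the projected space
    acc = LogisticRegression().fit(Z, y).score(Z, y)
    print("%s linear training accuracy: %.2f" % (name, acc))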
| bsd-3-clause |
bachiraoun/fullrmc | Constraints/StructureFactorConstraints.py | 1 | 64342 | """
StructureFactorConstraints contains classes for all constraints related to experimental static structure factor functions.
.. inheritance-diagram:: fullrmc.Constraints.StructureFactorConstraints
:parts: 1
"""
# standard libraries imports
from __future__ import print_function
import itertools, re
# external libraries imports
import numpy as np
from pdbparser.Utilities.Database import is_element_property, get_element_property
from pdbparser.Utilities.Collection import get_normalized_weighting
# fullrmc imports
from ..Globals import INT_TYPE, FLOAT_TYPE, PI, PRECISION, LOGGER
from ..Globals import str, long, unicode, bytes, basestring, range, xrange, maxint
from ..Core.Collection import is_number, is_integer, get_path
from ..Core.Collection import reset_if_collected_out_of_date, get_real_elements_weight
from ..Core.Collection import get_caller_frames
from ..Core.Constraint import Constraint, ExperimentalConstraint
from ..Core.pairs_histograms import multiple_pairs_histograms_coords, full_pairs_histograms_coords
class StructureFactorConstraint(ExperimentalConstraint):
"""
Controls the Structure Factor noted as S(Q) and also called
total-scattering structure function or Static Structure Factor.
S(Q) is a dimensionless quantity and normalized such as the average
value :math:`<S(Q)>=1`.
    It is worth mentioning that S(Q) is nothing other than the normalized and
    corrected powder diffraction pattern, once all experimental artefacts have
    been removed.
The computation of S(Q) is done through an inverse Sine Fourier transform
of the computed pair distribution function G(r).
.. math::
S(Q) = 1+ \\frac{1}{Q} \\int_{0}^{\\infty} G(r) sin(Qr) dr
From an atomistic model and histogram point of view, G(r) is computed as
the following:
.. math::
G(r) = 4 \\pi r (\\rho_{r} - \\rho_{0})
= 4 \\pi \\rho_{0} r (g(r)-1)
= \\frac{R(r)}{r} - 4 \\pi \\rho_{0}
    g(r) is calculated after binning all pair atomic distances into
    weighted histograms as the following:
.. math::
g(r) = \\sum \\limits_{i,j}^{N} w_{i,j} \\frac{\\rho_{i,j}(r)}{\\rho_{0}}
= \\sum \\limits_{i,j}^{N} w_{i,j} \\frac{n_{i,j}(r) / v(r)}{N_{i,j} / V}
Where:\n
:math:`Q` is the momentum transfer. \n
:math:`r` is the distance between two atoms. \n
:math:`\\rho_{i,j}(r)` is the pair density function of atoms i and j. \n
:math:`\\rho_{0}` is the average number density of the system. \n
:math:`w_{i,j}` is the relative weighting of atom types i and j. \n
:math:`R(r)` is the radial distribution function (rdf). \n
:math:`N` is the total number of atoms. \n
:math:`V` is the volume of the system. \n
:math:`n_{i,j}(r)` is the number of atoms i neighbouring j at a distance r. \n
:math:`v(r)` is the annulus volume at distance r and of thickness dr. \n
:math:`N_{i,j}` is the total number of atoms i and j in the system. \n
+----------------------------------------------------------------------+
|.. figure:: reduced_structure_factor_constraint_plot_method.png |
| :width: 530px |
| :height: 400px |
| :align: left |
| |
| Reduced structure factor of memory shape Nickel-Titanium alloy. |
+----------------------------------------------------------------------+
:Parameters:
#. experimentalData (numpy.ndarray, string): Experimental data as
numpy.ndarray or string path to load data using numpy.loadtxt
method.
#. dataWeights (None, numpy.ndarray): Weights array of the same number
of points of experimentalData used in the constraint's standard
error computation. Therefore particular fitting emphasis can be
put on different data points that might be considered as more or less
important in order to get a reasonable and plausible modal.\n
If None is given, all data points are considered of the same
importance in the computation of the constraint's standard error.\n
If numpy.ndarray is given, all weights must be positive and all
zeros weighted data points won't contribute to the total
constraint's standard error. At least a single weight point is
required to be non-zeros and the weights array will be automatically
scaled upon setting such as the the sum of all the weights
is equal to the number of data points.
#. weighting (string): The elements weighting scheme. It must be any
atomic attribute (atomicNumber, neutronCohb, neutronIncohb,
neutronCohXs, neutronIncohXs, atomicWeight, covalentRadius) defined
in pdbparser database. In case of xrays or neutrons experimental
weights, one can simply set weighting to 'xrays' or 'neutrons'
and the value will be automatically adjusted to respectively
'atomicNumber' and 'neutronCohb'. If attribute values are
missing in the pdbparser database, atomic weights must be
given in atomsWeight dictionary argument.
#. atomsWeight (None, dict): Atoms weight dictionary where keys are
atoms element and values are custom weights. If None is given
           or partially given, missing elements weighting will be fully set
           using the given weighting scheme.
#. rmin (None, number): The minimum distance value to compute G(r)
histogram. If None is given, rmin is computed as
:math:`2 \\pi / Q_{max}`.
#. rmax (None, number): The maximum distance value to compute G(r)
histogram. If None is given, rmax is computed as
:math:`2 \\pi / dQ`.
#. dr (None, number): The distance bin value to compute G(r)
histogram. If None is given, bin is computed as
:math:`2 \\pi / (Q_{max}-Q_{min})`.
#. scaleFactor (number): A normalization scale factor used to normalize
the computed data to the experimental ones.
        #. adjustScaleFactor (list, tuple): Used to adjust, fit or guess
the best scale factor during stochastic engine runtime.
It must be a list of exactly three entries.\n
#. The frequency in number of generated moves of finding the best
scale factor. If 0 frequency is given, it means that the scale
factor is fixed.
#. The minimum allowed scale factor value.
#. The maximum allowed scale factor value.
#. windowFunction (None, numpy.ndarray): The window function to
convolute with the computed pair distribution function of the
system prior to comparing it with the experimental data. In
general, the experimental pair distribution function G(r) shows
           artificial wrinkles, mainly because G(r) is computed by applying
           a sine Fourier transform to the experimental structure factor
           S(q). Therefore the window function is used to best imitate the
           numerical artefacts in the experimental
data.
#. limits (None, tuple, list): The distance limits to compute the
histograms. If None is given, the limits will be automatically
           set to the min and max distance of the experimental data.
Otherwise, a tuple of exactly two items where the first is the
minimum distance or None and the second is the maximum distance
or None.
**NB**: If adjustScaleFactor first item (frequency) is 0, the scale factor
will remain untouched and the limits minimum and maximum won't be checked.
.. code-block:: python
# import fullrmc modules
from fullrmc.Engine import Engine
from fullrmc.Constraints.StructureFactorConstraints import StructureFactorConstraint
# create engine
ENGINE = Engine(path='my_engine.rmc')
# set pdb file
ENGINE.set_pdb('system.pdb')
# create and add constraint
SFC = StructureFactorConstraint(experimentalData="sq.dat", weighting="atomicNumber")
ENGINE.add_constraints(SFC)
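        # the following two calls are illustrative only (assumed values):
        # restrict the fitted Q-range and switch to neutron weighting
        SFC.set_limits((0.5, 20.))
        SFC.set_weighting('neutrons')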
"""
def __init__(self, experimentalData, dataWeights=None,
weighting="atomicNumber", atomsWeight=None,
rmin=None, rmax=None, dr=None,
scaleFactor=1.0, adjustScaleFactor=(0, 0.8, 1.2),
windowFunction=None, limits=None):
# initialize variables
self.__experimentalQValues = None
self.__experimentalSF = None
self.__rmin = None
self.__rmax = None
self.__dr = None
self.__minimumDistance = None
self.__maximumDistance = None
self.__bin = None
self.__shellCenters = None
self.__histogramSize = None
self.__shellVolumes = None
self.__Gr2SqMatrix = None
# initialize constraint
super(StructureFactorConstraint, self).__init__( experimentalData=experimentalData, dataWeights=dataWeights, scaleFactor=scaleFactor, adjustScaleFactor=adjustScaleFactor)
# set atomsWeight
self.set_atoms_weight(atomsWeight)
# set elements weighting
self.set_weighting(weighting)
self.__set_weighting_scheme()
# set window function
self.set_window_function(windowFunction)
# set r parameters
self.set_rmin(rmin)
self.set_rmax(rmax)
self.set_dr(dr)
# set frame data
FRAME_DATA = [d for d in self.FRAME_DATA]
FRAME_DATA.extend(['_StructureFactorConstraint__experimentalQValues',
'_StructureFactorConstraint__experimentalSF',
'_StructureFactorConstraint__elementsPairs',
'_StructureFactorConstraint__weightingScheme',
'_StructureFactorConstraint__atomsWeight',
'_StructureFactorConstraint__qmin',
'_StructureFactorConstraint__qmax',
'_StructureFactorConstraint__rmin',
'_StructureFactorConstraint__rmax',
'_StructureFactorConstraint__dr',
'_StructureFactorConstraint__minimumDistance',
'_StructureFactorConstraint__maximumDistance',
'_StructureFactorConstraint__bin',
'_StructureFactorConstraint__shellCenters',
'_StructureFactorConstraint__histogramSize',
'_StructureFactorConstraint__shellVolumes',
'_StructureFactorConstraint__Gr2SqMatrix',
'_StructureFactorConstraint__windowFunction',
'_elementsWeight',] )
RUNTIME_DATA = [d for d in self.RUNTIME_DATA]
RUNTIME_DATA.extend( [] )
object.__setattr__(self, 'FRAME_DATA', tuple(FRAME_DATA) )
object.__setattr__(self, 'RUNTIME_DATA', tuple(RUNTIME_DATA) )
def _codify_update__(self, name='constraint', addDependencies=True):
dependencies = []
code = []
if addDependencies:
code.extend(dependencies)
dw = self.dataWeights
if dw is not None:
dw = list(dw)
code.append("dw = {dw}".format(dw=dw))
wf = self.windowFunction
if isinstance(wf, np.ndarray):
code.append("wf = np.array({wf})".format(wf=list(wf)))
else:
code.append("wf = {wf}".format(wf=wf))
code.append("{name}.set_used({val})".format(name=name, val=self.used))
code.append("{name}.set_scale_factor({val})".format(name=name, val=self.scaleFactor))
code.append("{name}.set_adjust_scale_factor({val})".format(name=name, val=self.adjustScaleFactor))
code.append("{name}.set_data_weights(dw)".format(name=name))
code.append("{name}.set_atoms_weight({val})".format(name=name, val=self.atomsWeight))
code.append("{name}.set_window_function(wf)".format(name=name))
code.append("{name}.set_rmin({val})".format(name=name, val=self.rmin))
code.append("{name}.set_rmax({val})".format(name=name, val=self.rmax))
code.append("{name}.set_dr({val})".format(name=name, val=self.dr))
code.append("{name}.set_limits({val})".format(name=name, val=self.limits))
# return
return dependencies, '\n'.join(code)
def _codify__(self, engine, name='constraint', addDependencies=True):
assert isinstance(name, basestring), LOGGER.error("name must be a string")
assert re.match('[a-zA-Z_][a-zA-Z0-9_]*$', name) is not None, LOGGER.error("given name '%s' can't be used as a variable name"%name)
klass = self.__class__.__name__
dependencies = ['import numpy as np','from fullrmc.Constraints import StructureFactorConstraints']
code = []
if addDependencies:
code.extend(dependencies)
x = list(self.experimentalData[:,0])
y = list(self.experimentalData[:,1])
code.append("x = {x}".format(x=x))
code.append("y = {y}".format(y=y))
code.append("d = np.transpose([x,y]).astype(np.float32)")
dw = self.dataWeights
if dw is not None:
dw = list(dw)
code.append("dw = {dw}".format(dw=dw))
wf = self.windowFunction
if isinstance(wf, np.ndarray):
code.append("wf = np.array({wf})".format(wf=list(wf)))
else:
code.append("wf = {wf}".format(wf=wf))
code.append("{name} = {klass}s.{klass}\
(experimentalData=d, dataWeights=dw, weighting='{weighting}', atomsWeight={atomsWeight}, \
rmin={rmin}, rmax={rmax}, dr={dr}, scaleFactor={scaleFactor}, adjustScaleFactor={adjustScaleFactor}, \
windowFunction=wf, limits={limits})".format(name=name, klass=klass,
weighting=self.weighting, atomsWeight=self.atomsWeight, rmin=self.rmin,
rmax=self.rmax, dr=self.dr, scaleFactor=self.scaleFactor,
adjustScaleFactor=self.adjustScaleFactor, limits=self.limits))
code.append("{engine}.add_constraints([{name}])".format(engine=engine, name=name))
# return
return dependencies, '\n'.join(code)
#def __getstate__(self):
# # make sure that __Gr2SqMatrix is not pickled but saved to the disk as None
# state = super(StructureFactorConstraint, self).__getstate__()
# state["_StructureFactorConstraint__Gr2SqMatrix"] = None
# return state
#
#def __setstate__(self, state):
# # make sure to regenerate G(r) to S(q) matrix at loading time
# self.__dict__.update( state )
# self.__set_Gr_2_Sq_matrix()
#
def __set_Gr_2_Sq_matrix(self):
if self.__experimentalQValues is None or self.__shellCenters is None:
self.__Gr2SqMatrix = None
else:
Qs = self.__experimentalQValues
Rs = self.__shellCenters
dr = self.__shellCenters[1]-self.__shellCenters[0]
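            # descriptive note: the lines below build the sine Fourier kernel
            # M[i,j] = dr*sin(Q_j*r_i)/Q_j so that, up to the additive constant
            # handled in _get_Sq_from_Gr, S(Q_j) = sum_i G(r_i)*M[i,j]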
qr = Rs.reshape((-1,1))*(np.ones((len(Rs),1), dtype=FLOAT_TYPE)*Qs)
sinqr = np.sin(qr)
sinqr_q = sinqr/Qs
self.__Gr2SqMatrix = dr*sinqr_q
def __set_weighting_scheme(self):
if self.engine is not None:
self.__elementsPairs = sorted(itertools.combinations_with_replacement(self.engine.elements,2))
#elementsWeight = dict([(el,float(get_element_property(el,self.__weighting))) for el in self.engine.elements])
#self._elementsWeight = dict([(el,self.__atomsWeight.get(el, float(get_element_property(el,self.__weighting)))) for el in self.engine.elements])
self._elementsWeight = get_real_elements_weight(elements=self.engine.elements, weightsDict=self.__atomsWeight, weighting=self.__weighting)
self.__weightingScheme = get_normalized_weighting(numbers=self.engine.numberOfAtomsPerElement, weights=self._elementsWeight)
for k in self.__weightingScheme:
self.__weightingScheme[k] = FLOAT_TYPE(self.__weightingScheme[k])
else:
self.__elementsPairs = None
self.__weightingScheme = None
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__elementsPairs' : self.__elementsPairs,
'_StructureFactorConstraint__weightingScheme': self.__weightingScheme})
def __set_histogram(self):
if self.__minimumDistance is None or self.__maximumDistance is None or self.__bin is None:
self.__shellCenters = None
self.__histogramSize = None
self.__shellVolumes = None
else:
# compute edges
if self.engine is not None and self.rmax is None:
minHalfBox = np.min( [np.linalg.norm(v)/2. for v in self.engine.basisVectors])
self.__edges = np.arange(self.__minimumDistance,minHalfBox, self.__bin).astype(FLOAT_TYPE)
else:
self.__edges = np.arange(self.__minimumDistance, self.__maximumDistance+self.__bin, self.__bin).astype(FLOAT_TYPE)
# adjust rmin and rmax
self.__minimumDistance = self.__edges[0]
self.__maximumDistance = self.__edges[-1]
# compute shellCenters
self.__shellCenters = (self.__edges[0:-1]+self.__edges[1:])/FLOAT_TYPE(2.)
# set histogram size
self.__histogramSize = INT_TYPE( len(self.__edges)-1 )
# set shell centers and volumes
self.__shellVolumes = FLOAT_TYPE(4.0/3.)*PI*((self.__edges[1:])**3 - self.__edges[0:-1]**3)
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__minimumDistance': self.__minimumDistance,
'_StructureFactorConstraint__maximumDistance': self.__maximumDistance,
'_StructureFactorConstraint__shellCenters' : self.__shellCenters,
'_StructureFactorConstraint__histogramSize' : self.__histogramSize,
'_StructureFactorConstraint__shellVolumes' : self.__shellVolumes})
# reset constraint
self.reset_constraint()
# reset sq matrix
self.__set_Gr_2_Sq_matrix()
def _on_collector_reset(self):
pass
@property
def rmin(self):
""" Histogram minimum distance. """
return self.__rmin
@property
def rmax(self):
""" Histogram maximum distance. """
return self.__rmax
@property
def dr(self):
""" Histogram bin size."""
return self.__dr
@property
def bin(self):
""" Computed histogram distance bin size."""
return self.__bin
@property
def minimumDistance(self):
""" Computed histogram minimum distance. """
return self.__minimumDistance
@property
def maximumDistance(self):
""" Computed histogram maximum distance. """
return self.__maximumDistance
@property
def qmin(self):
""" Experimental data reciprocal distances minimum. """
return self.__qmin
@property
def qmax(self):
""" Experimental data reciprocal distances maximum. """
return self.__qmax
@property
def dq(self):
""" Experimental data reciprocal distances bin size. """
return self.__experimentalQValues[1]-self.__experimentalQValues[0]
@property
def experimentalQValues(self):
""" Experimental data used q values. """
return self.__experimentalQValues
@property
def histogramSize(self):
""" Histogram size"""
return self.__histogramSize
@property
def shellCenters(self):
""" Shells center array"""
return self.__shellCenters
@property
def shellVolumes(self):
""" Shells volume array"""
return self.__shellVolumes
@property
def experimentalSF(self):
""" Experimental Structure Factor or S(q)"""
return self.__experimentalSF
@property
def elementsPairs(self):
""" Elements pairs """
return self.__elementsPairs
@property
def atomsWeight(self):
"""Custom atoms weight"""
return self.__atomsWeight
@property
def weighting(self):
""" Elements weighting definition. """
return self.__weighting
@property
def weightingScheme(self):
""" Elements weighting scheme. """
return self.__weightingScheme
@property
def windowFunction(self):
""" Convolution window function. """
return self.__windowFunction
@property
def Gr2SqMatrix(self):
""" G(r) to S(q) transformation matrix."""
return self.__Gr2SqMatrix
@property
def _experimentalX(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalQValues
@property
def _experimentalY(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalSF
@property
def _modelX(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalQValues
def listen(self, message, argument=None):
"""
Listens to any message sent from the Broadcaster.
:Parameters:
#. message (object): Any python object to send to constraint's
listen method.
#. argument (object): Any type of argument to pass to the
listeners.
"""
if message in ("engine set","update pdb","update molecules indexes","update elements indexes","update names indexes"):
self.__set_weighting_scheme()
# reset histogram
if self.engine is not None:
self.__set_histogram()
self.reset_constraint() # ADDED 2017-JAN-08
elif message in("update boundary conditions",):
self.reset_constraint()
def set_rmin(self, rmin):
"""
Set rmin value.
:parameters:
#. rmin (None, number): The minimum distance value to compute G(r)
histogram. If None is given, rmin is computed as
:math:`2 \\pi / Q_{max}`.
"""
if rmin is None:
minimumDistance = FLOAT_TYPE( 2.*PI/self.__qmax )
else:
assert is_number(rmin), LOGGER.error("rmin must be None or a number")
minimumDistance = FLOAT_TYPE(rmin)
if self.__maximumDistance is not None:
assert minimumDistance<self.__maximumDistance, LOGGER.error("rmin must be smaller than rmax %s"%self.__maximumDistance)
self.__rmin = rmin
self.__minimumDistance = minimumDistance
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__rmin': self.__rmin,
'_StructureFactorConstraint__minimumDistance': self.__minimumDistance})
# reset histogram
self.__set_histogram()
def set_rmax(self, rmax):
"""
Set rmax value.
:Parameters:
#. rmax (None, number): The maximum distance value to compute G(r)
histogram. If None is given, rmax is computed as
:math:`2 \\pi / dQ`.
"""
if rmax is None:
dq = self.__experimentalQValues[1]-self.__experimentalQValues[0]
maximumDistance = FLOAT_TYPE( 2.*PI/dq )
else:
assert is_number(rmax), LOGGER.error("rmax must be None or a number")
maximumDistance = FLOAT_TYPE(rmax)
if self.__minimumDistance is not None:
assert maximumDistance>self.__minimumDistance, LOGGER.error("rmax must be bigger than rmin %s"%self.__minimumDistance)
self.__rmax = rmax
self.__maximumDistance = maximumDistance
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__rmax': self.__rmax,
'_StructureFactorConstraint__maximumDistance': self.__maximumDistance})
# reset histogram
self.__set_histogram()
def set_dr(self, dr):
"""
Set dr value.
:Parameters:
#. dr (None, number): The distance bin value to compute G(r)
histogram. If None is given, bin is computed as
:math:`2 \\pi / (Q_{max}-Q_{min})`.
"""
if dr is None:
bin = 2.*PI/self.__qmax
rbin = round(bin,1)
if rbin>bin:
rbin -= 0.1
bin = FLOAT_TYPE( rbin )
else:
assert is_number(dr), LOGGER.error("dr must be None or a number")
bin = FLOAT_TYPE(dr)
self.__dr = dr
self.__bin = bin
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__dr': self.__dr,
'_StructureFactorConstraint__bin': self.__bin})
# reset histogram
self.__set_histogram()
def set_weighting(self, weighting):
"""
Set elements weighting. It must be a valid entry of pdbparser atom's
database.
:Parameters:
#. weighting (string): The elements weighting scheme. It must be
any atomic attribute (atomicNumber, neutronCohb, neutronIncohb,
neutronCohXs, neutronIncohXs, atomicWeight, covalentRadius)
defined in pdbparser database. In case of xrays or neutrons
experimental weights, one can simply set weighting to 'xrays'
or 'neutrons' and the value will be automatically adjusted to
respectively 'atomicNumber' and 'neutronCohb'. If attribute
values are missing in the pdbparser database, atomic weights
must be given in atomsWeight dictionary argument.
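
        .. code-block:: python

            # a minimal sketch, assuming 'SFC' is an existing constraint instance
            SFC.set_weighting('xrays')    # internally mapped to 'atomicNumber'
            SFC.set_weighting('neutrons') # internally mapped to 'neutronCohb'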
"""
if weighting.lower() in ["xrays","x-rays","xray","x-ray"]:
LOGGER.fixed("'%s' weighting is set to atomicNumber"%weighting)
weighting = "atomicNumber"
elif weighting.lower() in ["neutron","neutrons"]:
LOGGER.fixed("'%s' weighting is set to neutronCohb"%weighting)
weighting = "neutronCohb"
assert is_element_property(weighting),LOGGER.error( "weighting is not a valid pdbparser atoms database entry")
assert weighting != "atomicFormFactor", LOGGER.error("atomicFormFactor weighting is not allowed")
self.__weighting = weighting
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__weighting': self.__weighting})
def set_atoms_weight(self, atomsWeight):
"""
        Set custom atoms weight. This is the way to set atoms weights
        different from the given weighting scheme.
:Parameters:
#. atomsWeight (None, dict): Atoms weight dictionary where keys are
atoms element and values are custom weights. If None is given
or partially given, missing elements weighting will be fully set
given weighting scheme.
"""
if atomsWeight is None:
AW = {}
else:
assert isinstance(atomsWeight, dict),LOGGER.error("atomsWeight must be None or a dictionary")
AW = {}
for k in atomsWeight:
assert isinstance(k, basestring),LOGGER.error("atomsWeight keys must be strings")
try:
val = float(atomsWeight[k])
except:
raise LOGGER.error( "atomsWeight values must be numerical")
AW[k]=val
# set atomsWeight
self.__atomsWeight = AW
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__atomsWeight': self.__atomsWeight})
def set_window_function(self, windowFunction):
"""
Set convolution window function.
:Parameters:
#. windowFunction (None, numpy.ndarray): The window function to
convolute with the computed pair distribution function of the
system prior to comparing it with the experimental data. In
general, the experimental pair distribution function G(r) shows
               artificial wrinkles; among other causes, the main reason is that
               G(r) is computed by applying a sine Fourier transform to the
               experimental structure factor S(q). Therefore a window function is
used to best imitate the numerical artefacts in the experimental
data.
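
        .. code-block:: python

            # a minimal sketch, assuming 'SFC' is an existing constraint
            # instance and that FLOAT_TYPE corresponds to numpy.float32
            import numpy as np
            window = np.sinc(np.linspace(-1, 1, 11)).astype(np.float32)
            window /= np.sum(window)
            SFC.set_window_function(window)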
"""
if windowFunction is not None:
assert isinstance(windowFunction, np.ndarray), LOGGER.error("windowFunction must be a numpy.ndarray")
assert windowFunction.dtype.type is FLOAT_TYPE, LOGGER.error("windowFunction type must be %s"%FLOAT_TYPE)
assert len(windowFunction.shape) == 1, LOGGER.error("windowFunction must be of dimension 1")
assert len(windowFunction) <= self.experimentalData.shape[0], LOGGER.error("windowFunction length must be smaller than experimental data")
# normalize window function
windowFunction /= np.sum(windowFunction)
# check window size
# set windowFunction
self.__windowFunction = windowFunction
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__windowFunction': self.__windowFunction})
def set_experimental_data(self, experimentalData):
"""
Set constraint's experimental data.
:Parameters:
#. experimentalData (numpy.ndarray, string): The experimental
data as numpy.ndarray or string path to load data using
numpy.loadtxt function.
"""
# get experimental data
super(StructureFactorConstraint, self).set_experimental_data(experimentalData=experimentalData)
# set limits
self.set_limits(self.limits)
def set_limits(self, limits):
"""
Set the reciprocal distance limits (qmin, qmax).
:Parameters:
#. limits (None, tuple, list): Distance limits to bound
experimental data and compute histograms.
If None is given, the limits will be automatically set to
min and max reciprocal distance recorded in experimental data.
If given, a tuple of minimum reciprocal distance (qmin) or None
and maximum reciprocal distance (qmax) or None should be given.
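
        .. code-block:: python

            # minimal sketches, assuming 'SFC' is an existing constraint instance
            SFC.set_limits(None)         # use the full experimental Q-range
            SFC.set_limits((0.5, None))  # drop data below q=0.5 (assumed value)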
"""
self._ExperimentalConstraint__set_limits(limits)
# set qvalues
self.__experimentalQValues = self.experimentalData[self.limitsIndexStart:self.limitsIndexEnd+1,0].astype(FLOAT_TYPE)
self.__experimentalSF = self.experimentalData[self.limitsIndexStart:self.limitsIndexEnd+1,1].astype(FLOAT_TYPE)
# set qmin and qmax
self.__qmin = self.__experimentalQValues[0]
self.__qmax = self.__experimentalQValues[-1]
        assert self.__qmin>0, LOGGER.error("qmin must be bigger than 0. Experimental null q values are ambiguous. Try setting limits.")
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__experimentalQValues': self.__experimentalQValues,
'_StructureFactorConstraint__experimentalSF' : self.__experimentalSF,
'_StructureFactorConstraint__qmin' : self.__qmin,
'_StructureFactorConstraint__qmax' : self.__qmax})
# set used dataWeights
self._set_used_data_weights(limitsIndexStart=self.limitsIndexStart, limitsIndexEnd=self.limitsIndexEnd)
# reset constraint
self.reset_constraint()
# reset sq matrix
self.__set_Gr_2_Sq_matrix()
def update_standard_error(self):
""" Compute and set constraint's standardError."""
# set standardError
totalSQ = self.get_constraint_value()["total_no_window"]
self.set_standard_error(self.compute_standard_error(modelData = totalSQ))
def check_experimental_data(self, experimentalData):
"""
Check whether experimental data is correct.
:Parameters:
#. experimentalData (object): The experimental data to check.
:Returns:
#. result (boolean): Whether it is correct or not.
            #. message (str): Checking message that explains what's wrong
               with the given data.
"""
if not isinstance(experimentalData, np.ndarray):
return False, "experimentalData must be a numpy.ndarray"
if experimentalData.dtype.type is not FLOAT_TYPE:
return False, "experimentalData type must be %s"%FLOAT_TYPE
if len(experimentalData.shape) !=2:
return False, "experimentalData must be of dimension 2"
if experimentalData.shape[1] !=2:
return False, "experimentalData must have only 2 columns"
# check distances order
inOrder = (np.array(sorted(experimentalData[:,0]), dtype=FLOAT_TYPE)-experimentalData[:,0])<=PRECISION
if not np.all(inOrder):
return False, "experimentalData distances are not sorted in order"
if experimentalData[0][0]<0:
return False, "experimentalData distances min value is found negative"
# data format is correct
return True, ""
def compute_standard_error(self, modelData):
"""
        Compute the standard error (StdErr) as the sum of weighted squared
        deviations between model computed data and the experimental ones.
.. math::
StdErr = \\sum \\limits_{i}^{N} W_{i}(Y(X_{i})-F(X_{i}))^{2}
Where:\n
:math:`N` is the total number of experimental data points. \n
:math:`W_{i}` is the data point weight. It becomes equivalent to 1 when dataWeights is set to None. \n
:math:`Y(X_{i})` is the experimental data point :math:`X_{i}`. \n
:math:`F(X_{i})` is the computed from the model data :math:`X_{i}`. \n
:Parameters:
#. modelData (numpy.ndarray): The data to compare with the
experimental one and compute the squared deviation.
:Returns:
#. standardError (number): The calculated constraint's
standardError.
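
        .. code-block:: python

            # a minimal sketch of the formula above with uniform weights
            # (dataWeights=None); 'modelData' is any array matching the
            # experimental S(q) in length
            import numpy as np
            stdErr = np.add.reduce((SFC.experimentalSF - modelData)**2)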
"""
# compute difference
diff = self.__experimentalSF-modelData
# return standard error
if self._usedDataWeights is None:
return np.add.reduce((diff)**2)
else:
return np.add.reduce(self._usedDataWeights*((diff)**2))
def _get_Sq_from_Gr(self, Gr):
return np.sum(Gr.reshape((-1,1))*self.__Gr2SqMatrix, axis=0)+1
def _apply_scale_factor(self, Sq, scaleFactor):
if scaleFactor != 1:
Sq = scaleFactor*(Sq-1) + 1
return Sq
def __get_total_Sq(self, data, rho0):
"""This method is created just to speed up the computation of
the total Sq upon fitting."""
Gr = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
for pair in self.__elementsPairs:
# get weighting scheme
wij = self.__weightingScheme.get(pair[0]+"-"+pair[1], None)
if wij is None:
wij = self.__weightingScheme[pair[1]+"-"+pair[0]]
# get number of atoms per element
ni = self.engine.numberOfAtomsPerElement[pair[0]]
nj = self.engine.numberOfAtomsPerElement[pair[1]]
# get index of element
idi = self.engine.elements.index(pair[0])
idj = self.engine.elements.index(pair[1])
# get Nij
if idi == idj:
Nij = ni*(ni-1)/2.0
Dij = Nij/self.engine.volume
nij = data["intra"][idi,idj,:]+data["inter"][idi,idj,:]
Gr += wij*nij/Dij
else:
Nij = ni*nj
Dij = Nij/self.engine.volume
nij = data["intra"][idi,idj,:]+data["intra"][idj,idi,:] + data["inter"][idi,idj,:]+data["inter"][idj,idi,:]
Gr += wij*nij/Dij
        # Divide by shells volume
Gr /= self.shellVolumes
# compute total G(r)
#rho0 = (self.engine.numberOfAtoms/self.engine.volume).astype(FLOAT_TYPE)
Gr = (FLOAT_TYPE(4.)*PI*self.__shellCenters*rho0)*(Gr-1)
# Compute S(q) from G(r)
Sq = self._get_Sq_from_Gr(Gr)
# Multiply by scale factor
self._fittedScaleFactor = self.get_adjusted_scale_factor(self.__experimentalSF, Sq, self._usedDataWeights)
# apply scale factor
Sq = self._apply_scale_factor(Sq, self._fittedScaleFactor)
# apply multiframe prior and weight
Sq = self._apply_multiframe_prior(Sq)
# convolve total with window function
if self.__windowFunction is not None:
Sq = np.convolve(Sq, self.__windowFunction, 'same')
return Sq
def get_adjusted_scale_factor(self, experimentalData, modelData, dataWeights):
"""Overload to reduce S(q) prior to fitting scale factor.
        S(q) -> 1 at high q, and fitting the unreduced S(q) would bias the scale factor.
        Overloading could be avoided but it is kept for performance reasons.
"""
SF = self.scaleFactor
# check to update scaleFactor
if self.adjustScaleFactorFrequency:
if not self.engine.accepted%self.adjustScaleFactorFrequency:
SF = self.fit_scale_factor(experimentalData-1, modelData-1, dataWeights)
return SF
def _get_constraint_value(self, data, applyMultiframePrior=True):
# http://erice2011.docking.org/upload/Other/Billinge_PDF/03-ReadingMaterial/BillingePDF2011.pdf page 6
#import time
#startTime = time.clock()
output = {}
for pair in self.__elementsPairs:
output["sf_intra_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
output["sf_inter_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
output["sf_total_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
gr = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
for pair in self.__elementsPairs:
# get weighting scheme
wij = self.__weightingScheme.get(pair[0]+"-"+pair[1], None)
if wij is None:
wij = self.__weightingScheme[pair[1]+"-"+pair[0]]
# get number of atoms per element
ni = self.engine.numberOfAtomsPerElement[pair[0]]
nj = self.engine.numberOfAtomsPerElement[pair[1]]
# get index of element
idi = self.engine.elements.index(pair[0])
idj = self.engine.elements.index(pair[1])
# get Nij
if idi == idj:
Nij = ni*(ni-1)/2.0
output["sf_intra_%s-%s" % pair] += data["intra"][idi,idj,:]
output["sf_inter_%s-%s" % pair] += data["inter"][idi,idj,:]
else:
Nij = ni*nj
output["sf_intra_%s-%s" % pair] += data["intra"][idi,idj,:] + data["intra"][idj,idi,:]
output["sf_inter_%s-%s" % pair] += data["inter"][idi,idj,:] + data["inter"][idj,idi,:]
# compute g(r)
nij = output["sf_intra_%s-%s" % pair] + output["sf_inter_%s-%s" % pair]
dij = nij/self.__shellVolumes
Dij = Nij/self.engine.volume
gr += wij*dij/Dij
# calculate intensityFactor
intensityFactor = (self.engine.volume*wij)/(Nij*self.__shellVolumes)
# divide by factor
output["sf_intra_%s-%s" % pair] *= intensityFactor
output["sf_inter_%s-%s" % pair] *= intensityFactor
output["sf_total_%s-%s" % pair] = output["sf_intra_%s-%s" % pair] + output["sf_inter_%s-%s" % pair]
# Compute S(q) from G(r)
output["sf_intra_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_intra_%s-%s" % pair])
output["sf_inter_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_inter_%s-%s" % pair])
output["sf_total_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_total_%s-%s" % pair])
# compute total G(r)
rho0 = (self.engine.numberOfAtoms/self.engine.volume).astype(FLOAT_TYPE)
Gr = (FLOAT_TYPE(4.)*PI*self.__shellCenters*rho0) * (gr-1)
# Compute S(q) from G(r)
Sq = self._get_Sq_from_Gr(Gr)
# multiply by scale factor
output["total_no_window"] = self._apply_scale_factor(Sq, self._fittedScaleFactor)
# apply multiframe prior and weight
if applyMultiframePrior:
output["total_no_window"] = self._apply_multiframe_prior(output["total_no_window"])
# convolve total with window function
if self.__windowFunction is not None:
output["total"] = np.convolve(output["total_no_window"], self.__windowFunction, 'same').astype(FLOAT_TYPE)
else:
output["total"] = output["total_no_window"]
return output
def get_constraint_value(self, applyMultiframePrior=True):
"""
        Compute all partial Structure Factors (SQs).
:Parameters:
#. applyMultiframePrior (boolean): Whether to apply subframe weight
and prior to the total. This will only have an effect when used
frame is a subframe and in case subframe weight and prior is
defined.
:Returns:
            #. SQs (dictionary): The SQs dictionary, where keys are the
element wise intra and inter molecular SQs and values are
the computed SQs.
"""
if self.data is None:
LOGGER.warn("data must be computed first using 'compute_data' method.")
return {}
return self._get_constraint_value(self.data, applyMultiframePrior=applyMultiframePrior)
def get_constraint_original_value(self):
"""
        Compute all partial Structure Factors (SQs) from the constraint's
        original data.
        :Returns:
            #. SQs (dictionary): The SQs dictionary, where keys are the
               element wise intra and inter molecular SQs and values are the
               computed SQs.
"""
if self.originalData is None:
LOGGER.warn("originalData must be computed first using 'compute_data' method.")
return {}
return self._get_constraint_value(self.originalData)
@reset_if_collected_out_of_date
def compute_data(self, update=True):
""" Compute constraint's data.
:Parameters:
#. update (boolean): whether to update constraint data and
standard error with new computation. If data is computed and
updated by another thread or process while the stochastic
engine is running, this might lead to a state alteration of
               the constraint which will lead to no additional accepted
               moves in the run.
:Returns:
#. data (dict): constraint data dictionary
#. standardError (float): constraint standard error
"""
intra,inter = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
# create data and compute standard error
data = {"intra":intra, "inter":inter}
totalSQ = self.__get_total_Sq(data, rho0=self.engine.numberDensity)
stdError = self.compute_standard_error(modelData = totalSQ)
# update
if update:
self.set_data(data)
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
self.set_standard_error(stdError)
# set original data
if self.originalData is None:
self._set_original_data(self.data)
# return
return data, stdError
def compute_before_move(self, realIndexes, relativeIndexes):
"""
Compute constraint before move is executed
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Group atoms relative index
the move will be applied to.
"""
intraM,interM = multiple_pairs_histograms_coords( indexes = relativeIndexes,
boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
allAtoms = True,
ncores = self.engine._runtime_ncores )
intraF,interF = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates[relativeIndexes],
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex[relativeIndexes],
elementIndex = self.engine.elementsIndex[relativeIndexes],
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
self.set_active_atoms_data_before_move( {"intra":intraM-intraF, "inter":interM-interF} )
self.set_active_atoms_data_after_move(None)
def compute_after_move(self, realIndexes, relativeIndexes, movedBoxCoordinates):
"""
Compute constraint after move is executed
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Group atoms relative index
the move will be applied to.
#. movedBoxCoordinates (numpy.ndarray): The moved atoms new coordinates.
"""
# change coordinates temporarily
boxData = np.array(self.engine.boxCoordinates[relativeIndexes], dtype=FLOAT_TYPE)
self.engine.boxCoordinates[relativeIndexes] = movedBoxCoordinates
# calculate pair distribution function
intraM,interM = multiple_pairs_histograms_coords( indexes = relativeIndexes,
boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
allAtoms = True,
ncores = self.engine._runtime_ncores )
intraF,interF = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates[relativeIndexes],
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex[relativeIndexes],
elementIndex = self.engine.elementsIndex[relativeIndexes],
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
# set active atoms data
self.set_active_atoms_data_after_move( {"intra":intraM-intraF, "inter":interM-interF} )
# reset coordinates
self.engine.boxCoordinates[relativeIndexes] = boxData
# compute standardError after move
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]+self.activeAtomsDataAfterMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]+self.activeAtomsDataAfterMove["inter"]
totalSQ = self.__get_total_Sq({"intra":dataIntra, "inter":dataInter}, rho0=self.engine.numberDensity)
self.set_after_move_standard_error( self.compute_standard_error(modelData = totalSQ) )
# increment tried
self.increment_tried()
def accept_move(self, realIndexes, relativeIndexes):
"""
Accept move
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Not used here.
"""
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]+self.activeAtomsDataAfterMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]+self.activeAtomsDataAfterMove["inter"]
# change permanently _data
self.set_data( {"intra":dataIntra, "inter":dataInter} )
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
# update standardError
self.set_standard_error( self.afterMoveStandardError )
self.set_after_move_standard_error( None )
# set new scale factor
self._set_fitted_scale_factor_value(self._fittedScaleFactor)
# increment accepted
self.increment_accepted()
def reject_move(self, realIndexes, relativeIndexes):
"""
Reject move
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Not used here.
"""
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
# update standardError
self.set_after_move_standard_error( None )
def compute_as_if_amputated(self, realIndex, relativeIndex):
"""
Compute and return constraint's data and standard error as if
given atom is amputated.
:Parameters:
#. realIndex (numpy.ndarray): Atom's index as a numpy array
of a single element.
#. relativeIndex (numpy.ndarray): Atom's relative index as a
numpy array of a single element.
"""
# compute data
self.compute_before_move(realIndexes=realIndex, relativeIndexes=relativeIndex)
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]
data = {"intra":dataIntra, "inter":dataInter}
# temporarily adjust self.__weightingScheme
weightingScheme = self.__weightingScheme
relativeIndex = relativeIndex[0]
selectedElement = self.engine.allElements[relativeIndex]
self.engine.numberOfAtomsPerElement[selectedElement] -= 1
self.__weightingScheme = get_normalized_weighting(numbers=self.engine.numberOfAtomsPerElement, weights=self._elementsWeight )
for k in self.__weightingScheme:
self.__weightingScheme[k] = FLOAT_TYPE(self.__weightingScheme[k])
## END OF ADDED 08 FEB 2017
# compute standard error
if not self.engine._RT_moveGenerator.allowFittingScaleFactor:
SF = self.adjustScaleFactorFrequency
self._set_adjust_scale_factor_frequency(0)
rho0 = ((self.engine.numberOfAtoms-1)/self.engine.volume).astype(FLOAT_TYPE)
totalSQ = self.__get_total_Sq(data, rho0=rho0)
standardError = self.compute_standard_error(modelData = totalSQ)
if not self.engine._RT_moveGenerator.allowFittingScaleFactor:
self._set_adjust_scale_factor_frequency(SF)
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
# set amputation
self.set_amputation_data( {'data':data, 'weightingScheme':self.__weightingScheme} )
# compute standard error
self.set_amputation_standard_error( standardError )
# reset weightingScheme and number of atoms per element
self.__weightingScheme = weightingScheme
self.engine.numberOfAtomsPerElement[selectedElement] += 1
def accept_amputation(self, realIndex, relativeIndex):
"""
Accept amputated atom and sets constraints data and standard error accordingly.
:Parameters:
#. realIndex (numpy.ndarray): Not used here.
#. relativeIndex (numpy.ndarray): Not used here.
"""
#self.set_data( self.amputationData ) ## COMMENTED 08 FEB 2017
self.set_data( self.amputationData['data'] )
self.__weightingScheme = self.amputationData['weightingScheme']
self.set_standard_error( self.amputationStandardError )
self.set_amputation_data( None )
self.set_amputation_standard_error( None )
# set new scale factor
self._set_fitted_scale_factor_value(self._fittedScaleFactor)
def reject_amputation(self, realIndex, relativeIndex):
"""
Reject amputated atom and set constraint's data and standard
error accordingly.
:Parameters:
#. realIndex (numpy.ndarray): Not used here.
#. relativeIndex (numpy.ndarray): Not used here.
"""
self.set_amputation_data( None )
self.set_amputation_standard_error( None )
def _on_collector_collect_atom(self, realIndex):
pass
def _on_collector_release_atom(self, realIndex):
pass
def _constraint_copy_needs_lut(self):
return {'_StructureFactorConstraint__elementsPairs' :'_StructureFactorConstraint__elementsPairs',
'_StructureFactorConstraint__histogramSize' :'_StructureFactorConstraint__histogramSize',
'_StructureFactorConstraint__weightingScheme' :'_StructureFactorConstraint__weightingScheme',
'_StructureFactorConstraint__shellVolumes' :'_StructureFactorConstraint__shellVolumes',
'_StructureFactorConstraint__shellCenters' :'_StructureFactorConstraint__shellCenters',
'_StructureFactorConstraint__windowFunction' :'_StructureFactorConstraint__windowFunction',
'_StructureFactorConstraint__experimentalQValues' :'_StructureFactorConstraint__experimentalQValues',
'_StructureFactorConstraint__experimentalSF' :'_StructureFactorConstraint__experimentalSF',
'_StructureFactorConstraint__Gr2SqMatrix' :'_StructureFactorConstraint__Gr2SqMatrix',
'_StructureFactorConstraint__minimumDistance' :'_StructureFactorConstraint__minimumDistance',
'_StructureFactorConstraint__maximumDistance' :'_StructureFactorConstraint__maximumDistance',
'_StructureFactorConstraint__bin' :'_StructureFactorConstraint__bin',
'_ExperimentalConstraint__scaleFactor' :'_ExperimentalConstraint__scaleFactor',
'_ExperimentalConstraint__dataWeights' :'_ExperimentalConstraint__dataWeights',
'_ExperimentalConstraint__multiframePrior' :'_ExperimentalConstraint__multiframePrior',
'_ExperimentalConstraint__multiframeWeight' :'_ExperimentalConstraint__multiframeWeight',
'_ExperimentalConstraint__limits' :'_ExperimentalConstraint__limits',
'_ExperimentalConstraint__limitsIndexStart' :'_ExperimentalConstraint__limitsIndexStart',
'_ExperimentalConstraint__limitsIndexEnd' :'_ExperimentalConstraint__limitsIndexEnd',
'_Constraint__used' :'_Constraint__used',
'_Constraint__data' :'_Constraint__data',
'_Constraint__state' :'_Constraint__state',
'_Constraint__standardError' :'_Constraint__standardError',
'_fittedScaleFactor' :'_fittedScaleFactor',
'_usedDataWeights' :'_usedDataWeights',
'_Engine__state' :'_Engine__state',
'_Engine__boxCoordinates' :'_Engine__boxCoordinates',
'_Engine__basisVectors' :'_Engine__basisVectors',
'_Engine__isPBC' :'_Engine__isPBC',
'_Engine__moleculesIndex' :'_Engine__moleculesIndex',
'_Engine__elementsIndex' :'_Engine__elementsIndex',
'_Engine__numberOfAtomsPerElement' :'_Engine__numberOfAtomsPerElement',
'_Engine__elements' :'_Engine__elements',
'_Engine__numberDensity' :'_Engine__numberDensity',
'_Engine__volume' :'_Engine__volume',
'_Engine__realCoordinates' :'_Engine__realCoordinates',
'_atomsCollector' :'_atomsCollector',
('engine','_atomsCollector') :'_atomsCollector',
}
def plot(self, xlabelParams={'xlabel':'$Q(\\AA^{-1})$', 'size':10},
ylabelParams={'ylabel':'$S(Q)$', 'size':10},
**kwargs):
"""
Alias to ExperimentalConstraint.plot with additional parameters
:Additional/Adjusted Parameters:
#. xlabelParams (None, dict): modified matplotlib.axes.Axes.set_xlabel
parameters.
#. ylabelParams (None, dict): modified matplotlib.axes.Axes.set_ylabel
parameters.
"""
return super(StructureFactorConstraint, self).plot(xlabelParams= xlabelParams,
ylabelParams= ylabelParams,
**kwargs)
class ReducedStructureFactorConstraint(StructureFactorConstraint):
"""
The Reduced Structure Factor that we will also note S(Q)
is exactly the same quantity as the Structure Factor but with
the slight difference that it is normalized to 0 rather than 1
and therefore :math:`<S(Q)>=0`.
The computation of S(Q) is done through a Sine inverse Fourier transform
of the computed pair distribution function noted as G(r).
.. math::
S(Q) = \\frac{1}{Q} \\int_{0}^{\\infty} G(r) sin(Qr) dr
    The only reason why the Reduced Structure Factor is implemented is that
    many experimental data are provided in this form and that it is convenient
    not to manipulate the experimental data every time.
    """
def _get_Sq_from_Gr(self, Gr):
return np.sum(Gr.reshape((-1,1))*self.Gr2SqMatrix, axis=0)
def _apply_scale_factor(self, Sq, scaleFactor):
if scaleFactor != 1:
Sq = scaleFactor*Sq
return Sq
def get_adjusted_scale_factor(self, experimentalData, modelData, dataWeights):
""" dummy overload that does exactly the same thing
"""
SF = self.scaleFactor
# check to update scaleFactor
if self.adjustScaleFactorFrequency:
if not self.engine.accepted%self.adjustScaleFactorFrequency:
SF = self.fit_scale_factor(experimentalData, modelData, dataWeights)
return SF
def plot(self, xlabelParams={'xlabel':'$Q(\\AA^{-1})$', 'size':10},
ylabelParams={'ylabel':'$S(Q)-1$', 'size':10},
**kwargs):
"""
Alias to ExperimentalConstraint.plot with additional parameters
:Additional/Adjusted Parameters:
#. xlabelParams (None, dict): modified matplotlib.axes.Axes.set_xlabel
parameters.
#. ylabelParams (None, dict): modified matplotlib.axes.Axes.set_ylabel
parameters.
"""
return super(StructureFactorConstraint, self).plot(xlabelParams= xlabelParams,
ylabelParams= ylabelParams,
**kwargs)
| agpl-3.0 |
asdf123101/HDPG1D | hdpg1d/adaptation.py | 1 | 8070 | import numpy as np
from numpy import concatenate as cat
from scipy.sparse import csr_matrix
import scipy.sparse.linalg as spla
from copy import copy
import matplotlib.pyplot as plt
import warnings
from .preprocess import shape, discretization, boundaryCondition
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# supress the deprecation warning
warnings.filterwarnings("ignore", ".*GUI is implemented.*")
class hdpg1d(object):
"""
1D HDG solver
"""
def __init__(self, coeff):
self.numEle = coeff.numEle
self.numBasisFuncs = coeff.pOrder + 1
self.coeff = coeff
self.mesh = np.linspace(0, 1, self.numEle + 1)
self.enrichOrder = 1
self.primalSoln = None
self.adjointSoln = None
self.estErrorList = [[], []]
self.trueErrorList = [[], []]
def separateSoln(self, soln):
"""Separate gradState (q and u), stateFace from the given soln"""
gradState, stateFace = np.split(
soln, [len(soln) - self.numEle + 1])
return gradState, stateFace
def plotState(self, counter):
"""Plot solution u with smooth higher oredr quadrature"""
stateSmooth = np.array([])
stateNode = np.zeros(self.numEle + 1)
xSmooth = np.array([])
gradState, _ = self.separateSoln(self.primalSoln)
halfLenState = int(len(gradState) / 2)
state = gradState[halfLenState:2 * halfLenState]
# quadrature rule
gorder = 10 * self.numBasisFuncs
xi, wi = np.polynomial.legendre.leggauss(gorder)
shp, shpx = shape(xi, self.numBasisFuncs)
for j in range(1, self.numEle + 1):
xSmooth = np.hstack((xSmooth, (self.mesh[(j - 1)] + self.mesh[j]) / 2 + (
self.mesh[j] - self.mesh[j - 1]) / 2 * xi))
stateSmooth = np.hstack(
(stateSmooth, shp.T.dot(state[(j - 1) * self.numBasisFuncs:j * self.numBasisFuncs])))
stateNode[j - 1] = state[(j - 1) * self.numBasisFuncs]
stateNode[-1] = state[-1]
plt.figure(1)
plt.plot(xSmooth, stateSmooth, '-', color='C3')
plt.plot(self.mesh, stateNode, 'C3.')
plt.xlabel('$x$', fontsize=17)
plt.ylabel('$u$', fontsize=17)
# plt.axis([-0.05, 1.05, 0, 1.3])
plt.grid()
plt.pause(5e-1)
def meshAdapt(self, index):
"""Given the index list, adapt the mesh"""
inValue = np.zeros(len(index))
for i in np.arange(len(index)):
inValue[i] = (self.mesh[index[i]] +
self.mesh[index[i] - 1]) / 2
self.mesh = np.sort(np.insert(self.mesh, 0, inValue))
def solvePrimal(self):
"""Solve the primal problem"""
if 'matLocal' in locals():
# if matLocal exists,
# only change the mesh instead of initializing again
matLocal.mesh = self.mesh
else:
matLocal = discretization(self.coeff, self.mesh)
matGroup = matLocal.matGroup()
A, B, _, C, D, E, F, G, H, L, R = matGroup
# solve by exploiting the local global separation
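        # (descriptive note) this is a static condensation / Schur complement
        # step: the element-local unknowns (q, u) are eliminated so that the
        # global system K only involves the trace unknowns on element faces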
K = -cat((C.T, G), axis=1)\
.dot(np.linalg.inv(np.bmat([[A, -B], [B.T, D]]))
.dot(cat((C, E)))) + H
sK = csr_matrix(K)
F_hat = np.array([L]).T - cat((C.T, G), axis=1)\
.dot(np.linalg.inv(np.bmat([[A, -B], [B.T, D]])))\
.dot(np.array([cat((R, F))]).T)
def invRHS(vec):
"""Construct preconditioner"""
matVec = spla.spsolve(sK, vec)
return matVec
n = len(F_hat)
preconditioner = spla.LinearOperator((n, n), invRHS)
stateFace = spla.gmres(sK, F_hat, M=preconditioner)[0]
# stateFace = np.linalg.solve(K, F_hat)
gradState = np.linalg.inv(np.asarray(np.bmat([[A, -B], [B.T, D]]))).dot(
cat((R, F)) - cat((C, E)).dot(stateFace))
self.primalSoln = cat((gradState, stateFace))
def solveAdjoint(self):
"""Solve the adjoint problem"""
# solve in the enriched space
_coeff = copy(self.coeff)
_coeff.pOrder = _coeff.pOrder + 1
if 'matAdjoint' in locals():
matAdjoint.mesh = self.mesh
else:
matAdjoint = discretization(_coeff, self.mesh)
matGroup = matAdjoint.matGroup()
A, B, _, C, D, E, F, G, H, L, R = matGroup
# add adjoint LHS conditions
F = np.zeros(len(F))
R[-1] = -boundaryCondition('adjoint')[1]
# assemble global matrix LHS
LHS = np.bmat([[A, -B, C],
[B.T, D, E],
[C.T, G, H]])
sLHS = csr_matrix(LHS)
RHS = cat((R, F, L))
        # solve in one shot using GMRES
def invRHS(vec):
"""Construct preconditioner"""
matVec = spla.spsolve(sLHS, vec)
return matVec
n = len(RHS)
preconditioner = spla.LinearOperator((n, n), invRHS)
soln = spla.gmres(sLHS, RHS, M=preconditioner)[0]
# soln = np.linalg.solve(LHS.T, RHS)
self.adjointSoln = soln
def DWResidual(self):
if 'matResidual' in locals():
matResidual.mesh = self.mesh
else:
matResidual = discretization(
self.coeff, self.mesh, self.enrichOrder)
matGroup = matResidual.matGroup()
A, B, BonQ, C, D, E, F, G, H, L, R = matGroup
LHS = np.bmat([[A, -B, C],
[BonQ, D, E]])
RHS = cat((R, F))
residual = np.zeros(self.numEle)
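        # dual-weighted residual (DWR) estimate: the primal residual of each
        # element is weighted by the enriched adjoint solution to estimate the
        # element's contribution to the error in the target functional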
numEnrich = self.numBasisFuncs + self.enrichOrder
adjointGradState, adjointStateFace = self.separateSoln(
self.adjointSoln)
for i in np.arange(self.numEle):
primalResidual = (LHS.dot(self.primalSoln) - RHS).A1
uLength = self.numEle * numEnrich
stepLength = i * numEnrich
uDWR = primalResidual[stepLength:stepLength + numEnrich].dot(
(1 - adjointGradState)[stepLength:stepLength + numEnrich])
qDWR = primalResidual[uLength + stepLength:uLength +
stepLength + numEnrich]\
.dot((1 - adjointGradState)[uLength + stepLength:uLength +
stepLength + numEnrich])
residual[i] = uDWR + qDWR
# sort residual index
residualIndex = np.argsort(np.abs(residual))
# select top \theta% elements with the largest error
theta = 0.15
refineIndex = residualIndex[
int(self.numEle * (1 - theta)):len(residual)] + 1
return np.abs(np.sum(residual)), refineIndex
def adaptive(self):
TOL = self.coeff.TOL
estError = 10
nodeCount = 0
maxCount = self.coeff.MAXIT
while estError > TOL and nodeCount < maxCount:
# solve
self.solvePrimal()
self.solveAdjoint()
# plot the solution at certain counter
if nodeCount in [0, 4, 9, 19, maxCount]:
plt.clf()
self.plotState(nodeCount)
# record error
self.trueErrorList[0].append(self.numEle)
self.trueErrorList[1].append(
self.primalSoln[self.numEle * self.numBasisFuncs - 1])
estError, index = self.DWResidual()
self.estErrorList[0].append(self.numEle)
self.estErrorList[1].append(estError)
# adapt
index = index.tolist()
self.meshAdapt(index)
self.numEle = self.numEle + len(index)
nodeCount += 1
print("Iteration {}. Estimated target function error {:.3e}."
.format(nodeCount, estError))
if nodeCount == maxCount:
print("Max iteration number is reached "
"while the convergence criterion is not satisfied.\n"
"Check the problem statement or "
"raise the max iteration number, then try again.\n")
| mit |
apdjustino/DRCOG_Urbansim | src/opus_gui/results_manager/run/indicator_framework/visualizer/visualizers/matplotlib_lorenzcurve.py | 1 | 10890 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import os, re, sys, time, traceback
from copy import copy
from opus_gui.results_manager.run.indicator_framework.visualizer.visualizers.abstract_visualization import Visualization
from opus_core.logger import logger
from numpy import array, arange
from numpy import ones, zeros, hstack, vstack
from numpy import trapz, trim_zeros
from pylab import subplot, plot, show
from pylab import xlabel, ylabel, title, text
from pylab import MultipleLocator, FormatStrFormatter
from pylab import savefig, clf, close
class LorenzCurve(Visualization):
def __init__(self, source_data, dataset_name,
attribute = None,
years = None, operation = None, name = None, scale = None,
storage_location = None):
        Visualization.__init__(self, source_data, dataset_name, [attribute],
years, operation, name,
storage_location)
self._values = None
self._ginicoeff = None
def is_single_year_indicator_image_type(self):
return True
def get_file_extension(self):
return 'png'
def get_visualization_shorthand(self):
return 'lorenzcurve'
def get_additional_metadata(self):
return {}
def _create_indicator(self, year):
"""Create a Lorenz Curve for the given indicator,
save it to the cache directory's 'indicators' sub-directory.
"""
attribute_short = self.get_attribute_alias(attribute = self.attributes[0],
year = year)
title = attribute_short + ' ' + str(year)
if self.run_description is not None:
title += '\n' + self.run_description
# Do calculation
# Make fresh copy with dtype float64 to avoid overflows
self._values = array(self._get_indicator(year, wrap = False).astype('float64'))
self._compute_lorenz()
file_path = self.get_file_path(year = year)
self._plot(attribute_short, file_path );
return file_path
def _compute_lorenz(self ):
''' Do the lorenz curve computation and save the result in the corresponding
class variables
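            For example (cf. the unit tests below), incomes [1, 1, 2, 3, 4, 5]
            give self._values = [[0, 1/6, 2/6, 3/6, 4/6, 5/6, 1],
            [0, 1/16, 2/16, 4/16, 7/16, 11/16, 1]] and a Gini coefficient
            of 0.3125.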
'''
self._values.sort()
#remove 0 values from array
self._values = trim_zeros(self._values,'f')
num_values = self._values.size
F = arange(1, num_values + 1, 1, "float64")/num_values
L = self._values.cumsum(dtype="float64")/sum(self._values)
# Add (0, 0) as the first point for completeness (e.g. plotting)
origin = array([[0], [0]])
self._values = vstack((F, L))
self._values = hstack((origin, self._values))
# This is the simple form of (0.5 - integral) / 0.5
self._ginicoeff = 1 - 2 * trapz(self._values[1], self._values[0])
def _plot(self, attribute_name, file_path=None ):
clf() # Clear existing plot
a = self._values[0] * 100
b = self._values[1] * 100
ax = subplot(111)
plot(a, a, 'k--', a, b, 'r')
ax.set_ylim([0,100])
ax.grid(color='0.5', linestyle=':', linewidth=0.5)
xlabel('population')
ylabel(attribute_name)
title('Lorenz curve')
font = {'fontname' : 'Courier',
'color' : 'r',
'fontweight' : 'bold',
'fontsize' : 11
}
box = { 'pad' : 6,
'facecolor' : 'w',
'linewidth' : 1,
'fill' : True
}
text(5, 90, 'Gini coefficient: %(gini)f' % {'gini' : self._ginicoeff}, font, color='k', bbox=box )
majorLocator = MultipleLocator(20)
majorFormatter = FormatStrFormatter('%d %%')
minorLocator = MultipleLocator(5)
ax.xaxis.set_major_locator( majorLocator )
ax.xaxis.set_major_formatter( majorFormatter)
ax.xaxis.set_minor_locator( minorLocator )
ax.yaxis.set_major_locator( majorLocator )
ax.yaxis.set_major_formatter( majorFormatter)
ax.yaxis.set_minor_locator( minorLocator )
if file_path:
savefig(file_path)
close()
else:
show()
import os
from opus_core.tests import opus_unittest
from numpy import allclose
from opus_gui.results_manager.run.indicator_framework.test_classes.abstract_indicator_test import AbstractIndicatorTest
class Tests(AbstractIndicatorTest):
def skip_test_create_indicator(self):
indicator_path = os.path.join(self.temp_cache_path, 'indicators')
self.assert_(not os.path.exists(indicator_path))
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
lorenzcurve.create(False)
self.assert_(os.path.exists(indicator_path))
self.assert_(os.path.exists(os.path.join(indicator_path, 'test__lorenzcurve__attribute__1980.png')))
def skip_test_perfect_equality(self):
"""Perfect equality is when everybody has the same amount of something"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = ones(100)
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result = vstack((arange(0, 101) / 100., arange(0, 101) / 100.))
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_perfect_inequality(self):
"""Perfect inequality is when one person has all of something"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = zeros(100)
incomes[0] = 42
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
#We strip all the zero values, so the result consists of only two values
wanted_result = [[0.,1.],[0.,1.]]
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_small_lorenz(self):
"""Test case for less than 100 people"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([1, 1, 2, 3, 4, 5])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result = array(
[[ 0, 1/6., 2/6., 3/6., 4/6., 5/6., 6/6. ],
[ 0, 1/16., 2/16., 4/16., 7/16., 11/16., 16/16. ]])
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_small_gini(self):
"""Test case for gini coefficient for the small case"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([1, 1, 2, 3, 4, 5])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
self.assertAlmostEqual(lorenzcurve._ginicoeff, 0.3125)
def skip_test_large_lorenz(self):
"""Test case for more than 100 people"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([731, 700, 619, 450, 419, 512, 232, 266, 131, 188,
498, 293, 935, 177, 160, 380, 538, 783, 256, 280,
731, 362, 870, 970, 674, 211, 524, 207, 513, 461,
280, 275, 410, 282, 144, 682, 573, 252, 382, 909,
719, 666, 236, 636, 628, 542, 630, 484, 629, 974,
747, 509, 281, 725, 377, 565, 495, 840, 391, 191,
929, 679, 217, 179, 336, 562, 293, 881, 271, 172,
426, 697, 293, 576, 203, 390, 522, 948, 312, 491,
531, 959, 646, 495, 306, 631, 722, 322, 876, 586,
316, 124, 796, 250, 456, 112, 661, 294, 749, 619,
134, 582, 996, 413, 421, 219, 796, 923, 832, 557])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result_F = arange(0, 111) / 110.
wanted_result_L = array([ 0, 0.00202803, 0.00427335, 0.00664542, 0.00907181, 0.01167928,
0.01457647, 0.01769094, 0.02089595, 0.02413718, 0.02754138,
0.03099989, 0.0346757 , 0.03842393, 0.04224459, 0.0461739 ,
0.05013943, 0.05434035, 0.0586137 , 0.06314055, 0.06770362,
0.07233912, 0.07715569, 0.0820628 , 0.08704234, 0.09211241,
0.09718249, 0.10227067, 0.10737696, 0.11268243, 0.1179879 ,
0.12329338, 0.12861696, 0.13415782, 0.13980734, 0.14552928,
0.15135987, 0.15744396, 0.16399884, 0.17082534, 0.17770615,
0.18462318, 0.19168508, 0.19876507, 0.20618911, 0.21366748,
0.22125448, 0.2288777 , 0.23659146, 0.2447398 , 0.25299678,
0.26134429, 0.27010828, 0.27899902, 0.28796219, 0.29692536,
0.30594285, 0.31515953, 0.32443052, 0.33371962, 0.34317169,
0.35265998, 0.36227502, 0.3720168 , 0.38183102, 0.39191685,
0.40209322, 0.41232391, 0.42269945, 0.43312932, 0.44366784,
0.45427878, 0.46548727, 0.47669576, 0.48806721, 0.49945678,
0.51086445, 0.52229023, 0.53380654, 0.54550393, 0.55747293,
0.56953247, 0.58173686, 0.5940318 , 0.60638105, 0.61900192,
0.63167711, 0.64469634, 0.65776989, 0.67089777, 0.68413428,
0.6973708 , 0.71089704, 0.72445949, 0.7386376 , 0.7530511 ,
0.7674646 , 0.78252997, 0.79774019, 0.81349364, 0.82935574,
0.84530837, 0.86176801, 0.87848115, 0.89530294, 0.91223337,
0.9293992 , 0.94676421, 0.9643284 , 0.98196502, 1. ])
self.assert_(allclose(lorenzcurve._values, vstack((wanted_result_F, wanted_result_L))))
if __name__ == '__main__':
try:
import matplotlib
except:
print 'could not import matplotlib'
else:
opus_unittest.main()
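# Editor's illustrative sketch (not part of the original module): the tests
# above assume a `_compute_lorenz` that sorts the values, accumulates their
# cumulative shares and derives the Gini coefficient.  A minimal stand-alone
# version consistent with the expected results could look like:
#
#   import numpy as np
#
#   def lorenz_and_gini(incomes):
#       values = np.sort(np.asarray(incomes, dtype=float))
#       n = values.size
#       F = np.arange(n + 1) / float(n)                    # population share
#       L = np.concatenate(([0.], np.cumsum(values))) / values.sum()
#       gini = 1.0 - 2.0 * np.trapz(L, F)                  # 1 - 2 * area under curve
#       return np.vstack((F, L)), gini
#
#   # lorenz_and_gini([1, 1, 2, 3, 4, 5])[1] -> 0.3125, matching
#   # skip_test_small_gini above.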
| agpl-3.0 |
NDKoehler/DataScienceBowl2017_7th_place | dsb3_networks/classification/resnet2D_0.7res_80/config_2Dfinal.py | 1 | 3207 | from collections import defaultdict
from datetime import datetime
import json
import tensorflow as tf
import os, sys
import pandas as pd
#config dic
H = defaultdict(lambda: None)
#All possible config options:
H['optimizer'] = 'MomentumOptimizer'#'RMSPropOptimizer'
H['learning_rate'] = 0.001
H['momentum'] = 0.9 #0.99
H['kernel_num'] = 16 #32
H['dropout_keep_prob'] = 1.0
H['gpu_fraction'] = 0.9
H['num_classes'] = 2
H['model_name'] = 'resnet2D'
H['pretrained_checkpoint_dir'] = '../luna_resnet2D/output_dir/gold_prio3_plane_mil0'#../luna_resnet2D/output_dir/gen8_20z_3rot_stage1_deep
H['output_dir'] = 'output_dir/old_but_gold_plane_mil0_b4_init_luna' #cross_crop_retrain_zrot
H['predictions_dir'] = ''
H['allow_soft_placement'] = True
H['log_device_placement'] = False
H['max_steps'] = 35
H['MOVING_AVERAGE_DECAY'] = 0.9
H['BATCH_NORM_CENTER'] = True
H['BATCH_NORM_SCALE'] = True
H['weights_initializer'] = 'xavier_initializer' #'xavier_initializer', 'xavier_initializer_conv2d', 'truncated_normal_initializer'
H['gpus'] = [1]
H['summary_step'] = 10
# list iterator
# H['train_lst'] = '../data/multiview-2/tr.lst'
# H['val_lst'] = '../data/multiview-2/va.lst'
H['train_lst'] = '../../../datapipeline_final/dsb3_0/interpolate_candidates_res07/tr_patients_80.lst'
H['val_lst'] = '../../../datapipeline_final/dsb3_0/interpolate_candidates_res07/va_patients_20.lst'
#tr_path = '/media/niklas/Data_3/dsb3/datapipeline_gen9/dsb3_0/interpolate_candidates/cv5/cv/tr' + str(run_id) + '.lst'
#va_path = '/media/niklas/Data_3/dsb3/datapipeline_gen9/dsb3_0/interpolate_candidates/cv5/cv/va' + str(run_id) + '.lst'
#H['train_lst'] = tr_path
#H['val_lst'] = va_path
H['candidate_mode'] = False
# crossed axes options - cross is centrally cropped -> layers are stacked in z-dim
H['num_crossed_layers'] = 1
H['crossed_axes'] = [0,1,2]
H['rand_drop_planes']=0
H['plane_mil'] = False
# y and x image_shape must be equal -> z has same shape!!!
# you can crop if z, y and x in image_shape are equal and smaller than in in_image_shape
# images
# in_image_shapes[1:] must be equal to len of crop_before_loading_in_RAM_ZminZmaxYminYmaxXminXmax
H['in_image_shape'] = [5, 64, 64, 64, 2] #256
# not working #H['crop_before_loading_in_RAM_ZminZmaxYminYmaxXminXmax'] = [False,False,False,False,False,False] # Default = False or None
H['image_shape'] = [5, 3*H['num_crossed_layers'], 64, 64, 2]
H['label_shape'] = [1] #256
H['batch_size'] = 8
#iterator settings
H['load_in_ram'] = True
# because the operation is time consuming and lossy, rotation is applied around only one randomly chosen axis
H['rand_rot_axes'] = [0]#,1,2] # 0: z, 1: y, 2: x (attention: x and y rotation lasts long)
H['rand_rot'] = True
H['degree_90_rot'] = H['rand_rot']
H['min_rot_angle'] = -10 #degree
H['max_rot_angle'] = 10 #degree
H['rand_mirror_axes'] = [0,1,2] # 0: z, 1: y, 2: x else False
H['rand_cropping_ZminZmaxYminYmaxXminXmax'] = [False,False,False,False,False,False] # crop within given range # default False: full range
H['save_step'] = 10 # saving checkpoint
H['tr_num_examples'] = len(pd.read_csv(H['train_lst'], header=None, sep='\t'))
H['va_num_examples'] = len(pd.read_csv(H['val_lst'], header=None, sep='\t'))
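# Illustrative note (editor's addition, not part of the original config):
# downstream code is assumed to read H like a plain mapping; for example, a
# hypothetical consumer could derive the batched tensor shape as
#
#   batch_shape = [H['batch_size']] + H['image_shape']
#   # -> [8, 5, 3, 64, 64, 2] with the values above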
| mit |
IssamLaradji/scikit-learn | sklearn/tests/test_grid_search.py | 8 | 26766 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import distributions
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans, SpectralClustering
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
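# Illustrative note (editor's addition): the mock above captures the minimal
# interface the search utilities rely on -- ``fit``, ``score`` (or a scoring
# callable), ``get_params`` and ``set_params``.  A hypothetical smoke test
# using it would be:
#
#   gs = GridSearchCV(MockClassifier(), {'foo_param': [1, 2, 3]})
#   gs.fit(X, y)              # X, y are the toy arrays defined below
#   assert gs.best_params_ == {'foo_param': 2}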
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def test_parameter_grid():
"""Test basic properties of ParameterGrid."""
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
has_empty = ParameterGrid([{'C': [1, 10]}, {}])
assert_equal(len(has_empty), 3)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}])
def test_grid_search():
"""Test that the best estimator contains the right value for foo_param"""
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
"""Test search over a "grid" with only one point.
Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
"""
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]})
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
"""Test that grid search can be used for model selection only"""
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
"""Test that grid search will capture errors on data with different
length"""
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
"""Test that grid search works with both dense and sparse matrices"""
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
#np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
"""Test that grid search works when the input features are given in the
form of a precomputed kernel matrix """
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
"""Test that grid search returns an error with a non-square precomputed
training kernel matrix"""
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
"""Test that grid search returns an error when using a kernel_function"""
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
"""Regression test for bug in refitting
Simulates re-fitting a broken estimator; this used to break with
sparse SVMs.
"""
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_X_as_list():
"""Pass X as list in GridSearchCV"""
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
"""Pass y as list in GridSearchCV"""
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_bad_estimator():
# test grid-search with clustering algorithm which doesn't support
# "predict"
sc = SpectralClustering()
grid_search = GridSearchCV(sc, param_grid=dict(gamma=[.1, 1, 10]),
scoring='ari')
assert_raise_message(TypeError, "'score' or a 'predict'", grid_search.fit,
[[1]])
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": distributions.uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=distributions.expon(scale=10),
gamma=distributions.expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
"""Test that a fit search can be pickled"""
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
""" Test search with multi-output estimator"""
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters, cv=cv)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
"""Test predict_proba when disabled on estimator."""
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
""" Test GridSearchCV with Imputer """
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
"""GridSearchCV with on_error != 'raise'
Ensures that a warning is raised and score reset where appropriate.
"""
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
"""GridSearchCV with on_error == 'raise' raises the error"""
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
| bsd-3-clause |
IshankGulati/scikit-learn | benchmarks/bench_plot_svd.py | 72 | 2914 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
import six
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(six.iteritems(results))):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
jdvelasq/cashflows | cashflows/inflation.py | 1 | 4674 | """
Constant dollar transformations
===============================================================================
Overview
-------------------------------------------------------------------------------
The function ``const2curr`` computes the equivalent generic cashflow in current
dollars from a generic cashflow in constant dollars of the date given by
``base_date``. ``inflation`` is the inflation rate per compounding period.
``curr2const`` computes the inverse transformation.
Functions in this module
-------------------------------------------------------------------------------
"""
import pandas as pd
# cashflows.
from cashflows.timeseries import *
from cashflows.rate import *
from cashflows.common import *
def const2curr(cflo, inflation, base_date=0):
"""Converts a cashflow of constant dollars to current dollars
of the time `base_date`.
Args:
cflo (pandas.Series): Generic cashflow.
inflation (pandas.Series): Inflation rate per compounding period.
base_date (int, str): base date.
Returns:
A cashflow in current money (pandas.Series)
**Examples.**
>>> cflo=cashflow(const_value=[100] * 5, start='2000', freq='A')
>>> inflation=interest_rate(const_value=[10, 10, 20, 20, 20], start='2000', freq='A')
>>> const2curr(cflo=cflo, inflation=inflation) # doctest: +NORMALIZE_WHITESPACE
2000 100.00
2001 110.00
2002 132.00
2003 158.40
2004 190.08
Freq: A-DEC, dtype: float64
>>> const2curr(cflo=cflo, inflation=inflation, base_date=0) # doctest: +NORMALIZE_WHITESPACE
2000 100.00
2001 110.00
2002 132.00
2003 158.40
2004 190.08
Freq: A-DEC, dtype: float64
>>> const2curr(cflo=cflo, inflation=inflation, base_date='2000') # doctest: +NORMALIZE_WHITESPACE
2000 100.00
2001 110.00
2002 132.00
2003 158.40
2004 190.08
Freq: A-DEC, dtype: float64
>>> const2curr(cflo=cflo, inflation=inflation, base_date=4) # doctest: +NORMALIZE_WHITESPACE
2000 52.609428
2001 57.870370
2002 69.444444
2003 83.333333
2004 100.000000
Freq: A-DEC, dtype: float64
>>> const2curr(cflo=cflo, inflation=inflation, base_date='2004') # doctest: +NORMALIZE_WHITESPACE
2000 52.609428
2001 57.870370
2002 69.444444
2003 83.333333
2004 100.000000
Freq: A-DEC, dtype: float64
"""
if not isinstance(cflo, pd.Series):
raise TypeError("cflo must be a TimeSeries object")
if not isinstance(inflation, pd.Series):
raise TypeError("inflation must be a TimeSeries object")
verify_period_range([cflo, inflation])
factor = to_compound_factor(prate=inflation, base_date=base_date)
result = cflo.copy()
for time, _ in enumerate(result):
result[time] *= factor[time]
return result
def curr2const(cflo, inflation, base_date=0):
"""Converts a cashflow of current dollars to constant dollars of
the date `base_date`.
Args:
cflo (pandas.Series): Generic cashflow.
inflation (pandas.Series): Inflation rate per compounding period.
base_date (int, str): base date.
Returns:
A cashflow in constant dollars
>>> cflo = cashflow(const_value=[100] * 5, start='2015', freq='A')
>>> inflation = interest_rate(const_value=[10, 10, 20, 20, 20], start='2015', freq='A')
>>> curr2const(cflo=cflo, inflation=inflation) # doctest: +NORMALIZE_WHITESPACE
2015 100.000000
2016 90.909091
2017 75.757576
2018 63.131313
2019 52.609428
Freq: A-DEC, dtype: float64
>>> curr2const(cflo=cflo, inflation=inflation, base_date=4) # doctest: +NORMALIZE_WHITESPACE
2015 190.08
2016 172.80
2017 144.00
2018 120.00
2019 100.00
Freq: A-DEC, dtype: float64
>>> curr2const(cflo=cflo, inflation=inflation, base_date='2017') # doctest: +NORMALIZE_WHITESPACE
2015 132.000000
2016 120.000000
2017 100.000000
2018 83.333333
2019 69.444444
Freq: A-DEC, dtype: float64
"""
if not isinstance(cflo, pd.Series):
raise TypeError("cflo must be a TimeSeries object")
if not isinstance(inflation, pd.Series):
raise TypeError("inflation must be a TimeSeries object")
verify_period_range([cflo, inflation])
factor = to_discount_factor(prate=inflation, base_date=base_date)
result = cflo.copy()
for time, _ in enumerate(result):
result[time] *= factor[time]
return result
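# Editorial note (illustrative, not part of the public API): both functions
# are the two directions of the same relationship.  With per-period
# inflation rates i_k (in percent) and base date b, for t >= b:
#
#   current_t  = constant_t * prod_{k=b+1..t} (1 + i_k / 100)
#   constant_t = current_t  / prod_{k=b+1..t} (1 + i_k / 100)
#
# e.g. 100 constant dollars of 2000 with 10%, 10%, 20% inflation become
# 100 * 1.10 * 1.20 = 132 current dollars in 2002, as in the doctests above.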
if __name__ == "__main__":
import doctest
doctest.testmod()
| mit |
mjudsp/Tsallis | sklearn/datasets/species_distributions.py | 64 | 7917 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.datasets.base import _pkl_filepath
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = DIRECTORY_URL + "samples.zip"
COVERAGES_URL = DIRECTORY_URL + "coverages.zip"
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = int(header[b'NODATA_value'])
if nodata != -9999:
M[M == nodata] = -9999  # normalise the missing-data sentinel
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, archive_path, compress=9)
else:
bunch = joblib.load(archive_path)
return bunch
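# Illustrative usage sketch (editor's addition, assuming this module is
# exposed as ``sklearn.datasets`` like the rest of the loaders):
#
#   data = fetch_species_distributions()
#   xgrid, ygrid = construct_grids(data)
#   # data.coverages.shape == (14, 1592, 1212), as documented above
#   # data.train['species'] holds the species name of each training point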
| bsd-3-clause |
cbertinato/pandas | pandas/tests/io/test_gcs.py | 1 | 2310 | from io import StringIO
import numpy as np
import pytest
from pandas import DataFrame, date_range, read_csv
from pandas.util import _test_decorators as td
from pandas.util.testing import assert_frame_equal
from pandas.io.common import is_gcs_url
def test_is_gcs_url():
assert is_gcs_url("gcs://pandas/somethingelse.com")
assert is_gcs_url("gs://pandas/somethingelse.com")
assert not is_gcs_url("s3://pandas/somethingelse.com")
@td.skip_if_no('gcsfs')
def test_read_csv_gcs(monkeypatch):
df1 = DataFrame({'int': [1, 3], 'float': [2.0, np.nan], 'str': ['t', 's'],
'dt': date_range('2018-06-18', periods=2)})
class MockGCSFileSystem:
def open(*args):
return StringIO(df1.to_csv(index=False))
monkeypatch.setattr('gcsfs.GCSFileSystem', MockGCSFileSystem)
df2 = read_csv('gs://test/test.csv', parse_dates=['dt'])
assert_frame_equal(df1, df2)
@td.skip_if_no('gcsfs')
def test_to_csv_gcs(monkeypatch):
df1 = DataFrame({'int': [1, 3], 'float': [2.0, np.nan], 'str': ['t', 's'],
'dt': date_range('2018-06-18', periods=2)})
s = StringIO()
class MockGCSFileSystem:
def open(*args):
return s
monkeypatch.setattr('gcsfs.GCSFileSystem', MockGCSFileSystem)
df1.to_csv('gs://test/test.csv', index=True)
df2 = read_csv(StringIO(s.getvalue()), parse_dates=['dt'], index_col=0)
assert_frame_equal(df1, df2)
@td.skip_if_no('gcsfs')
def test_gcs_get_filepath_or_buffer(monkeypatch):
df1 = DataFrame({'int': [1, 3], 'float': [2.0, np.nan], 'str': ['t', 's'],
'dt': date_range('2018-06-18', periods=2)})
def mock_get_filepath_or_buffer(*args, **kwargs):
return (StringIO(df1.to_csv(index=False)),
None, None, False)
monkeypatch.setattr('pandas.io.gcs.get_filepath_or_buffer',
mock_get_filepath_or_buffer)
df2 = read_csv('gs://test/test.csv', parse_dates=['dt'])
assert_frame_equal(df1, df2)
@pytest.mark.skipif(td.safe_import('gcsfs'),
reason='Only check when gcsfs not installed')
def test_gcs_not_present_exception():
with pytest.raises(ImportError) as e:
read_csv('gs://test/test.csv')
assert 'gcsfs library is required' in str(e.value)
| bsd-3-clause |
yancz1989/cancer | utilities.py | 1 | 4491 | import SimpleITK as sitk
import numpy as np
import csv
import os
import json
from PIL import Image
import matplotlib.pyplot as plt
from cv2 import imread, imwrite
def load_itk_image(filename):
itkimage = sitk.ReadImage(filename)
numpyImage = sitk.GetArrayFromImage(itkimage)
numpyOrigin = np.array(list(reversed(itkimage.GetOrigin())))
numpySpacing = np.array(list(reversed(itkimage.GetSpacing())))
return numpyImage, numpyOrigin, numpySpacing
def readCSV(filename):
lines = []
with open(filename, "rb") as f:
csvreader = csv.reader(f)
for line in csvreader:
lines.append(line)
return lines
def voxel_2_world(voxel_coord, itkimage):
world_coord = list(reversed(
itkimage.TransformContinuousIndexToPhysicalPoint(list(reversed(voxel_coord)))))
return world_coord
def voxelCoordToWorld(voxelCoord, origin, spacing):
stretchedVoxelCoord = voxelCoord * spacing
worldCoord = stretchedVoxelCoord + origin
return worldCoord
def worldToVoxelCoord(worldCoord, origin, spacing):
stretchedVoxelCoord = np.absolute(worldCoord - origin)
voxelCoord = stretchedVoxelCoord / spacing
return voxelCoord
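# Illustrative check (editor's addition): for positive spacings the two
# conversions above are inverses of each other, e.g.
#
#   origin  = np.array([-100.0, -50.0, -50.0])
#   spacing = np.array([   2.5,   0.7,   0.7])
#   voxel   = np.array([  60.0, 120.0, 200.0])
#   world   = voxelCoordToWorld(voxel, origin, spacing)
#   assert np.allclose(worldToVoxelCoord(world, origin, spacing), voxel)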
def normalizePlanes(npzarray):
maxHU = 400.
minHU = -1000.
npzarray = (npzarray - minHU) / (maxHU - minHU)
npzarray[npzarray > 1] = 1.
npzarray[npzarray < 0] = 0.
return npzarray
def readFileNameMap(map_filename):
file_map = {}
with open(map_filename) as map_file:
file_name_list = json.load(map_file)
for it in file_name_list:
file_map[it['ID_name']] = it['long_name']
return file_map
def parse_image_file(filename):
cols = filename.split("-")
subset = cols[0]
key = cols[1]
z_axis = int(cols[2])
return key, subset[:subset.index('/')], z_axis
def filterBoxes(boxes, threshold):
filtered_boxes = []
for box in boxes:
if box[4] >= threshold:
filtered_boxes.append(box)
return filtered_boxes
def readResultMap(result_filename, file_map, threshold):
result_map = {}
with open(result_filename) as result_file:
result_list = json.load(result_file)
for it in result_list:
filename = it['file']
key = file_map[filename]
key = os.path.splitext(key)[0]
boxes = it['box']
boxes = filterBoxes(boxes, threshold)
if not result_map.get(key):
result_map[key] = []
cols = filename.split('_')
index = int(cols[2])
result_map[key].append((index, boxes))
for key, val in result_map.iteritems():
val.sort()
return result_map
def readImageMap(filename):
lines = readCSV(filename)
result = {}
for line in lines[1:]:
worldCoord = np.asarray(
[float(line[3]), float(line[2]), float(line[1])])
radius = float(line[4]) / 2.0 + 1.0
if not result.get(line[0]):
result[line[0]] = []
result[line[0]].append((worldCoord, radius))
return result
def trans(boxes, H, confs, thr = -1.0):
gw = H['grid_width']
gh = H['grid_height']
cell_pix_size = H['region_size']
rnnl = H['rnn_len']
ncls = H['num_classes']
boxes = np.reshape(boxes, (-1, gh, gw, rnnl, 4))
confs = np.reshape(confs, (-1, gh, gw, rnnl, ncls))
ret = []
for i in range(rnnl):
for y in range(gh):
for x in range(gw):
if np.max(confs[0, y, x, i, 1:]) > thr:
box = boxes[0, y, x, i, :]
abs_cx = int(box[0]) + cell_pix_size/2 + cell_pix_size * x
abs_cy = int(box[1]) + cell_pix_size/2 + cell_pix_size * y
w = box[2]
h = box[3]
ret.append([abs_cx, abs_cy, w, h, np.max(confs[0, y, x, i, 1: ])])
return np.array(ret)
def split(meta_root, samples):
np.random.seed(2012310818)
l = len(samples)
idxes = np.random.permutation(np.arange(l))
train = [samples[i] for i in idxes[0 : int(l * 0.7)]]  # 70% for training
vals = [samples[i] for i in idxes[int(l * 0.7) : ]]  # remaining 30% for validation
with open(meta_root + 'train.json', 'w') as g:
json.dump(train, g)
with open(meta_root + 'vals.json', 'w') as g:
json.dump(vals, g)
def writeCSV(filename, lines):
with open(filename, "wb") as f:
csvwriter = csv.writer(f)
csvwriter.writerows(lines)
def tryFloat(value):
try:
value = float(value)
except:
value = value
return value
def getColumn(lines, columnid, elementType=''):
column = []
for line in lines:
try:
value = line[columnid]
except:
continue
if elementType == 'float':
value = tryFloat(value)
column.append(value)
return column
def mkdir(d):
if not os.path.exists(d):
os.mkdir(d)
| mit |
mjudsp/Tsallis | examples/tree/plot_tree_regression.py | 95 | 1516 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with decision tree.
The :ref:`decision trees <tree>` is
used to fit a sine curve with addition noisy observation. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
thientu/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
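# Illustrative lookup against the SCORERS registry built above (a sketch, not
# a doctest; 'recall_weighted' is one of the names added by the loop):
# >>> weighted_recall = get_scorer('recall_weighted')
# >>> # weighted_recall(fitted_clf, X_test, y_test) returns a float; greater
# >>> # is better for every scorer returned by get_scorer.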
| bsd-3-clause |
legacysurvey/pipeline | py/obiwan/decals_sim_randoms.py | 2 | 11774 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
def add_scatter(ax,x,y,c='b',m='o',lab='',s=80,drawln=False,alpha=1):
ax.scatter(x,y, s=s, lw=2.,facecolors='none',edgecolors=c, marker=m,label=lab,alpha=alpha)
if drawln: ax.plot(x,y, c=c,ls='-')
def draw_unit_sphere(ramin=243.,ramax=246.,dcmin=7.,dcmax=10.,Nran=216000,seed=2015):
'''# from https://github.com/desihub/imaginglss/master/scripts/imglss-mpi-make-random.py'''
rng = np.random.RandomState(seed)
u1,u2= rng.uniform(size=(2, Nran))
#
cmin = np.sin(dcmin*np.pi/180)
cmax = np.sin(dcmax*np.pi/180)
#
RA = ramin + u1*(ramax-ramin)
DEC = 90-np.arccos(cmin+u2*(cmax-cmin))*180./np.pi
return RA,DEC
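# Minimal sanity check for draw_unit_sphere, added for illustration only; it
# is not called by any of the classes below.
def _check_draw_unit_sphere(Nran=10000):
    '''Draws Nran points and asserts they stay inside the requested box.
    Uniformity per unit solid angle follows from sampling sin(dec) uniformly.'''
    ra, dec = draw_unit_sphere(ramin=243., ramax=246., dcmin=7., dcmax=10., Nran=Nran)
    assert ra.min() >= 243. and ra.max() <= 246.
    assert dec.min() >= 7. and dec.max() <= 10.
    return ra, dec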
class QuickRandoms(object):
'''Draw randomly from unit sphere
Example:
qran= QuickRandoms(ramin=243.,ramax=246.,dcmin=7.,dcmax=10.,Nran=216000)
qran.get_randoms()
# save and plot
qran.save_randoms()
qran.plot(xlim=(244.,244.1),ylim=(8.,8.1)) #,xlim=(244.,244.+10./360),ylim=(8.,8.+10./360.))
'''
def __init__(self,ramin=243.,ramax=246.,dcmin=7.,dcmax=10.,Nran=216000):
self.ramin=ramin
self.ramax=ramax
self.dcmin=dcmin
self.dcmax=dcmax
self.Nran=Nran
def get_randoms(self, fn='quick_randoms.pickle'):
if os.path.exists(fn):
ra,dec= self.read_randoms()
else:
ra,dec=draw_unit_sphere(ramin=self.ramin,ramax=self.ramax,\
dcmin=self.dcmin,dcmax=self.dcmax,Nran=self.Nran)
self.ra,self.dec=ra,dec
def save_randoms(self,fn='quick_randoms.pickle'):
if not os.path.exists(fn):
            fout=open(fn, 'wb')
pickle.dump((self.ra,self.dec),fout)
fout.close()
print("Wrote randoms to: %s" % fn)
else:
print("WARNING: %s exists, not overwritting it" % fn)
def read_randoms(self,fn='quick_randoms.pickle'):
print("Reading randoms from %s" % fn)
        fobj=open(fn, 'rb')
ra,dec= pickle.load(fobj)
fobj.close()
return ra,dec
def plot(self,xlim=None,ylim=None,text=''):
fig,ax=plt.subplots()
add_scatter(ax,self.ra,self.dec,c='b',m='.',alpha=0.5)
ax.set_xlabel('RA')
ax.set_ylabel('DEC')
if xlim is not None and ylim is not None:
ax.set_xlim(xlim)
ax.set_ylim(ylim)
text='_xlim%0.5f_%.5f_ylim%.5f_%.5f' % (xlim[0],xlim[1],ylim[0],ylim[1])
plt.savefig("quick_randoms%s.png" % text)
plt.close()
class DesiRandoms(object):
'''Draw randomly from unit sphere & provide 2 masks:
mask1: inbricks -- indices where ra,dec pts are in LegacySurvey bricks
    mask2: inimages -- subset of inbricks where we also have legacy survey imaging data at these ra,dec pts
Example:
ran= DesiRandoms(ramin=243.,ramax=246.,dcmin=7.,dcmax=10.,Nran=216000)
ran.get_randoms()
# save randoms if file not exist and plot
ran.save_randoms()
ran.plot(xlim=(244.,244.1),ylim=(8.,8.1)) #,xlim=(244.,244.+10./360),ylim=(8.,8.+10./360.))
'''
def __init__(self,ramin=243.,ramax=246.,dcmin=7.,dcmax=10.,Nran=216000):
self.ramin=ramin
self.ramax=ramax
self.dcmin=dcmin
self.dcmax=dcmax
self.Nran=Nran
def get_randoms(self,fn='desi_randoms.pickle'):
if os.path.exists(fn):
self.ra,self.dec,self.i_inbricks,self.i_inimages= self.read_randoms()
else:
self.ra,self.dec,self.i_inbricks,self.i_inimages= self.make_randoms()
def save_randoms(self,fn='desi_randoms.pickle'):
if not os.path.exists(fn):
            fout=open(fn, 'wb')
pickle.dump((self.ra,self.dec,self.i_inbricks,self.i_inimages),fout)
fout.close()
print("Wrote: %s" % fn)
else:
print "WARNING: not saving randoms b/c file already exists: %s" % fn
def make_randoms(self):
'''Nran -- # of randoms'''
import imaginglss
from imaginglss.model import dataproduct
from imaginglss.model.datarelease import contains
import h5py
print "Creating %d Randoms" % self.Nran
# dr2 cache
decals = imaginglss.DECALS('/project/projectdirs/desi/users/burleigh/dr3_testdir_for_bb/imaginglss/dr2.conf.py')
foot= decals.datarelease.create_footprint((self.ramin,self.ramax,self.dcmin,self.dcmax))
print('Total sq.deg. covered by Bricks= ',foot.area)
# Sample full ra,dec box
ra,dec=draw_unit_sphere(ramin=self.ramin,ramax=self.ramax,\
dcmin=self.dcmin,dcmax=self.dcmax,Nran=self.Nran)
#randoms = np.empty(len(ra), dtype=dataproduct.RandomCatalogue)
#randoms['RA'] = ra
#randoms['DEC'] = dec
# Get indices of inbrick points, copied from def filter()
coord= np.array((ra,dec))
bid = foot.brickindex.query_internal(coord)
i_inbricks = contains(foot._covered_brickids, bid)
i_inbricks = np.where(i_inbricks)[0]
print('Number Density in bricks= ',len(ra)/foot.area)
        # Restrict to inbricks points that also have imaging data; evaluate depths at ra,dec
coord= coord[:, i_inbricks]
cat_lim = decals.datarelease.read_depths(coord, 'grz')
depth= cat_lim['DECAM_DEPTH'] ** -0.5 / cat_lim['DECAM_MW_TRANSMISSION']
nanmask= np.isnan(depth)
nanmask=np.all(nanmask[:,[1,2,4]],axis=1) # shape (Nran,)
i_inimages= i_inbricks[nanmask == False]
print('ra.size=%d, i_inbricks.size=%d, i_inimages.size=%d' % (ra.size, i_inbricks.size, i_inimages.size))
# We are not using Yu's randoms dtype=dataproduct.RandomCatalogue
#randoms['INTRINSIC_NOISELEVEL'][:, :6] = (cat_lim['DECAM_DEPTH'] ** -0.5 / cat_lim['DECAM_MW_TRANSMISSION'])
#randoms['INTRINSIC_NOISELEVEL'][:, 6:] = 0
#nanmask = np.isnan(randoms['INTRINSIC_NOISELEVEL'])
#randoms['INTRINSIC_NOISELEVEL'][nanmask] = np.inf
print('Total sq.deg. where have imaging data approx.= ',foot.area*(len(ra[i_inimages]))/len(ra))
print('Number Density for sources where have images= ',len(ra[i_inimages])/foot.area)
# save ra,dec,mask to file
#with h5py.File('eboss_randoms.hdf5', 'w') as ff:
# ds = ff.create_dataset('_HEADER', shape=(0,))
# ds.attrs['FootPrintArea'] = decals.datarelease.footprint.area
# ds.attrs['NumberDensity'] = 1.0 * len(randoms) / decals.datarelease.footprint.area
# for column in randoms.dtype.names:
# ds = ff.create_dataset(column, data=randoms[column])
# ds = ff.create_dataset('nanmask', data=nanmask)
return ra,dec, i_inbricks,i_inimages
def plot(self,name='desirandoms.png'):
fig,ax=plt.subplots(1,3,sharey=True,sharex=True,figsize=(15,5))
add_scatter(ax[0],self.ra, self.dec, c='b',m='o')
add_scatter(ax[1],self.ra[self.i_inbricks], self.dec[self.i_inbricks], c='b',m='.')
add_scatter(ax[2],self.ra[self.i_inimages], self.dec[self.i_inimages], c='b',m='.')
for i,title in zip(range(3),['All','in Bricks','in Images']):
ti=ax[i].set_title(title)
xlab=ax[i].set_xlabel('ra')
ax[i].set_ylim((self.dec.min(),self.dec.max()))
ax[i].set_xlim((self.ra.min(),self.ra.max()))
ylab=ax[0].set_ylabel('dec')
plt.savefig(name, bbox_extra_artists=[ti,xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
print "wrote: %s" % name
class Angular_Correlator(object):
'''Compute w(theta) from observed ra,dec and random ra,dec
    uses the Landy-Szalay estimator: (DD - 2DR + RR) / RR
two numerical methods: 1) Yu Feng's kdcount, 2) astroML
Example:
ac= Angular_Correlator(gal_ra,gal_dec,ran_ra,ran_dec)
ac.compute()
ac.plot()
'''
def __init__(self,gal_ra,gal_dec,ran_ra,ran_dec,ncores=1):
self.gal_ra=gal_ra
self.gal_dec=gal_dec
self.ran_ra=ran_ra
self.ran_dec=ran_dec
self.ncores=ncores
def compute(self):
self.theta,self.w={},{}
for key in ['astroML','yu']:
self.theta[key],self.w[key]= self.get_angular_corr(whos=key)
self.plot()
def get_angular_corr(self,whos='yu'):
if whos == 'yu': return self.ac_yu()
elif whos == 'astroML': return self.ac_astroML()
else: raise ValueError()
def ac_astroML(self):
'''from two_point_angular() in astroML/correlation.py'''
from astroML.correlation import two_point,ra_dec_to_xyz,angular_dist_to_euclidean_dist
# 3d project
data = np.asarray(ra_dec_to_xyz(self.gal_ra, self.gal_dec), order='F').T
data_R = np.asarray(ra_dec_to_xyz(self.ran_ra, self.ran_dec), order='F').T
# convert spherical bins to cartesian bins
bins = 10 ** np.linspace(np.log10(1. / 60.), np.log10(6), 16)
bins_transform = angular_dist_to_euclidean_dist(bins)
w= two_point(data, bins_transform, method='landy-szalay',data_R=data_R)
bin_centers = 0.5 * (bins[1:] + bins[:-1])
return bin_centers, w
def ac_yu(self):
from kdcount import correlate
from kdcount import sphere
abin = sphere.AngularBinning(np.logspace(-4, -2.6, 10))
D = sphere.points(self.gal_ra, self.gal_dec)
R = sphere.points(self.ran_ra, self.ran_dec) #weights=wt_array
DD = correlate.paircount(D, D, abin, np=self.ncores)
DR = correlate.paircount(D, R, abin, np=self.ncores)
RR = correlate.paircount(R, R, abin, np=self.ncores)
r = D.norm / R.norm
w= (DD.sum1 - 2 * r * DR.sum1 + r ** 2 * RR.sum1) / (r ** 2 * RR.sum1)
return abin.angular_centers,w
def plot(self,name='wtheta.png'):
fig,ax=plt.subplots()
for key,col,mark in zip(['yu','astroML'],['g','b'],['o']*2):
print "%s: theta,w" % key,self.theta[key],self.w[key]
add_scatter(ax,self.theta[key], self.w[key], c=col,m=mark,lab=key,alpha=0.5)
t = np.array([0.01, 10])
plt.plot(t, 10 * (t / 0.01) ** -0.8, ':k', lw=1)
ax.legend(loc='upper right',scatterpoints=1)
xlab=ax.set_xlabel(r'$\theta$ (deg)')
ylab=ax.set_ylabel(r'$\hat{w}(\theta)$')
ax.set_xscale('log')
ax.set_yscale('log')
plt.savefig(name, bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
print "wrote: %s" % name
def ac_unit_test():
'''angular correlation func unit test'''
qran= QuickRandoms(ramin=243.,ramax=246.,dcmin=7.,dcmax=10.,Nran=216000)
qran.get_randoms()
# subset
index= np.all((qran.ra >= 244.,qran.ra <= 244.5,\
qran.dec >= 8.,qran.dec <= 8.5),axis=0)
ra,dec= qran.ra[index],qran.dec[index]
# use these as Ducks for DesiRandoms
ran= DesiRandoms()
ran.ra,ran.dec= ra,dec
index= np.all((ran.ra >= 244.,ran.ra <= 244.25),axis=0)
ran.i_inbricks= np.where(index)[0]
index= np.all((index,ran.dec >= 8.1,ran.dec <= 8.4),axis=0)
ran.i_inimages= np.where(index)[0]
ran.plot()
# wtheta
ac= Angular_Correlator(ran.ra[ran.i_inimages],ran.dec[ran.i_inimages],ran.ra,ran.dec)
ac.compute()
ac.plot()
    print('finished unit_test')
if __name__ == '__main__':
#ac_unit_test()
#Nran=int(2.4e3*9.)
#ran= DesiRandoms(ramin=243.,ramax=246.,dcmin=7.,dcmax=10.,Nran=216000)
Nran=int(2.4e3*1.e2)
ran= DesiRandoms(ramin=120.,ramax=130.,dcmin=20.,dcmax=30.,Nran=Nran)
ran.get_randoms()
# save randoms if file not exist and plot
ran.save_randoms(fn='desi_randoms_qual.pickle')
ran.plot()
| gpl-2.0 |
abinashpanda/pgmpy | pgmpy/tests/test_models/test_BayesianModel.py | 2 | 25284 | import unittest
import networkx as nx
import pandas as pd
import numpy as np
import numpy.testing as np_test
from pgmpy.models import BayesianModel, MarkovModel
import pgmpy.tests.help_functions as hf
from pgmpy.factors.discrete import TabularCPD, JointProbabilityDistribution, DiscreteFactor
from pgmpy.independencies import Independencies
from pgmpy.estimators import BayesianEstimator, BaseEstimator, MaximumLikelihoodEstimator
class TestBaseModelCreation(unittest.TestCase):
def setUp(self):
self.G = BayesianModel()
def test_class_init_without_data(self):
self.assertIsInstance(self.G, nx.DiGraph)
def test_class_init_with_data_string(self):
self.g = BayesianModel([('a', 'b'), ('b', 'c')])
self.assertListEqual(sorted(self.g.nodes()), ['a', 'b', 'c'])
self.assertListEqual(hf.recursive_sorted(self.g.edges()),
[['a', 'b'], ['b', 'c']])
def test_class_init_with_data_nonstring(self):
BayesianModel([(1, 2), (2, 3)])
def test_add_node_string(self):
self.G.add_node('a')
self.assertListEqual(self.G.nodes(), ['a'])
def test_add_node_nonstring(self):
self.G.add_node(1)
def test_add_nodes_from_string(self):
self.G.add_nodes_from(['a', 'b', 'c', 'd'])
self.assertListEqual(sorted(self.G.nodes()), ['a', 'b', 'c', 'd'])
def test_add_nodes_from_non_string(self):
self.G.add_nodes_from([1, 2, 3, 4])
def test_add_edge_string(self):
self.G.add_edge('d', 'e')
self.assertListEqual(sorted(self.G.nodes()), ['d', 'e'])
self.assertListEqual(self.G.edges(), [('d', 'e')])
self.G.add_nodes_from(['a', 'b', 'c'])
self.G.add_edge('a', 'b')
self.assertListEqual(hf.recursive_sorted(self.G.edges()),
[['a', 'b'], ['d', 'e']])
def test_add_edge_nonstring(self):
self.G.add_edge(1, 2)
def test_add_edge_selfloop(self):
self.assertRaises(ValueError, self.G.add_edge, 'a', 'a')
def test_add_edge_result_cycle(self):
self.G.add_edges_from([('a', 'b'), ('a', 'c')])
self.assertRaises(ValueError, self.G.add_edge, 'c', 'a')
def test_add_edges_from_string(self):
self.G.add_edges_from([('a', 'b'), ('b', 'c')])
self.assertListEqual(sorted(self.G.nodes()), ['a', 'b', 'c'])
self.assertListEqual(hf.recursive_sorted(self.G.edges()),
[['a', 'b'], ['b', 'c']])
self.G.add_nodes_from(['d', 'e', 'f'])
self.G.add_edges_from([('d', 'e'), ('e', 'f')])
self.assertListEqual(sorted(self.G.nodes()),
['a', 'b', 'c', 'd', 'e', 'f'])
self.assertListEqual(hf.recursive_sorted(self.G.edges()),
hf.recursive_sorted([('a', 'b'), ('b', 'c'),
('d', 'e'), ('e', 'f')]))
def test_add_edges_from_nonstring(self):
self.G.add_edges_from([(1, 2), (2, 3)])
def test_add_edges_from_self_loop(self):
self.assertRaises(ValueError, self.G.add_edges_from,
[('a', 'a')])
def test_add_edges_from_result_cycle(self):
self.assertRaises(ValueError, self.G.add_edges_from,
[('a', 'b'), ('b', 'c'), ('c', 'a')])
def test_update_node_parents_bm_constructor(self):
self.g = BayesianModel([('a', 'b'), ('b', 'c')])
self.assertListEqual(self.g.predecessors('a'), [])
self.assertListEqual(self.g.predecessors('b'), ['a'])
self.assertListEqual(self.g.predecessors('c'), ['b'])
def test_update_node_parents(self):
self.G.add_nodes_from(['a', 'b', 'c'])
self.G.add_edges_from([('a', 'b'), ('b', 'c')])
self.assertListEqual(self.G.predecessors('a'), [])
self.assertListEqual(self.G.predecessors('b'), ['a'])
self.assertListEqual(self.G.predecessors('c'), ['b'])
def tearDown(self):
del self.G
class TestBayesianModelMethods(unittest.TestCase):
def setUp(self):
self.G = BayesianModel([('a', 'd'), ('b', 'd'),
('d', 'e'), ('b', 'c')])
self.G1 = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
diff_cpd = TabularCPD('diff', 2, values=[[0.2], [0.8]])
intel_cpd = TabularCPD('intel', 3, values=[[0.5], [0.3], [0.2]])
grade_cpd = TabularCPD('grade', 3, values=[[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
evidence=['diff', 'intel'], evidence_card=[2, 3])
self.G1.add_cpds(diff_cpd, intel_cpd, grade_cpd)
def test_moral_graph(self):
moral_graph = self.G.moralize()
self.assertListEqual(sorted(moral_graph.nodes()), ['a', 'b', 'c', 'd', 'e'])
for edge in moral_graph.edges():
self.assertTrue(edge in [('a', 'b'), ('a', 'd'), ('b', 'c'), ('d', 'b'), ('e', 'd')] or
(edge[1], edge[0]) in [('a', 'b'), ('a', 'd'), ('b', 'c'), ('d', 'b'), ('e', 'd')])
def test_moral_graph_with_edge_present_over_parents(self):
G = BayesianModel([('a', 'd'), ('d', 'e'), ('b', 'd'), ('b', 'c'), ('a', 'b')])
moral_graph = G.moralize()
self.assertListEqual(sorted(moral_graph.nodes()), ['a', 'b', 'c', 'd', 'e'])
for edge in moral_graph.edges():
self.assertTrue(edge in [('a', 'b'), ('c', 'b'), ('d', 'a'), ('d', 'b'), ('d', 'e')] or
(edge[1], edge[0]) in [('a', 'b'), ('c', 'b'), ('d', 'a'), ('d', 'b'), ('d', 'e')])
def test_local_independencies(self):
self.assertEqual(self.G.local_independencies('a'), Independencies(['a', ['b', 'c']]))
self.assertEqual(self.G.local_independencies('c'), Independencies(['c', ['a', 'd', 'e'], 'b']))
self.assertEqual(self.G.local_independencies('d'), Independencies(['d', 'c', ['b', 'a']]))
self.assertEqual(self.G.local_independencies('e'), Independencies(['e', ['c', 'b', 'a'], 'd']))
self.assertEqual(self.G.local_independencies('b'), Independencies(['b', 'a']))
self.assertEqual(self.G1.local_independencies('grade'), Independencies())
def test_get_independencies(self):
chain = BayesianModel([('X', 'Y'), ('Y', 'Z')])
self.assertEqual(chain.get_independencies(), Independencies(('X', 'Z', 'Y'), ('Z', 'X', 'Y')))
fork = BayesianModel([('Y', 'X'), ('Y', 'Z')])
self.assertEqual(fork.get_independencies(), Independencies(('X', 'Z', 'Y'), ('Z', 'X', 'Y')))
collider = BayesianModel([('X', 'Y'), ('Z', 'Y')])
self.assertEqual(collider.get_independencies(), Independencies(('X', 'Z'), ('Z', 'X')))
def test_is_imap(self):
val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032,
0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128]
JPD = JointProbabilityDistribution(['diff', 'intel', 'grade'], [2, 3, 3], val)
fac = DiscreteFactor(['diff', 'intel', 'grade'], [2, 3, 3], val)
self.assertTrue(self.G1.is_imap(JPD))
self.assertRaises(TypeError, self.G1.is_imap, fac)
def test_get_immoralities(self):
G = BayesianModel([('x', 'y'), ('z', 'y'), ('x', 'z'), ('w', 'y')])
self.assertEqual(G.get_immoralities(), {('w', 'x'), ('w', 'z')})
G1 = BayesianModel([('x', 'y'), ('z', 'y'), ('z', 'x'), ('w', 'y')])
self.assertEqual(G1.get_immoralities(), {('w', 'x'), ('w', 'z')})
G2 = BayesianModel([('x', 'y'), ('z', 'y'), ('x', 'z'), ('w', 'y'), ('w', 'x')])
self.assertEqual(G2.get_immoralities(), {('w', 'z')})
def test_is_iequivalent(self):
G = BayesianModel([('x', 'y'), ('z', 'y'), ('x', 'z'), ('w', 'y')])
self.assertRaises(TypeError, G.is_iequivalent, MarkovModel())
G1 = BayesianModel([('V', 'W'), ('W', 'X'), ('X', 'Y'), ('Z', 'Y')])
G2 = BayesianModel([('W', 'V'), ('X', 'W'), ('X', 'Y'), ('Z', 'Y')])
self.assertTrue(G1.is_iequivalent(G2))
G3 = BayesianModel([('W', 'V'), ('W', 'X'), ('Y', 'X'), ('Z', 'Y')])
self.assertFalse(G3.is_iequivalent(G2))
def tearDown(self):
del self.G
del self.G1
class TestBayesianModelCPD(unittest.TestCase):
def setUp(self):
self.G = BayesianModel([('d', 'g'), ('i', 'g'), ('g', 'l'),
('i', 's')])
def test_active_trail_nodes(self):
self.assertEqual(sorted(self.G.active_trail_nodes('d')), ['d', 'g', 'l'])
self.assertEqual(sorted(self.G.active_trail_nodes('i')), ['g', 'i', 'l', 's'])
def test_active_trail_nodes_args(self):
self.assertEqual(sorted(self.G.active_trail_nodes('d', observed='g')), ['d', 'i', 's'])
self.assertEqual(sorted(self.G.active_trail_nodes('l', observed='g')), ['l'])
self.assertEqual(sorted(self.G.active_trail_nodes('s', observed=['i', 'l'])), ['s'])
self.assertEqual(sorted(self.G.active_trail_nodes('s', observed=['d', 'l'])), ['g', 'i', 's'])
def test_is_active_trail_triplets(self):
self.assertTrue(self.G.is_active_trail('d', 'l'))
self.assertTrue(self.G.is_active_trail('g', 's'))
self.assertFalse(self.G.is_active_trail('d', 'i'))
self.assertTrue(self.G.is_active_trail('d', 'i', observed='g'))
self.assertFalse(self.G.is_active_trail('d', 'l', observed='g'))
self.assertFalse(self.G.is_active_trail('i', 'l', observed='g'))
self.assertTrue(self.G.is_active_trail('d', 'i', observed='l'))
self.assertFalse(self.G.is_active_trail('g', 's', observed='i'))
def test_is_active_trail(self):
self.assertFalse(self.G.is_active_trail('d', 's'))
self.assertTrue(self.G.is_active_trail('s', 'l'))
self.assertTrue(self.G.is_active_trail('d', 's', observed='g'))
self.assertFalse(self.G.is_active_trail('s', 'l', observed='g'))
def test_is_active_trail_args(self):
self.assertFalse(self.G.is_active_trail('s', 'l', 'i'))
self.assertFalse(self.G.is_active_trail('s', 'l', 'g'))
self.assertTrue(self.G.is_active_trail('d', 's', 'l'))
self.assertFalse(self.G.is_active_trail('d', 's', ['i', 'l']))
def test_get_cpds(self):
cpd_d = TabularCPD('d', 2, values=np.random.rand(2, 1))
cpd_i = TabularCPD('i', 2, values=np.random.rand(2, 1))
cpd_g = TabularCPD('g', 2, values=np.random.rand(2, 4),
evidence=['d', 'i'], evidence_card=[2, 2])
cpd_l = TabularCPD('l', 2, values=np.random.rand(2, 2),
evidence=['g'], evidence_card=[2])
cpd_s = TabularCPD('s', 2, values=np.random.rand(2, 2),
evidence=['i'], evidence_card=[2])
self.G.add_cpds(cpd_d, cpd_i, cpd_g, cpd_l, cpd_s)
self.assertEqual(self.G.get_cpds('d').variable, 'd')
def test_get_cpds1(self):
self.model = BayesianModel([('A', 'AB')])
cpd_a = TabularCPD('A', 2, values=np.random.rand(2, 1))
cpd_ab = TabularCPD('AB', 2, values=np.random.rand(2, 2),
evidence=['A'], evidence_card=[2])
self.model.add_cpds(cpd_a, cpd_ab)
self.assertEqual(self.model.get_cpds('A').variable, 'A')
self.assertEqual(self.model.get_cpds('AB').variable, 'AB')
self.assertRaises(ValueError, self.model.get_cpds, 'B')
self.model.add_node('B')
self.assertRaises(ValueError, self.model.get_cpds, 'B')
def test_add_single_cpd(self):
cpd_s = TabularCPD('s', 2, np.random.rand(2, 2), ['i'], [2])
self.G.add_cpds(cpd_s)
self.assertListEqual(self.G.get_cpds(), [cpd_s])
def test_add_multiple_cpds(self):
cpd_d = TabularCPD('d', 2, values=np.random.rand(2, 1))
cpd_i = TabularCPD('i', 2, values=np.random.rand(2, 1))
cpd_g = TabularCPD('g', 2, values=np.random.rand(2, 4),
evidence=['d', 'i'], evidence_card=[2, 2])
cpd_l = TabularCPD('l', 2, values=np.random.rand(2, 2),
evidence=['g'], evidence_card=[2])
cpd_s = TabularCPD('s', 2, values=np.random.rand(2, 2),
evidence=['i'], evidence_card=[2])
self.G.add_cpds(cpd_d, cpd_i, cpd_g, cpd_l, cpd_s)
self.assertEqual(self.G.get_cpds('d'), cpd_d)
self.assertEqual(self.G.get_cpds('i'), cpd_i)
self.assertEqual(self.G.get_cpds('g'), cpd_g)
self.assertEqual(self.G.get_cpds('l'), cpd_l)
self.assertEqual(self.G.get_cpds('s'), cpd_s)
def test_check_model(self):
cpd_g = TabularCPD('g', 2, values=np.array([[0.2, 0.3, 0.4, 0.6],
[0.8, 0.7, 0.6, 0.4]]),
evidence=['d', 'i'], evidence_card=[2, 2])
cpd_s = TabularCPD('s', 2, values=np.array([[0.2, 0.3],
[0.8, 0.7]]),
evidence=['i'], evidence_card=[2])
cpd_l = TabularCPD('l', 2, values=np.array([[0.2, 0.3],
[0.8, 0.7]]),
evidence=['g'], evidence_card=[2])
self.G.add_cpds(cpd_g, cpd_s, cpd_l)
self.assertRaises(ValueError, self.G.check_model)
cpd_d = TabularCPD('d', 2, values=[[0.8, 0.2]])
cpd_i = TabularCPD('i', 2, values=[[0.7, 0.3]])
self.G.add_cpds(cpd_d, cpd_i)
self.assertTrue(self.G.check_model())
def test_check_model1(self):
cpd_g = TabularCPD('g', 2, values=np.array([[0.2, 0.3],
[0.8, 0.7]]),
evidence=['i'], evidence_card=[2])
self.G.add_cpds(cpd_g)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_g)
cpd_g = TabularCPD('g', 2, values=np.array([[0.2, 0.3, 0.4, 0.6],
[0.8, 0.7, 0.6, 0.4]]),
evidence=['d', 's'], evidence_card=[2, 2])
self.G.add_cpds(cpd_g)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_g)
cpd_g = TabularCPD('g', 2, values=np.array([[0.2, 0.3],
[0.8, 0.7]]),
evidence=['l'], evidence_card=[2])
self.G.add_cpds(cpd_g)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_g)
cpd_l = TabularCPD('l', 2, values=np.array([[0.2, 0.3],
[0.8, 0.7]]),
evidence=['d'], evidence_card=[2])
self.G.add_cpds(cpd_l)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_l)
cpd_l = TabularCPD('l', 2, values=np.array([[0.2, 0.3, 0.4, 0.6],
[0.8, 0.7, 0.6, 0.4]]),
evidence=['d', 'i'], evidence_card=[2, 2])
self.G.add_cpds(cpd_l)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_l)
cpd_l = TabularCPD('l', 2, values=np.array([[0.2, 0.3, 0.4, 0.6, 0.2, 0.3, 0.4, 0.6],
[0.8, 0.7, 0.6, 0.4, 0.8, 0.7, 0.6, 0.4]]),
evidence=['g', 'd', 'i'], evidence_card=[2, 2, 2])
self.G.add_cpds(cpd_l)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_l)
def test_check_model2(self):
cpd_s = TabularCPD('s', 2, values=np.array([[0.5, 0.3],
[0.8, 0.7]]),
evidence=['i'], evidence_card=[2])
self.G.add_cpds(cpd_s)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_s)
cpd_g = TabularCPD('g', 2, values=np.array([[0.2, 0.3, 0.4, 0.6],
[0.3, 0.7, 0.6, 0.4]]),
evidence=['d', 'i'], evidence_card=[2, 2])
self.G.add_cpds(cpd_g)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_g)
cpd_l = TabularCPD('l', 2, values=np.array([[0.2, 0.3],
[0.1, 0.7]]),
evidence=['g'], evidence_card=[2])
self.G.add_cpds(cpd_l)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_l)
def tearDown(self):
del self.G
class TestBayesianModelFitPredict(unittest.TestCase):
def setUp(self):
self.model_disconnected = BayesianModel()
self.model_disconnected.add_nodes_from(['A', 'B', 'C', 'D', 'E'])
self.model_connected = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
self.model2 = BayesianModel([('A', 'C'), ('B', 'C')])
self.data1 = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0]})
self.data2 = pd.DataFrame(data={'A': [0, np.NaN, 1],
'B': [0, 1, 0],
'C': [1, 1, np.NaN],
'D': [np.NaN, 'Y', np.NaN]})
def test_bayesian_fit(self):
print(isinstance(BayesianEstimator, BaseEstimator))
print(isinstance(MaximumLikelihoodEstimator, BaseEstimator))
self.model2.fit(self.data1, estimator_type=BayesianEstimator, prior_type="dirichlet", pseudo_counts=[9, 3])
self.assertEqual(self.model2.get_cpds('B'), TabularCPD('B', 2, [[11.0 / 15], [4.0 / 15]]))
def test_fit_missing_data(self):
self.model2.fit(self.data2, state_names={'C': [0, 1]}, complete_samples_only=False)
cpds = set([TabularCPD('A', 2, [[0.5], [0.5]]),
TabularCPD('B', 2, [[2. / 3], [1. / 3]]),
TabularCPD('C', 2, [[0, 0.5, 0.5, 0.5], [1, 0.5, 0.5, 0.5]],
evidence=['A', 'B'], evidence_card=[2, 2])])
self.assertSetEqual(cpds, set(self.model2.get_cpds()))
def test_disconnected_fit(self):
values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
columns=['A', 'B', 'C', 'D', 'E'])
self.model_disconnected.fit(values)
for node in ['A', 'B', 'C', 'D', 'E']:
cpd = self.model_disconnected.get_cpds(node)
self.assertEqual(cpd.variable, node)
np_test.assert_array_equal(cpd.cardinality, np.array([2]))
value = (values.ix[:, node].value_counts() /
values.ix[:, node].value_counts().sum())
value = value.reindex(sorted(value.index)).values
np_test.assert_array_equal(cpd.values, value)
def test_connected_predict(self):
np.random.seed(42)
values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
columns=['A', 'B', 'C', 'D', 'E'])
fit_data = values[:800]
predict_data = values[800:].copy()
self.model_connected.fit(fit_data)
self.assertRaises(ValueError, self.model_connected.predict, predict_data)
predict_data.drop('E', axis=1, inplace=True)
e_predict = self.model_connected.predict(predict_data)
np_test.assert_array_equal(e_predict.values.ravel(),
np.array([1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1,
1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0,
0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0,
0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1,
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1,
1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1,
1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0,
1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1,
0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1,
1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1,
1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1,
0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0,
1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1,
1, 1, 1, 0]))
def tearDown(self):
del self.model_connected
del self.model_disconnected
class TestDirectedGraphCPDOperations(unittest.TestCase):
def setUp(self):
self.graph = BayesianModel()
def test_add_single_cpd(self):
cpd = TabularCPD('grade', 2, values=np.random.rand(2, 4),
evidence=['diff', 'intel'], evidence_card=[2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd)
self.assertListEqual(self.graph.get_cpds(), [cpd])
def test_add_multiple_cpds(self):
cpd1 = TabularCPD('diff', 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, values=np.random.rand(2, 4),
evidence=['diff', 'intel'], evidence_card=[2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertListEqual(self.graph.get_cpds(), [cpd1, cpd2, cpd3])
def test_remove_single_cpd(self):
cpd1 = TabularCPD('diff', 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, values=np.random.rand(2, 4),
evidence=['diff', 'intel'], evidence_card=[2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds(cpd1)
self.assertListEqual(self.graph.get_cpds(), [cpd2, cpd3])
def test_remove_multiple_cpds(self):
cpd1 = TabularCPD('diff', 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, values=np.random.rand(2, 4),
evidence=['diff', 'intel'], evidence_card=[2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds(cpd1, cpd3)
self.assertListEqual(self.graph.get_cpds(), [cpd2])
def test_remove_single_cpd_string(self):
cpd1 = TabularCPD('diff', 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, values=np.random.rand(2, 4),
evidence=['diff', 'intel'], evidence_card=[2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds('diff')
self.assertListEqual(self.graph.get_cpds(), [cpd2, cpd3])
def test_remove_multiple_cpds_string(self):
cpd1 = TabularCPD('diff', 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, values=np.random.rand(2, 4),
evidence=['diff', 'intel'], evidence_card=[2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds('diff', 'grade')
self.assertListEqual(self.graph.get_cpds(), [cpd2])
def test_get_cpd_for_node(self):
cpd1 = TabularCPD('diff', 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, values=np.random.rand(2, 4),
evidence=['diff', 'intel'], evidence_card=[2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertEqual(self.graph.get_cpds('diff'), cpd1)
self.assertEqual(self.graph.get_cpds('intel'), cpd2)
self.assertEqual(self.graph.get_cpds('grade'), cpd3)
def test_get_cpd_raises_error(self):
cpd1 = TabularCPD('diff', 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD('intel', 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD('grade', 2, values=np.random.rand(2, 4),
evidence=['diff', 'intel'], evidence_card=[2, 2])
self.graph.add_edges_from([('diff', 'grade'), ('intel', 'grade')])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertRaises(ValueError, self.graph.get_cpds, 'sat')
def tearDown(self):
del self.graph
| mit |
joshuamorton/calc_three_proj | plot.py | 1 | 1969 | from matplotlib import pyplot as plt
import numpy as np
import iterative
import pascal
import power
plt.style.use('ggplot')
qr = []
lu = []
for i in range(2, 13):
q = pascal.solve_qr_b(pascal.pascal_matrix(i), pascal.harmonic_vector(i))
l = pascal.solve_lu_b(pascal.pascal_matrix(i), pascal.harmonic_vector(i))
qr.append(q)
lu.append(l)
plt.subplot(1, 1, 1)
x = range(2, 13)
y = [i[1] for i in qr]
z = [i[2] for i in qr]
plt.plot(x, y, color='blue') # error from householder
plt.plot(x, z, color='green') # solution error of qr
plt.yscale('log')
plt.savefig('./qr_err.png')
y = [i[1] for i in lu]
z = [i[2] for i in lu]
plt.clf()
plt.plot(x, y, color='blue')
plt.plot(x, z, color='green')
plt.yscale('log')
plt.savefig('./lu_err.png')
plt.clf()
jacobi, gs = iterative.generate_data()
j_vals = [i[1] for i in jacobi]
g_vals = [i[1] for i in gs]
jacobi_approx = sum(j_vals) / len(j_vals) # 2c
gs_approx = sum(g_vals) / len(g_vals)
print("Averages, jacobi then gauss-seidel, then iterations")
print(jacobi_approx)
print(gs_approx)
print(float(sum(j[2] for j in jacobi))/sum(g[2] for g in gs))
exact = np.array([9.0/190, 28.0/475, 33.0/475]).reshape(3,1)
errs_jacobi = [pascal.norm_inf(j-exact) for j in j_vals]
errs_gs = [pascal.norm_inf(g-exact) for g in g_vals]
plt.plot([j[2] for j in jacobi], errs_jacobi, 'ko', [g[2] for g in gs], errs_gs, 'bo')
plt.savefig('./iterative_err.png')
plt.clf()
powers = power.generate_data()
ds = [p[0] for p in powers if p[0] is not None]
ts = [p[1] for p in powers if p[1] is not None]
tis = [p[2] for p in powers if p[2] is not None]
maxs = [p[3] for p in powers if p[3] is not None]
mins = [p[4] for p in powers if p[4] is not None]
big = max(maxs)
small = max(mins)
maxs = [float(m)/big for m in maxs]
mins = [float(m)/small for m in mins]
plt.scatter(ds, ts, c=maxs)
plt.savefig('./power_mat.png')
plt.clf()
plt.scatter([1.0/d for d in ds], tis, c=mins)
plt.savefig('./power_inv.png')
plt.clf() | mit |
jayflo/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
hitszxp/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 254 | 2639 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
partofthething/laserComm | laserComm/receiver.py | 1 | 2975 | '''
receiver runs the ADC and photoresistor to receive an input signal.
Uses MCP3008 ADC via the hardware SPI interface.
Connections are:
MCP3008 VDD -> 3.3V (red)
MCP3008 VREF -> 3.3V (red)
MCP3008 AGND -> GND (orange)
MCP3008 CLK -> SCLK (yellow)
MCP3008 DOUT -> MISO (green)
MCP3008 DIN -> MOSI (yellow)
MCP3008 CS -> CE0 (red)
MCP3008 DGND -> GND (orange)
The photoresistor ranges from about 4 kOhm (dark) down to roughly 90 Ohm (bright flashlight).
Output is 1024*Vin/Vref.
Build a voltage divider with a ~200 Ohm resistor in series with the photoresistor and measure
Vout between them (photoresistor between Vout and ground).
The signal is intended to be processed using signal_processor
'''
import time
import numpy
import matplotlib
matplotlib.use('Agg') # works headless (e.g. on Raspberry Pi)
import matplotlib.pyplot as plt
try:
import spidev
except ImportError:
print('no spidev')
GAP = 0.001
class ADC(object):
"""
The Analog-to-digital converter
"""
def __init__(self):
self.adc = None
def __enter__(self):
self.adc = spidev.SpiDev()
self.adc.open(0, 0)
def __exit__(self, exc_type, exc_value, traceback):
self.adc.close()
def read(self, input_number):
"""
read SPI data from MCP3008 chip
There are 8 possible channels (0 through 7)
Will return value between 0 and 1023
"""
if ((input_number > 7) or (input_number < 0)):
return -1
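        # MCP3008 transaction (per the datasheet): byte 1 is the start bit,
        # byte 2 carries single-ended mode plus the channel in its top nibble,
        # byte 3 just clocks out the reply. The 10-bit result comes back in
        # the low 2 bits of reply byte 1 and all 8 bits of reply byte 2.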
r = self.adc.xfer2([1, (8 + input_number) << 4, 0])
adcValue = ((r[1] & 3) << 8) + r[2]
return adcValue
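# Helper functions sketched in for illustration (nothing below uses them).
# They invert the "Output is 1024*Vin/Vref" relation from the module
# docstring; the 3.3 V reference and ~200 Ohm series resistor are assumptions
# taken from the wiring notes above.
def adc_to_volts(adc_value, v_ref=3.3):
    """Convert a raw 10-bit MCP3008 reading (0-1023) back to volts."""
    return v_ref * adc_value / 1024.0
def photoresistor_ohms(adc_value, v_ref=3.3, r_series=200.0):
    """Estimate the photoresistor resistance from the divider reading.
    Vout = Vref * R_photo / (R_series + R_photo), so
    R_photo = R_series * Vout / (Vref - Vout).
    """
    v_out = adc_to_volts(adc_value, v_ref)
    if v_out >= v_ref:
        return float('inf')
    return r_series * v_out / (v_ref - v_out)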
class Receiver(object):
"""
Stream processor that uses adc
"""
@property
def times(self):
return numpy.linspace(0, 10, len(self.vals))
def receive(self, adc):
self.vals = []
        # receive for 30 seconds
print('Receiving')
start = time.time()
while time.time() - start < 30.0:
self.vals.append(adc.read(0))
time.sleep(GAP / 10)
def plot(self, fname='adc.pdf'):
print('Plotting')
t = self.times
plt.figure(figsize=(12, 10))
plt.plot(t, self.vals, '-')
plt.xlabel('Time (s)')
plt.ylabel('ADC signal')
plt.title('ADC Signal Trace')
plt.grid(color='0.7')
if fname:
plt.savefig(fname)
def save(self, fname='adc.txt'):
"""
Save results to file
"""
print('Saving')
with open(fname, 'w') as f:
f.writelines(['{0:04d}\n'.format(vi) for vi in self.vals])
def load(self, fname='adc.txt'):
print('Loading')
with open(fname) as f:
vals = f.readlines()
self.vals = [float(vi) for vi in vals]
if __name__ == '__main__':
adc = ADC()
receiver = Receiver()
with adc:
vals = receiver.receive(adc)
receiver.plot()
receiver.save()
| mit |
ageron/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder_test.py | 25 | 13554 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DataFeeder`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class DataFeederTest(test.TestCase):
# pylint: disable=undefined-variable
"""Tests for `DataFeeder`."""
def setUp(self):
self._base_dir = os.path.join(self.get_temp_dir(), 'base_dir')
file_io.create_dir(self._base_dir)
def tearDown(self):
file_io.delete_recursively(self._base_dir)
def _wrap_dict(self, data, prepend=''):
return {prepend + '1': data, prepend + '2': data}
def _assert_raises(self, input_data):
with self.assertRaisesRegexp(TypeError, 'annot convert'):
data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data):
feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
if isinstance(input_data, dict):
for v in list(feeder.input_dtype.values()):
self.assertEqual(expected_np_dtype, v)
else:
self.assertEqual(expected_np_dtype, feeder.input_dtype)
with ops.Graph().as_default() as g, self.session(g):
inp, _ = feeder.input_builder()
if isinstance(inp, dict):
for v in list(inp.values()):
self.assertEqual(expected_tf_dtype, v.dtype)
else:
self.assertEqual(expected_tf_dtype, inp.dtype)
def test_input_int8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int8)
self._assert_dtype(np.int8, dtypes.int8, data)
self._assert_dtype(np.int8, dtypes.int8, self._wrap_dict(data))
def test_input_int16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int16)
self._assert_dtype(np.int16, dtypes.int16, data)
self._assert_dtype(np.int16, dtypes.int16, self._wrap_dict(data))
def test_input_int32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int32)
self._assert_dtype(np.int32, dtypes.int32, data)
self._assert_dtype(np.int32, dtypes.int32, self._wrap_dict(data))
def test_input_int64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int64)
self._assert_dtype(np.int64, dtypes.int64, data)
self._assert_dtype(np.int64, dtypes.int64, self._wrap_dict(data))
def test_input_uint32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint32)
self._assert_dtype(np.uint32, dtypes.uint32, data)
self._assert_dtype(np.uint32, dtypes.uint32, self._wrap_dict(data))
def test_input_uint64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint64)
self._assert_dtype(np.uint64, dtypes.uint64, data)
self._assert_dtype(np.uint64, dtypes.uint64, self._wrap_dict(data))
def test_input_uint8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint8)
self._assert_dtype(np.uint8, dtypes.uint8, data)
self._assert_dtype(np.uint8, dtypes.uint8, self._wrap_dict(data))
def test_input_uint16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint16)
self._assert_dtype(np.uint16, dtypes.uint16, data)
self._assert_dtype(np.uint16, dtypes.uint16, self._wrap_dict(data))
def test_input_float16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float16)
self._assert_dtype(np.float16, dtypes.float16, data)
self._assert_dtype(np.float16, dtypes.float16, self._wrap_dict(data))
def test_input_float32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float32)
self._assert_dtype(np.float32, dtypes.float32, data)
self._assert_dtype(np.float32, dtypes.float32, self._wrap_dict(data))
def test_input_float64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float64)
self._assert_dtype(np.float64, dtypes.float64, data)
self._assert_dtype(np.float64, dtypes.float64, self._wrap_dict(data))
def test_input_bool(self):
data = np.array([[False for _ in xrange(2)] for _ in xrange(2)])
self._assert_dtype(np.bool, dtypes.bool, data)
self._assert_dtype(np.bool, dtypes.bool, self._wrap_dict(data))
def test_input_string(self):
input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)])
self._assert_dtype(input_data.dtype, dtypes.string, input_data)
self._assert_dtype(input_data.dtype, dtypes.string,
self._wrap_dict(input_data))
def _assertAllClose(self, src, dest, src_key_of=None, src_prop=None):
def func(x):
val = getattr(x, src_prop) if src_prop else x
return val if src_key_of is None else src_key_of[val]
if isinstance(src, dict):
for k in list(src.keys()):
self.assertAllClose(func(src[k]), dest)
else:
self.assertAllClose(func(src), dest)
def test_unsupervised(self):
def func(feeder):
with self.cached_session():
inp, _ = feeder.input_builder()
feed_dict_fn = feeder.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[1, 2]], feed_dict, 'name')
data = np.matrix([[1, 2], [2, 3], [3, 4]])
func(data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data), None, n_classes=0, batch_size=1))
def test_data_feeder_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [2, 1], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
def test_epoch(self):
def func(feeder):
with self.cached_session():
feeder.input_builder()
epoch = feeder.make_epoch_variable()
feed_dict_fn = feeder.get_feed_dict_fn()
# First input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Second input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Third input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Back to the first input again, so new epoch.
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [1])
data = np.matrix([[1, 2], [2, 3], [3, 4]])
labels = np.array([0, 0, 1])
func(data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data, 'in'),
self._wrap_dict(labels, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=1))
def test_data_feeder_multioutput_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [[3, 4], [1, 2]], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[1, 2], [3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
def test_data_feeder_multioutput_classification(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(
out, [[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]], feed_dict,
'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[0, 1, 2], [2, 3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(5, 'out'),
batch_size=2))
def test_streaming_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[[1, 2]], [[3, 4]]], feed_dict, 'name')
self._assertAllClose(out, [[[1], [2]], [[2], [2]]], feed_dict, 'name')
def x_iter(wrap_dict=False):
yield np.array([[1, 2]]) if not wrap_dict else self._wrap_dict(
np.array([[1, 2]]), 'in')
yield np.array([[3, 4]]) if not wrap_dict else self._wrap_dict(
np.array([[3, 4]]), 'in')
def y_iter(wrap_dict=False):
yield np.array([[1], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[1], [2]]), 'out')
yield np.array([[2], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[2], [2]]), 'out')
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=2))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
# Test non-full batches.
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=10))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=10))
def test_dask_data_feeder(self):
if HAS_PANDAS and HAS_DASK:
x = pd.DataFrame(
dict(
a=np.array([.1, .3, .4, .6, .2, .1, .6]),
b=np.array([.7, .8, .1, .2, .5, .3, .9])))
x = dd.from_pandas(x, npartitions=2)
y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
y = dd.from_pandas(y, npartitions=2)
# TODO(ipolosukhin): Remove or restore this.
# x = extract_dask_data(x)
# y = extract_dask_labels(y)
df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1],
[0.60000002, 0.2]])
self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]])
# TODO(rohanj): Fix this test by fixing data_feeder. Currently, h5py doesn't
# support permutation based indexing lookups (More documentation at
# http://docs.h5py.org/en/latest/high/dataset.html#fancy-indexing)
def DISABLED_test_hdf5_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self.assertAllClose(out, [2, 1], feed_dict, 'name')
try:
import h5py # pylint: disable=g-import-not-at-top
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
file_path = os.path.join(self._base_dir, 'test_hdf5.h5')
h5f = h5py.File(file_path, 'w')
h5f.create_dataset('x', data=x)
h5f.create_dataset('y', data=y)
h5f.close()
h5f = h5py.File(file_path, 'r')
x = h5f['x']
y = h5f['y']
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
except ImportError:
print("Skipped test for hdf5 since it's not installed.")
class SetupPredictDataFeederTest(DataFeederTest):
"""Tests for `DataFeeder.setup_predict_data_feeder`."""
def test_iterable_data(self):
# pylint: disable=undefined-variable
def func(df):
self._assertAllClose(six.next(df), [[1, 2], [3, 4]])
self._assertAllClose(six.next(df), [[5, 6]])
data = [[1, 2], [3, 4], [5, 6]]
x = iter(data)
x_dict = iter([self._wrap_dict(v) for v in iter(data)])
func(data_feeder.setup_predict_data_feeder(x, batch_size=2))
func(data_feeder.setup_predict_data_feeder(x_dict, batch_size=2))
if __name__ == '__main__':
test.main()
| apache-2.0 |
raghavrv/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 33 | 20167 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hierarchical clustering on a precomputed distances matrix
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
        # With a specified number of clusters, just for the sake of
        # raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering_wrong_arg_memory():
    # Test that an error is raised when memory is neither a str
    # nor a joblib.Memory instance
rng = np.random.RandomState(0)
n_samples = 100
X = rng.randn(n_samples, 50)
memory = 5
clustering = AgglomerativeClustering(memory=memory)
assert_raises(ValueError, clustering.fit, X)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
        # Check that a ValueError is raised for a connectivity matrix of the
        # wrong shape
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    # Non-regression check for a bug triggered when a connectivity matrix that
    # does not support item assignment is provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
    # When n_clusters is less than max(100, 0.02 * n_samples), the full tree
    # should be built, that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
    # When n_clusters is large (greater than max(100, 0.02 * n_samples)),
    # we should stop merging once n_clusters clusters remain.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
oreilly-japan/deep-learning-from-scratch | ch07/apply_filter.py | 4 | 1634 | # coding: utf-8
import sys, os
sys.path.append(os.pardir)  # Allow importing files from the parent directory
import numpy as np
import matplotlib.pyplot as plt
from simple_convnet import SimpleConvNet
from matplotlib.image import imread
from common.layers import Convolution
def filter_show(filters, nx=4, show_num=16):
"""
c.f. https://gist.github.com/aidiary/07d530d5e08011832b12#file-draw_weight-py
"""
FN, C, FH, FW = filters.shape
ny = int(np.ceil(show_num / nx))
fig = plt.figure()
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i in range(show_num):
ax = fig.add_subplot(4, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i, 0], cmap=plt.cm.gray_r, interpolation='nearest')
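# The remainder of the script rebuilds the SimpleConvNet used during training,
# loads the learned parameters from params.pkl, displays the first 16 of the
# learned 5x5 first-layer filters, and then convolves a grayscale Lena image
# with each of those filters to visualise what they respond to.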
network = SimpleConvNet(input_dim=(1,28,28),
conv_param = {'filter_num':30, 'filter_size':5, 'pad':0, 'stride':1},
hidden_size=100, output_size=10, weight_init_std=0.01)
# Learned weights (after training)
network.load_params("params.pkl")
filter_show(network.params['W1'], 16)
img = imread('../dataset/lena_gray.png')
img = img.reshape(1, 1, *img.shape)
fig = plt.figure()
w_idx = 1
for i in range(16):
w = network.params['W1'][i]
b = 0 # network.params['b1'][i]
w = w.reshape(1, *w.shape)
#b = b.reshape(1, *b.shape)
conv_layer = Convolution(w, b)
out = conv_layer.forward(img)
out = out.reshape(out.shape[2], out.shape[3])
ax = fig.add_subplot(4, 4, i+1, xticks=[], yticks=[])
ax.imshow(out, cmap=plt.cm.gray_r, interpolation='nearest')
plt.show() | mit |
jrleja/bsfh | prospect/io/read_results.py | 3 | 20576 | import sys, os
from copy import deepcopy
import warnings
import pickle, json
import numpy as np
try:
import h5py
except:
pass
try:
from sedpy.observate import load_filters
except:
pass
"""Convenience functions for reading and reconstructing results from a fitting
run, including reconstruction of the model for making posterior samples
"""
__all__ = ["results_from", "emcee_restarter",
"get_sps", "get_model",
"traceplot", "subcorner",
"compare_paramfile"]
def unpick(pickled):
"""create a serialized object that can go into hdf5 in py2 and py3, and can be read by both
"""
try:
obj = pickle.loads(pickled, encoding='bytes')
except(TypeError):
obj = pickle.loads(pickled)
return obj
def results_from(filename, model_file=None, dangerous=True, **kwargs):
"""Read a results file with stored model and MCMC chains.
:param filename:
Name and path to the file holding the results. If ``filename`` ends in
"h5" then it is assumed that this is an HDF5 file, otherwise it is
assumed to be a pickle.
    :param dangerous: (default: True)
If True, use the stored paramfile text to import the parameter file and
reconstitute the model object. This executes code in the stored
paramfile text during import, and is therefore dangerous.
:returns results:
A dictionary of various results including:
+ `"chain"` - Samples from the posterior probability (ndarray).
+ `"lnprobability"` - The posterior probability of each sample.
+ `"weights"` - The weight of each sample, if `dynesty` was used.
+ `"theta_labels"` - List of strings describing free parameters.
+ `"bestfit"` - The prediction of the data for the posterior sample with
the highest `"lnprobability"`, as a dictionary.
+ `"run_params"` - A dictionary of arguments supplied to prospector at
the time of the fit.
+ `"paramfile_text"` - Text of the file used to run prospector, string
:returns obs:
The obs dictionary
:returns model:
The models.SedModel() object, if it could be regenerated from the stored
`"paramfile_text"`. Otherwise, `None`.
"""
# Read the basic chain, parameter, and run_params info
if filename.split('.')[-1] == 'h5':
res = read_hdf5(filename, **kwargs)
if "_mcmc.h5" in filename:
mf_default = filename.replace('_mcmc.h5', '_model')
else:
mf_default = "x"
else:
with open(filename, 'rb') as rf:
res = pickle.load(rf)
mf_default = filename.replace('_mcmc', '_model')
# Now try to read the model object itself from a pickle
if model_file is None:
mname = mf_default
else:
mname = model_file
param_file = (res['run_params'].get('param_file', ''),
res.get("paramfile_text", ''))
model, powell_results = read_model(mname, param_file=param_file,
dangerous=dangerous, **kwargs)
if dangerous:
try:
model = get_model(res)
except:
model = None
res['model'] = model
if powell_results is not None:
res["powell_results"] = powell_results
return res, res["obs"], model
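# Minimal, illustrative sketch (not part of the original API): the file name
# "demo_run_mcmc.h5" is an assumed placeholder, and the indexing assumes the
# array shapes described in the docstring above.
def _example_results_from(filename="demo_run_mcmc.h5"):
    """Load a result file and pull out the highest-probability sample."""
    res, obs, model = results_from(filename, dangerous=False)
    ibest = np.unravel_index(np.argmax(res["lnprobability"]),
                             np.shape(res["lnprobability"]))
    return res["theta_labels"], res["chain"][ibest]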
def emcee_restarter(restart_from="", niter=32, **kwargs):
"""Get the obs, model, and sps objects from a previous run, as well as the
run_params and initial positions (which are determined from the end of the
last run, and inserted into the run_params dictionary)
:param restart_from:
Name of the file to restart the sampling from. An error is raised if
this does not include an emcee style chain of shape (nwalker, niter,
ndim)
:param niter: (default: 32)
        Number of additional iterations to do (added to run_params)
:returns obs:
The `obs` dictionary used in the last run.
:returns model:
The model object used in the last run.
:returns sps:
The `sps` object used in the last run.
:returns noise:
A tuple of (None, None), since it is assumed the noise model in the
last run was trivial.
:returns run_params:
A dictionary of parameters controlling the operation. This is the same
as used in the last run, but with the "niter" key changed, and a new
"initial_positions" key that gives the ending positions of the emcee
walkers from the last run. The filename from which the run is
restarted is also stored in the "restart_from" key.
"""
result, obs, model = results_from(restart_from)
noise = (None, None)
# check for emcee style outputs
is_emcee = (len(result["chain"].shape) == 3) & (result["chain"].shape[0] > 1)
msg = "Result file {} does not have a chain of the proper shape."
assert is_emcee, msg.format(restart_from)
sps = get_sps(result)
run_params = deepcopy(result["run_params"])
run_params["niter"] = niter
run_params["restart_from"] = restart_from
initial_positions = result["chain"][:, -1, :]
run_params["initial_positions"] = initial_positions
return obs, model, sps, noise, run_params
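# Hedged sketch of a restart workflow; the file name is an assumed placeholder
# and the sampler call mentioned in the comment is only indicative.
def _example_emcee_restart(restart_from="old_run_mcmc.h5", niter=64):
    obs, model, sps, noise, run_params = emcee_restarter(
        restart_from=restart_from, niter=niter)
    # run_params now carries "initial_positions" (the final walker positions)
    # and the updated "niter"; these would typically be handed to the fitting
    # routine that drives emcee for the additional iterations.
    return run_params["initial_positions"].shape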
def read_model(model_file, param_file=('', ''), dangerous=False, **extras):
"""Read the model pickle. This can be difficult if there are user defined
functions that have to be loaded dynamically. In that case, import the
string version of the paramfile and *then* try to unpickle the model
object.
:param model_file:
String, name and path to the model pickle.
:param dangerous: (default: False)
If True, try to import the given paramfile.
:param param_file:
2-element tuple. The first element is the name of the paramfile, which
will be used to set the name of the imported module. The second
element is the param_file contents as a string. The code in this
string will be imported.
"""
model = powell_results = None
if os.path.exists(model_file):
try:
with open(model_file, 'rb') as mf:
mod = pickle.load(mf)
except(AttributeError):
# Here one can deal with module and class names that changed
with open(model_file, 'rb') as mf:
mod = load(mf)
except(ImportError, KeyError):
# here we load the parameter file as a module using the stored
# source string. Obviously this is dangerous as it will execute
# whatever is in the stored source string. But it can be used to
            # recover functions (especially dependency functions) that are user
# defined
path, filename = os.path.split(param_file[0])
modname = filename.replace('.py', '')
if dangerous:
user_module = import_module_from_string(param_file[1], modname)
with open(model_file, 'rb') as mf:
mod = pickle.load(mf)
model = mod['model']
for k, v in list(model.theta_index.items()):
if type(v) is tuple:
model.theta_index[k] = slice(*v)
powell_results = mod['powell']
return model, powell_results
def read_hdf5(filename, **extras):
"""Read an HDF5 file (with a specific format) into a dictionary of results.
This HDF5 file is assumed to have the groups ``sampling`` and ``obs`` which
respectively contain the sampling chain and the observational data used in
the inference.
All attributes of these groups as well as top-level attributes are loaded
into the top-level of the dictionary using ``json.loads``, and therefore
must have been written with ``json.dumps``. This should probably use
JSONDecoders, but who has time to learn that.
:param filename:
Name of the HDF5 file.
"""
groups = {"sampling": {}, "obs": {},
"bestfit": {}, "optimization": {}}
res = {}
with h5py.File(filename, "r") as hf:
# loop over the groups
for group, d in groups.items():
# check the group exists
if group not in hf:
continue
# read the arrays in that group into the dictionary for that group
for k, v in hf[group].items():
d[k] = np.array(v)
# unserialize the attributes and put them in the dictionary
for k, v in hf[group].attrs.items():
try:
d[k] = json.loads(v)
except:
try:
d[k] = unpick(v)
except:
d[k] = v
# do top-level attributes.
for k, v in hf.attrs.items():
try:
res[k] = json.loads(v)
except:
try:
res[k] = unpick(v)
except:
res[k] = v
res.update(groups['sampling'])
res["bestfit"] = groups["bestfit"]
res["optimization"] = groups["optimization"]
res['obs'] = groups['obs']
try:
res['obs']['filters'] = load_filters([str(f) for f in res['obs']['filters']])
except:
pass
try:
res['rstate'] = unpick(res['rstate'])
except:
pass
#try:
# mp = [names_to_functions(p.copy()) for p in res['model_params']]
# res['model_params'] = mp
#except:
# pass
return res
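# Hedged illustration of the layout `read_hdf5` expects; the file name is an
# assumed placeholder.  Datasets under each group become arrays, while group
# and file attributes are json/pickle decoded into the same dictionary.
def _example_inspect_hdf5(filename="demo_run_mcmc.h5"):
    with h5py.File(filename, "r") as hf:
        group_names = list(hf.keys())  # e.g. ["sampling", "obs", ...]
        sampling_shapes = {k: v.shape for k, v in hf["sampling"].items()}
    return group_names, sampling_shapes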
def read_pickles(filename, **kwargs):
"""Alias for backwards compatability. Calls `results_from()`.
"""
return results_from(filename, **kwargs)
def get_sps(res):
"""This gets exactly the SPS object used in the fiting (modulo any
changes to FSPS itself).
It (scarily) imports the paramfile (stored as text in the results
dictionary) as a module and then uses the `load_sps` method defined in the
paramfile module.
:param res:
A results dictionary (the output of `results_from()`)
:returns sps:
An sps object (i.e. from prospect.sources)
"""
import os
param_file = (res['run_params'].get('param_file', ''),
res.get("paramfile_text", ''))
path, filename = os.path.split(param_file[0])
modname = filename.replace('.py', '')
user_module = import_module_from_string(param_file[1], modname)
try:
sps = user_module.load_sps(**res['run_params'])
except(AttributeError):
sps = user_module.build_sps(**res['run_params'])
# Now check that the SSP libraries are consistent
flib = res['run_params'].get('sps_libraries', None)
try:
rlib = sps.ssp.libraries
except(AttributeError):
rlib = None
if (flib is None) or (rlib is None):
warnings.warn("Could not check SSP library versions.")
else:
liberr = ("The FSPS libraries used in fitting({}) are not the "
"same as the FSPS libraries that you are using now ({})".format(flib, rlib))
# If fitting and reading in are happening in different python versions,
# ensure string comparison doesn't throw error:
        if isinstance(flib[0], bytes):
flib = [i.decode() for i in flib]
        if isinstance(rlib[0], bytes):
rlib = [i.decode() for i in rlib]
assert (flib[0] == rlib[0]) and (flib[1] == rlib[1]), liberr
return sps
def get_model(res):
"""This gets exactly the model object used in the fiting.
It (scarily) imports the paramfile (stored as text in the results
dictionary) as a module and then uses the `load_model` method defined in the
paramfile module, with `run_params` dictionary passed to it.
:param res:
A results dictionary (the output of `results_from()`)
:returns model:
A prospect.models.SedModel object
"""
import os
param_file = (res['run_params'].get('param_file', ''),
res.get("paramfile_text", ''))
path, filename = os.path.split(param_file[0])
modname = filename.replace('.py', '')
user_module = import_module_from_string(param_file[1], modname)
try:
model = user_module.load_model(**res['run_params'])
except(AttributeError):
model = user_module.build_model(**res['run_params'])
return model
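# Hedged sketch showing how the two reconstruction helpers are typically used
# together after reading a result file; the file name is an assumed placeholder
# and, as noted above, both helpers execute the stored paramfile text.
def _example_regenerate(filename="demo_run_mcmc.h5"):
    res, obs, _ = results_from(filename, dangerous=False)
    sps = get_sps(res)      # rebuild the source (SPS) object
    model = get_model(res)  # rebuild the SedModel with the stored run_params
    return obs, model, sps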
def import_module_from_string(source, name, add_to_sys_modules=True):
"""Well this seems dangerous.
"""
import imp
user_module = imp.new_module(name)
exec(source, user_module.__dict__)
if add_to_sys_modules:
sys.modules[name] = user_module
return user_module
def traceplot(results, showpars=None, start=0, chains=slice(None),
figsize=None, truths=None, **plot_kwargs):
"""Plot the evolution of each parameter value with iteration #, for each
walker in the chain.
:param results:
A Prospector results dictionary, usually the output of
``results_from('resultfile')``.
:param showpars: (optional)
A list of strings of the parameters to show. Defaults to all
parameters in the ``"theta_labels"`` key of the ``sample_results``
dictionary.
:param chains:
        If results are from an ensemble sampler, setting `chains` to an integer
        array of walker indices will cause only those walkers to be used in
        generating the plot. Useful to keep the plot from getting too cluttered.
:param start: (optional, default: 0)
Integer giving the iteration number from which to start plotting.
:param **plot_kwargs:
Extra keywords are passed to the
``matplotlib.axes._subplots.AxesSubplot.plot()`` method.
:returns tracefig:
A multipaneled Figure object that shows the evolution of walker
positions in the parameters given by ``showpars``, as well as
ln(posterior probability)
"""
import matplotlib.pyplot as pl
# Get parameter names
try:
parnames = np.array(results['theta_labels'])
except(KeyError):
parnames = np.array(results['model'].theta_labels())
# Restrict to desired parameters
if showpars is not None:
ind_show = np.array([p in showpars for p in parnames], dtype=bool)
parnames = parnames[ind_show]
else:
ind_show = slice(None)
# Get the arrays we need (trace, lnp, wghts)
trace = results['chain'][..., ind_show]
if trace.ndim == 2:
trace = trace[None, :]
trace = trace[chains, start:, :]
lnp = np.atleast_2d(results['lnprobability'])[chains, start:]
wghts = results.get('weights', None)
if wghts is not None:
wghts = wghts[start:]
nwalk = trace.shape[0]
# Set up plot windows
ndim = len(parnames) + 1
nx = int(np.floor(np.sqrt(ndim)))
ny = int(np.ceil(ndim * 1.0 / nx))
sz = np.array([nx, ny])
factor = 3.0 # size of one side of one panel
lbdim = 0.2 * factor # size of left/bottom margin
trdim = 0.2 * factor # size of top/right margin
whspace = 0.05 * factor # w/hspace size
plotdim = factor * sz + factor * (sz - 1) * whspace
dim = lbdim + plotdim + trdim
if figsize is None:
fig, axes = pl.subplots(nx, ny, figsize=(dim[1], dim[0]), sharex=True)
else:
fig, axes = pl.subplots(nx, ny, figsize=figsize, sharex=True)
axes = np.atleast_2d(axes)
#lb = lbdim / dim
#tr = (lbdim + plotdim) / dim
#fig.subplots_adjust(left=lb[1], bottom=lb[0], right=tr[1], top=tr[0],
# wspace=whspace, hspace=whspace)
# Sequentially plot the chains in each parameter
for i in range(ndim - 1):
ax = axes.flat[i]
for j in range(nwalk):
ax.plot(trace[j, :, i], **plot_kwargs)
ax.set_title(parnames[i], y=1.02)
# Plot lnprob
ax = axes.flat[-1]
for j in range(nwalk):
ax.plot(lnp[j, :], **plot_kwargs)
ax.set_title('lnP', y=1.02)
[ax.set_xlabel("iteration") for ax in axes[-1, :]]
#[ax.set_xticklabels('') for ax in axes[:-1, :].flat]
if truths is not None:
for i, t in enumerate(truths[ind_show]):
axes.flat[i].axhline(t, color='k', linestyle=':')
pl.tight_layout()
return fig
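# Hedged usage sketch for the trace plot; the file name and the choice of
# parameters to show are assumptions for illustration only.
def _example_traceplot(filename="demo_run_mcmc.h5"):
    res, _, _ = results_from(filename, dangerous=False)
    return traceplot(res, showpars=res["theta_labels"][:4], start=0, alpha=0.5)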
def param_evol(results, **kwargs):
"""Backwards compatability
"""
return traceplot(results, **kwargs)
def subcorner(results, showpars=None, truths=None,
start=0, thin=1, chains=slice(None),
logify=["mass", "tau"], **kwargs):
"""Make a triangle plot of the (thinned, latter) samples of the posterior
parameter space. Optionally make the plot only for a supplied subset of
the parameters.
:param showpars: (optional)
List of string names of parameters to include in the corner plot.
:param truths: (optional)
List of truth values for the chosen parameters.
:param start: (optional, default: 0)
The iteration number to start with when drawing samples to plot.
:param thin: (optional, default: 1)
The thinning of each chain to perform when drawing samples to plot.
:param chains: (optional)
        If results are from an ensemble sampler, setting `chains` to an integer
        array of walker indices will cause only those walkers to be used in
        generating the plot. Useful for removing stuck walkers.
:param kwargs:
Remaining keywords are passed to the ``corner`` plotting package.
:param logify:
A list of parameter names to plot in `log10(parameter)` instead of
`parameter`
"""
try:
import corner as triangle
except(ImportError):
import triangle
except:
raise ImportError("Please install the `corner` package.")
# pull out the parameter names and flatten the thinned chains
# Get parameter names
try:
parnames = np.array(results['theta_labels'], dtype='U20')
except(KeyError):
parnames = np.array(results['model'].theta_labels())
# Restrict to desired parameters
if showpars is not None:
ind_show = np.array([parnames.tolist().index(p) for p in showpars])
parnames = parnames[ind_show]
else:
ind_show = slice(None)
# Get the arrays we need (trace, wghts)
trace = results['chain'][..., ind_show]
if trace.ndim == 2:
trace = trace[None, :]
trace = trace[chains, start::thin, :]
wghts = results.get('weights', None)
if wghts is not None:
wghts = wghts[start::thin]
samples = trace.reshape(trace.shape[0] * trace.shape[1], trace.shape[2])
# logify some parameters
xx = samples.copy()
if truths is not None:
xx_truth = np.array(truths).copy()
else:
xx_truth = None
for p in logify:
if p in parnames:
idx = parnames.tolist().index(p)
xx[:, idx] = np.log10(xx[:, idx])
parnames[idx] = "log({})".format(parnames[idx])
if truths is not None:
xx_truth[idx] = np.log10(xx_truth[idx])
# mess with corner defaults
corner_kwargs = {"plot_datapoints": False, "plot_density": False,
"fill_contours": True, "show_titles": True}
corner_kwargs.update(kwargs)
fig = triangle.corner(xx, labels=parnames, truths=xx_truth,
quantiles=[0.16, 0.5, 0.84], weights=wghts, **corner_kwargs)
return fig
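# Hedged usage sketch for the corner plot; the file name, the parameter subset
# and the thinning are illustrative assumptions.
def _example_subcorner(filename="demo_run_mcmc.h5"):
    res, _, _ = results_from(filename, dangerous=False)
    return subcorner(res, showpars=res["theta_labels"][:3], start=0, thin=2,
                     logify=["mass"])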
def subtriangle(results, **kwargs):
"""Backwards compatability
"""
return subcorner(results, **kwargs)
def compare_paramfile(res, filename):
"""Compare the runtime parameter file text stored in the `res` dictionary
to the text of some existing file with fully qualified path `filename`.
"""
from pprint import pprint
from difflib import unified_diff
a = res["paramfile_text"]
aa = a.split('\n')
with open(filename, "r") as f:
b = json.dumps(f.read())
bbl = json.loads(b)
bb = bbl.split('\n')
pprint([l for l in unified_diff(aa, bb)])
def names_to_functions(p):
"""Replace names of functions (or pickles of objects) in a parameter
description with the actual functions (or pickles).
"""
from importlib import import_module
for k, v in list(p.items()):
try:
m = import_module(v[1])
f = m.__dict__[v[0]]
except:
try:
f = pickle.loads(v)
except:
f = v
p[k] = f
return p
| mit |
bataeves/kaggle | instacart/imba/arboretum_cv.py | 2 | 18971 | import gc
from concurrent.futures import ThreadPoolExecutor
import pandas as pd
import numpy as np
import os
import arboretum
import json
import sklearn.metrics
from sklearn.metrics import f1_score, roc_auc_score
from sklearn.model_selection import train_test_split
from scipy.sparse import dok_matrix, coo_matrix
from sklearn.utils.multiclass import type_of_target
if __name__ == '__main__':
path = "data"
aisles = pd.read_csv(os.path.join(path, "aisles.csv"), dtype={'aisle_id': np.uint8, 'aisle': 'category'})
departments = pd.read_csv(os.path.join(path, "departments.csv"),
dtype={'department_id': np.uint8, 'department': 'category'})
order_prior = pd.read_csv(os.path.join(path, "order_products__prior.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint16,
'add_to_cart_order': np.uint8,
'reordered': bool})
order_train = pd.read_csv(os.path.join(path, "order_products__train.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint16,
'add_to_cart_order': np.uint8,
'reordered': bool})
orders = pd.read_csv(os.path.join(path, "orders.csv"), dtype={'order_id': np.uint32,
'user_id': np.uint32,
'eval_set': 'category',
'order_number': np.uint8,
'order_dow': np.uint8,
'order_hour_of_day': np.uint8
})
product_embeddings = pd.read_pickle('data/product_embeddings.pkl')
embedings = list(range(32))
product_embeddings = product_embeddings[embedings + ['product_id']]
order_prev = pd.merge(order_train, orders, on='order_id')
order_prev.order_number -= 1
order_prev = pd.merge(order_prev[
['user_id', 'order_number', 'product_id', 'reordered', 'add_to_cart_order', 'order_dow',
'order_hour_of_day']], orders[['user_id', 'order_number', 'order_id']],
on=['user_id', 'order_number'])
order_prev.drop(['order_number', 'user_id'], axis=1, inplace=True)
order_prev.rename(columns={
'reordered': 'reordered_prev',
'add_to_cart_order': 'add_to_cart_order_prev',
'order_dow': 'order_dow_prev',
'order_hour_of_day': 'order_hour_of_day_prev'
}, inplace=True)
products = pd.read_csv(os.path.join(path, "products.csv"), dtype={'product_id': np.uint16,
'aisle_id': np.uint8,
'department_id': np.uint8})
order_train = pd.read_pickle(os.path.join(path, 'chunk_0.pkl'))
order_train = order_train.loc[order_train.eval_set == "train", ['order_id', 'product_id', 'reordered']]
product_periods = pd.read_pickle(os.path.join(path, 'product_periods_stat.pkl')).fillna(9999)
# product_periods.prev1 = product_periods['last'] / product_periods.prev1
# product_periods.prev2 = product_periods['last'] / product_periods.prev2
# product_periods['mean'] = product_periods['last'] / product_periods['mean']
# product_periods['median'] = product_periods['last'] / product_periods['median']
print(order_train.columns)
###########################
weights = order_train.groupby('order_id')['reordered'].sum().to_frame('weights')
weights.reset_index(inplace=True)
prob = pd.merge(order_prior, orders, on='order_id')
print(prob.columns)
prob = prob.groupby(['product_id', 'user_id'])\
.agg({'reordered':'sum', 'user_id': 'size'})
print(prob.columns)
prob.rename(columns={'sum': 'reordered',
'user_id': 'total'}, inplace=True)
prob.reordered = (prob.reordered > 0).astype(np.float32)
prob.total = (prob.total > 0).astype(np.float32)
prob['reorder_prob'] = prob.reordered / prob.total
prob = prob.groupby('product_id').agg({'reorder_prob': 'mean'}).rename(columns={'mean': 'reorder_prob'})\
.reset_index()
prod_stat = order_prior.groupby('product_id').agg({'reordered': ['sum', 'size'],
'add_to_cart_order':'mean'})
prod_stat.columns = prod_stat.columns.levels[1]
prod_stat.rename(columns={'sum':'prod_reorders',
'size':'prod_orders',
'mean': 'prod_add_to_card_mean'}, inplace=True)
prod_stat.reset_index(inplace=True)
prod_stat['reorder_ration'] = prod_stat['prod_reorders'] / prod_stat['prod_orders']
prod_stat = pd.merge(prod_stat, prob, on='product_id')
# prod_stat.drop(['prod_reorders'], axis=1, inplace=True)
user_stat = orders.loc[orders.eval_set == 'prior', :].groupby('user_id').agg({'order_number': 'max',
'days_since_prior_order': ['sum',
'mean',
'median']})
user_stat.columns = user_stat.columns.droplevel(0)
user_stat.rename(columns={'max': 'user_orders',
'sum': 'user_order_starts_at',
'mean': 'user_mean_days_since_prior',
'median': 'user_median_days_since_prior'}, inplace=True)
user_stat.reset_index(inplace=True)
orders_products = pd.merge(orders, order_prior, on="order_id")
user_order_stat = orders_products.groupby('user_id').agg({'user_id': 'size',
'reordered': 'sum',
"product_id": lambda x: x.nunique()})
user_order_stat.rename(columns={'user_id': 'user_total_products',
'product_id': 'user_distinct_products',
'reordered': 'user_reorder_ratio'}, inplace=True)
user_order_stat.reset_index(inplace=True)
user_order_stat.user_reorder_ratio = user_order_stat.user_reorder_ratio / user_order_stat.user_total_products
user_stat = pd.merge(user_stat, user_order_stat, on='user_id')
user_stat['user_average_basket'] = user_stat.user_total_products / user_stat.user_orders
########################### products
prod_usr = orders_products.groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr.rename(columns={'user_id':'prod_users_unq'}, inplace=True)
prod_usr.reset_index(inplace=True)
prod_usr_reordered = orders_products.loc[orders_products.reordered, :].groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr_reordered.rename(columns={'user_id': 'prod_users_unq_reordered'}, inplace=True)
prod_usr_reordered.reset_index(inplace=True)
order_stat = orders_products.groupby('order_id').agg({'order_id': 'size'})\
.rename(columns = {'order_id': 'order_size'}).reset_index()
orders_products = pd.merge(orders_products, order_stat, on='order_id')
orders_products['add_to_cart_order_inverted'] = orders_products.order_size - orders_products.add_to_cart_order
orders_products['add_to_cart_order_relative'] = orders_products.add_to_cart_order / orders_products.order_size
data_dow = orders_products.groupby(['user_id', 'product_id', 'order_dow']).agg({
'reordered': ['sum', 'size']})
data_dow.columns = data_dow.columns.droplevel(0)
data_dow.columns = ['reordered_dow', 'reordered_dow_size']
data_dow['reordered_dow_ration'] = data_dow.reordered_dow / data_dow.reordered_dow_size
data_dow.reset_index(inplace=True)
data = orders_products.groupby(['user_id', 'product_id']).agg({'user_id': 'size',
'order_number': ['min', 'max'],
'add_to_cart_order': ['mean', 'median'],
'days_since_prior_order': ['mean', 'median'],
'order_dow': ['mean', 'median'],
'order_hour_of_day': ['mean', 'median'],
'add_to_cart_order_inverted': ['mean', 'median'],
'add_to_cart_order_relative': ['mean', 'median'],
'reordered':['sum']})
data.columns = data.columns.droplevel(0)
data.columns = ['up_orders', 'up_first_order', 'up_last_order', 'up_mean_cart_position', 'up_median_cart_position',
'days_since_prior_order_mean', 'days_since_prior_order_median', 'order_dow_mean', 'order_dow_median',
'order_hour_of_day_mean', 'order_hour_of_day_median',
'add_to_cart_order_inverted_mean', 'add_to_cart_order_inverted_median',
'add_to_cart_order_relative_mean', 'add_to_cart_order_relative_median',
'reordered_sum'
]
data['user_product_reordered_ratio'] = (data.reordered_sum + 1.0) / data.up_orders
# data['first_order'] = data['up_orders'] > 0
# data['second_order'] = data['up_orders'] > 1
#
# data.groupby('product_id')['']
data.reset_index(inplace=True)
data = pd.merge(data, prod_stat, on='product_id')
data = pd.merge(data, user_stat, on='user_id')
data['up_order_rate'] = data.up_orders / data.user_orders
data['up_orders_since_last_order'] = data.user_orders - data.up_last_order
data['up_order_rate_since_first_order'] = data.user_orders / (data.user_orders - data.up_first_order + 1)
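    # The derived columns above summarise how regularly a user buys a product:
    # `up_order_rate` is the share of the user's orders that contain it,
    # `up_orders_since_last_order` counts orders since it was last bought, and
    # `up_order_rate_since_first_order` rescales the order count by how recently
    # the product first appeared in the user's history.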
############################
user_dep_stat = pd.read_pickle('data/user_department_products.pkl')
user_aisle_stat = pd.read_pickle('data/user_aisle_products.pkl')
order_train = pd.merge(order_train, products, on='product_id')
order_train = pd.merge(order_train, orders, on='order_id')
order_train = pd.merge(order_train, user_dep_stat, on=['user_id', 'department_id'])
order_train = pd.merge(order_train, user_aisle_stat, on=['user_id', 'aisle_id'])
order_train = pd.merge(order_train, prod_usr, on='product_id')
order_train = pd.merge(order_train, prod_usr_reordered, on='product_id', how='left')
order_train.prod_users_unq_reordered.fillna(0, inplace=True)
order_train = pd.merge(order_train, data, on=['product_id', 'user_id'])
order_train = pd.merge(order_train, data_dow, on=['product_id', 'user_id', 'order_dow'], how='left')
order_train['aisle_reordered_ratio'] = order_train.aisle_reordered / order_train.user_orders
order_train['dep_reordered_ratio'] = order_train.dep_reordered / order_train.user_orders
order_train = pd.merge(order_train, product_periods, on=['user_id', 'product_id'])
order_train = pd.merge(order_train, product_embeddings, on=['product_id'])
# order_train = pd.merge(order_train, weights, on='order_id')
# order_train = pd.merge(order_train, order_prev, on=['order_id', 'product_id'], how='left')
# order_train.reordered_prev = order_train.reordered_prev.astype(np.float32) + 1.
# order_train['reordered_prev'].fillna(0, inplace=True)
# order_train[['add_to_cart_order_prev', 'order_dow_prev', 'order_hour_of_day_prev']].fillna(255, inplace=True)
print('data is joined')
# order_train.days_since_prior_order_mean -= order_train.days_since_prior_order
# order_train.days_since_prior_order_median -= order_train.days_since_prior_order
#
# order_train.order_dow_mean -= order_train.order_dow
# order_train.order_dow_median -= order_train.order_dow
#
# order_train.order_hour_of_day_mean -= order_train.order_hour_of_day
# order_train.order_hour_of_day_median -= order_train.order_hour_of_day
unique_orders = np.unique(order_train.order_id)
orders_train, orders_test = train_test_split(unique_orders, test_size=0.25, random_state=2017)
order_test = order_train.loc[np.in1d(order_train.order_id, orders_test)]
order_train = order_train.loc[np.in1d(order_train.order_id, orders_train)]
features = [
# 'reordered_dow_ration', 'reordered_dow', 'reordered_dow_size',
# 'reordered_prev', 'add_to_cart_order_prev', 'order_dow_prev', 'order_hour_of_day_prev',
'user_product_reordered_ratio', 'reordered_sum',
'add_to_cart_order_inverted_mean', 'add_to_cart_order_relative_mean',
'reorder_prob',
'last', 'prev1', 'prev2', 'median', 'mean',
'dep_reordered_ratio', 'aisle_reordered_ratio',
'aisle_products',
'aisle_reordered',
'dep_products',
'dep_reordered',
'prod_users_unq', 'prod_users_unq_reordered',
'order_number', 'prod_add_to_card_mean',
'days_since_prior_order',
'order_dow', 'order_hour_of_day',
'reorder_ration',
'user_orders', 'user_order_starts_at', 'user_mean_days_since_prior',
# 'user_median_days_since_prior',
'user_average_basket', 'user_distinct_products', 'user_reorder_ratio', 'user_total_products',
'prod_orders', 'prod_reorders',
'up_order_rate', 'up_orders_since_last_order', 'up_order_rate_since_first_order',
'up_orders', 'up_first_order', 'up_last_order', 'up_mean_cart_position',
# 'up_median_cart_position',
'days_since_prior_order_mean',
# 'days_since_prior_order_median',
'order_dow_mean',
# 'order_dow_median',
# 'order_hour_of_day_mean',
# 'order_hour_of_day_median'
]
features.extend(embedings)
print('not included', set(order_train.columns.tolist()) - set(features))
data = order_train[features].fillna(-1.).values.astype(np.float32)
data_categoties = order_train[['product_id', 'aisle_id', 'department_id']].values.astype(np.uint32)
labels = order_train[['reordered']].values.astype(np.float32).flatten()
# weights = order_train.weights.values.astype(np.float32)
# weights = 1./np.maximum(weights, 1.0)
data_val = order_test[features].fillna(-1.).values.astype(np.float32)
data_categoties_val = order_test[['product_id', 'aisle_id', 'department_id']].values.astype(np.uint32)
labels_val = order_test[['reordered']].values.astype(np.float32).flatten()
print(data.shape, data_categoties.shape, labels.shape)
# assert data.shape[0] == 8474661
config = json.dumps({'objective': 1,
'internals':
{
'compute_overlap': 3,
'double_precision': True,
'seed': 2017
},
'verbose':
{
'gpu': True,
'booster': True,
'data': True
},
'tree':
{
'eta': 0.01,
'max_depth': 10,
'gamma': 0.0,
'min_child_weight':20.0,
'min_leaf_size': 0,
'colsample_bytree': 0.6,
'colsample_bylevel': 0.6,
'lambda': 0.1,
'gamma_relative': 0.0001
}})
print(config)
data = arboretum.DMatrix(data, data_category=data_categoties, y=labels)
data_val = arboretum.DMatrix(data_val, data_category=data_categoties_val)
model = arboretum.Garden(config, data)
print('training...')
best_logloss = 1.0
best_rocauc = 0
best_iter_logloss = best_iter_rocauc = -1
with ThreadPoolExecutor(max_workers=4) as executor:
# grow trees
for i in range(20000):
print('tree', i)
model.grow_tree()
model.append_last_tree(data_val)
if i % 5 == 0:
pred = model.get_y(data)
pred_val = model.get_y(data_val)
logloss = executor.submit(sklearn.metrics.log_loss, labels, pred, eps=1e-6)
logloss_val = executor.submit(sklearn.metrics.log_loss, labels_val, pred_val, eps=1e-6)
rocauc = executor.submit(roc_auc_score, labels, pred)
rocauc_val = executor.submit(roc_auc_score, labels_val, pred_val)
# fscore_train = fscore(true_value_matrix, pred, order_index, product_index, len(orders_unique), len(products_unique), threshold=[0.16, 0.17, 0.18, 0.19, 0.2, 0.21])
# fscore_value = fscore(true_value_matrix_val, pred_val, order_index_val, product_index_val, len(orders_unique_val), len(products_unique_val), threshold=[0.16, 0.17, 0.18, 0.19, 0.2, 0.21])
logloss = logloss.result()
logloss_val = logloss_val.result()
rocauc = rocauc.result()
rocauc_val = rocauc_val.result()
print('train', logloss, rocauc,
'val', logloss_val, rocauc_val)
if rocauc_val > best_rocauc:
print('best roc auc ', rocauc_val)
best_rocauc = rocauc_val
best_iter_rocauc = i
if logloss_val < best_logloss:
print('best logloss', logloss_val)
best_logloss = logloss_val
best_iter_logloss = i
print('best roc auc iteration', best_rocauc, best_iter_rocauc)
    print('best logloss iteration', best_logloss, best_iter_logloss) | unlicense |
AOtools/soapy | doc/source/conf.py | 4 | 13074 | # -*- coding: utf-8 -*-
#
# Soapy documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 28 11:49:44 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
from mock import Mock as MagicMock
# Add soapy to path
SOAPY_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../..')
sys.path.append(SOAPY_DIR)
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['pyfftw', 'ipython','pyfits', 'PyQt5','IPython.qt.console.rich_ipython_widget',
'IPython.qt.inprocess',
'matplotlib.backends.backend_qt5agg','sip', 'pyqtgraph','pylab', 'OpenGL',
'matplotlib.figure',
'IPython.qt.console.rich_ipython_widget',
'scipy.ndimage','scipy.optimize', 'scipy.lib.blas.fblas','scipy.fftpack','scipy.interpolate','scipy', 'scipy.signal',
'scipy.ndimage.interpolation', 'scipy.special', 'numba'
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
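# The Mock/MOCK_MODULES pattern above stubs out heavy or compiled dependencies
# so that `import soapy` below succeeds on the documentation build host (for
# example Read the Docs), where those packages may not be installed; autodoc
# only needs signatures and docstrings, not working implementations.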
import soapy
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
#'sphinx.ext.napoleon'
'sphinxcontrib.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Soapy'
copyright = u'2015, Andrew Reeves'
author = u'Andrew Reeves'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = soapy.__version__[1:4]
# The full version, including alpha/beta/rc tags.
release = soapy.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
def setup(app):
app.add_stylesheet("theme_overrides.css")
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Soapydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Soapy.tex', u'Soapy Documentation',
u'Andrew Reeves', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'soapy', u'Soapy Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Soapy', u'Soapy Documentation',
author, 'Soapy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = { 'python': ('https://docs.python.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('http://matplotlib.sourceforge.net/', None)}
| gpl-3.0 |
JsNoNo/scikit-learn | sklearn/utils/validation.py | 29 | 24630 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..utils.fixes import signature
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
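# Illustrative usage sketch (example arrays chosen here, not taken from the
# scikit-learn documentation): integer input is converted to a floating point
# copy, while float input can be returned as-is when copy=False.
# >>> as_float_array(np.array([1, 2, 3]))               # float copy of the data
# >>> as_float_array(np.array([1.0, 2.0]), copy=False)  # returned unchanged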
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
warnings.warn(
"Passing 1d arrays as data is deprecated in 0.17 and will"
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
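# Illustrative usage sketch (example data invented here, not from the
# scikit-learn docs): a nested list passes validation and comes back as a 2D
# ndarray, while sparse input is rejected unless accept_sparse allows it.
# >>> X = check_array([[1, 2], [3, 4]])                    # validated array, shape (2, 2)
# >>> check_array([[1, 2], [3, 4]], ensure_min_samples=3)  # raises ValueError
# >>> check_array(sp.csr_matrix((3, 3)))                   # raises TypeError (accept_sparse=None)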
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
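# Illustrative usage sketch (example data invented here): X is validated as a
# 2D array and y as a 1D vector of matching length; inconsistent lengths fail.
# >>> X, y = check_X_y([[1.0, 2.0], [3.0, 4.0]], [0, 1])   # X.shape == (2, 2), y.shape == (2,)
# >>> check_X_y([[1.0, 2.0]], [0, 1])                      # raises ValueError (length mismatch)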
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
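# Illustrative usage sketch (seed values chosen arbitrarily): the three
# accepted inputs are None, an integer seed, and an existing RandomState.
# >>> rng = check_random_state(42)          # fresh RandomState seeded with 42
# >>> check_random_state(rng) is rng        # an instance is passed through -> True
# >>> check_random_state(None)              # the global numpy RandomState singleton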
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in signature(estimator.fit).parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
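# Illustrative usage sketch (matrix values invented here): a slightly
# asymmetric matrix is, by default, symmetrized by averaging with its
# transpose after a warning; raise_exception=True turns that into an error.
# >>> A = np.array([[0.0, 1.0], [1.001, 0.0]])
# >>> check_symmetric(A)                        # warns, returns 0.5 * (A + A.T)
# >>> check_symmetric(A, raise_exception=True)  # raises ValueError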
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
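# Illustrative usage sketch (estimator and attribute names are only examples):
# calling check_is_fitted before fit raises NotFittedError; after fit, when
# the attribute exists, the check passes silently.
# >>> from sklearn.linear_model import LinearRegression
# >>> est = LinearRegression()
# >>> check_is_fitted(est, "coef_")            # raises NotFittedError
# >>> est.fit([[0.0], [1.0]], [0.0, 1.0])
# >>> check_is_fitted(est, "coef_")            # passes once coef_ is set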
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
poryfly/scikit-learn | sklearn/cross_decomposition/cca_.py | 209 | 3150 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation should be done on a copy. Leave the default
set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
akshaybabloo/Car-ND | Project_5/laneline.py | 1 | 21371 | import cv2
import glob
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.metrics import mean_squared_error
x_cor = 9 #Number of corners to find
y_cor = 6
# Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((y_cor*x_cor,3), np.float32)
objp[:,:2] = np.mgrid[0:x_cor, 0:y_cor].T.reshape(-1,2)
def camera_cal():
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
images = glob.glob('camera_cal/calibration*.jpg') # Make a list of paths to calibration images
# Step through the list and search for chessboard corners
corners_not_found = [] #Calibration images in which opencv failed to find corners
for idx, fname in enumerate(images):
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Convert to grayscale
ret, corners = cv2.findChessboardCorners(gray, (x_cor,y_cor), None) # Find the chessboard corners
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
else:
corners_not_found.append(fname)
print('Corners were found on', str(len(imgpoints)), 'out of', str(len(images)), 'it is', str(len(imgpoints)*100.0/len(images)), '% of calibration images')
img_size = (img.shape[1], img.shape[0])
# Do camera calibration given object points and image points
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)
return mtx, dist
mtx, dist = camera_cal()
def undistort(img):
return cv2.undistort(img, mtx, dist, None, mtx)
def eq_Hist(img): # Histogram normalization
img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
img[:, :, 1] = cv2.equalizeHist(img[:, :, 1])
img[:, :, 2] = cv2.equalizeHist(img[:, :, 2])
return img
# Sobel
def sobel_img(img, thresh_min = 25, thresh_max = 255, sobel_kernel = 11):
sobelx = np.absolute(cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
sobely = np.absolute(cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
scaled_sobelx = np.uint16(255*sobelx/np.max(sobelx))
scaled_sobely = np.uint16(255*sobely/np.max(sobely))
sobel_sum = scaled_sobelx+0.2*scaled_sobely
scaled_sobel_sum = np.uint8(255*sobel_sum/np.max(sobel_sum))
sum_binary = np.zeros_like(scaled_sobel_sum)
sum_binary[(scaled_sobel_sum >= thresh_min) & (scaled_sobel_sum <= thresh_max)] = 1
return sum_binary
# Sobel magnitude
def sobel_mag_img(img, thresh_min = 25, thresh_max = 255, sobel_kernel = 11):
sobelx = np.absolute(cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
sobely = np.absolute(cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
gradmag = np.sqrt(sobelx**2 + sobely**2)
scaled_gradmag = np.uint8(255*gradmag/np.max(gradmag))
gradmag_binary = np.zeros_like(scaled_gradmag)
gradmag_binary[(scaled_gradmag >= thresh_min) & (scaled_gradmag <= thresh_max)] = 1
return gradmag_binary
# Sobel direction
def sobel_dir_img(img, thresh_min = 0.0, thresh_max = 1.5, sobel_kernel = 11):
sobelx = np.absolute(cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
sobely = np.absolute(cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
graddir = np.arctan2(sobely, sobelx)
graddir_binary = np.zeros_like(graddir)
graddir_binary[(graddir >= thresh_min) & (graddir <= thresh_max)] = 1
return graddir_binary
# Binary red channel threshold
def red_thres(img, thresh_min = 25, thresh_max = 255):
red = img[:,:,2]
red_binary = np.zeros_like(red)
red_binary[(red >= thresh_min) & (red <= thresh_max)] = 1
return red_binary
# Binary saturation channel threshold
def s_thres(img, thresh_min = 25, thresh_max = 255):
hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
s_channel = hls[:,:,2]
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel > thresh_min) & (s_channel <= thresh_max)] = 1
return s_binary
# Return saturation channel
def s_hls(img):
hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
return hls[:,:,2]
IMAGE_H = 223
IMAGE_W = 1280
# Sharpen image
def sharpen_img(img):
gb = cv2.GaussianBlur(img, (5,5), 20.0)
return cv2.addWeighted(img, 2, gb, -1, 0)
# Compute linear image transformation img*s+m
def lin_img(img,s=1.0,m=0.0):
img2=cv2.multiply(img, np.array([s]))
return cv2.add(img2, np.array([m]))
# Change image contrast; s>1 - increase
def contr_img(img, s=1.0):
m=127.0*(1.0-s)
return lin_img(img, s, m)
# Create perspective image transformation matrices
def create_M():
src = np.float32([[0, 673], [1207, 673], [0, 450], [1280, 450]])
dst = np.float32([[569, 223], [711, 223], [0, 0], [1280, 0]])
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
return M, Minv
# Main image transformation routine to get a warped image
def transform(img, M):
undist = undistort(img)
img_size = (1280, 223)
warped = cv2.warpPerspective(undist, M, img_size)
warped = sharpen_img(warped)
warped = contr_img(warped, 1.1)
return warped
# Show original and warped image side by side
def show_warped(img, M):
f, (plot1, plot2) = plt.subplots(1, 2, figsize=(9, 3))
plot1.imshow(cv2.cvtColor(undistort(img), cv2.COLOR_BGR2RGB))
plot1.set_title('Undistorted', fontsize=20)
plot2.imshow(cv2.cvtColor(transform(img, M), cv2.COLOR_BGR2RGB))
plot2.set_title('Warped', fontsize=20)
# Show one image
def show_img(img):
if len(img.shape)==3:
plt.figure()
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
else:
plt.figure()
plt.imshow(img, cmap='gray')
M, Minv = create_M()
# Calculate coefficients of a polynomial in y+h coordinates, i.e. f(y) -> f(y+h)
def pol_shift(pol, h):
pol_ord = len(pol)-1 # Determine the degree of the polynomial
if pol_ord == 3:
pol0 = pol[0]
pol1 = pol[1] + 3.0*pol[0]*h
pol2 = pol[2] + 3.0*pol[0]*h*h + 2.0*pol[1]*h
pol3 = pol[3] + pol[0]*h*h*h + pol[1]*h*h + pol[2]*h
return(np.array([pol0, pol1, pol2, pol3]))
if pol_ord == 2:
pol0 = pol[0]
pol1 = pol[1] + 2.0*pol[0]*h
pol2 = pol[2] + pol[0]*h*h+pol[1]*h
return(np.array([pol0, pol1, pol2]))
if pol_ord == 1:
pol0 = pol[0]
pol1 = pol[1] + pol[0]*h
return(np.array([pol0, pol1]))
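# Minimal sanity-check sketch for pol_shift (coefficients below are arbitrary
# example values): the shifted polynomial evaluated at y should equal the
# original polynomial evaluated at y + h.
# >>> pol = np.array([1.0, -2.0, 0.5])   # f(y) = y**2 - 2*y + 0.5
# >>> np.allclose(np.polyval(pol_shift(pol, 0.1), 0.3), np.polyval(pol, 0.4))
# True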
# Calculate the derivative of a polynomial pol at a point x
def pol_d(pol, x):
pol_ord = len(pol)-1
if pol_ord == 3:
return 3.0*pol[0]*x*x+2.0*pol[1]*x+pol[2]
if pol_ord == 2:
return 2.0*pol[0]*x+pol[1]
if pol_ord == 1:
return pol[0]#*np.ones(len(np.array(x)))
# Calculate the second derivative of a polynomial pol at a point x
def pol_dd(pol, x):
pol_ord = len(pol)-1
if pol_ord == 3:
return 6.0*pol[0]*x+2.0*pol[1]
if pol_ord == 2:
return 2.0*pol[0]
if pol_ord == 1:
return 0.0
# Calculate a polynomial value at a point x
def pol_calc(pol, x):
pol_f = np.poly1d(pol)
return(pol_f(x))
xm_in_px = 3.675 / 85 # Lane width (12 ft in m) is ~85 px on image
ym_in_px = 3.048 / 24 # Dashed line length (10 ft in m) is ~24 px on image
def px_to_m(px):
return xm_in_px*px
# Calculate offset from the lane center
def lane_offset(left, right):
offset = 1280/2.0-(pol_calc(left, 1.0)+ pol_calc(right, 1.0))/2.0
return px_to_m(offset)
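# Illustrative sketch (coefficients invented for demonstration): with constant
# lines at x=605 px and x=690 px at the bottom of the warped image, the
# computed offset from the lane center is roughly -0.32 m.
# >>> left = np.array([0.0, 605.0])    # first-order polynomials, constant in y
# >>> right = np.array([0.0, 690.0])
# >>> round(lane_offset(left, right), 2)
# -0.32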
# Calculate radius of curvature
MAX_RADIUS = 10000
def r_curv(pol, y):
if len(pol) == 2: # If the polynomial is a linear function
return MAX_RADIUS
else:
y_pol = np.linspace(0, 1, num=EQUID_POINTS)
x_pol = pol_calc(pol, y_pol)*xm_in_px
y_pol = y_pol*IMAGE_H*ym_in_px
pol = np.polyfit(y_pol, x_pol, len(pol)-1)
d_y = pol_d(pol, y)
dd_y = pol_dd(pol, y)
r = ((np.sqrt(1+d_y**2))**3)/abs(dd_y)
if r > MAX_RADIUS:
r = MAX_RADIUS
return r
def lane_curv(left, right):
l = r_curv(left, 1.0)
r = r_curv(right, 1.0)
if l < MAX_RADIUS and r < MAX_RADIUS:
return (r_curv(left, 1.0)+r_curv(right, 1.0))/2.0
else:
if l < MAX_RADIUS:
return l
if r < MAX_RADIUS:
return r
return MAX_RADIUS
# Calculate an approximated equidistant to a parabola
EQUID_POINTS = 25 # Number of points to use for the equidistant approximation
def equidistant(pol, d, max_l = 1, plot = False):
y_pol = np.linspace(0, max_l, num=EQUID_POINTS)
x_pol = pol_calc(pol, y_pol)
y_pol *= IMAGE_H # Convert y coordinates back to [0..223] scale
x_m = []
y_m = []
k_m = []
for i in range(len(x_pol)-1):
x_m.append((x_pol[i+1]-x_pol[i])/2.0+x_pol[i]) # Calculate point positions between given points
y_m.append((y_pol[i+1]-y_pol[i])/2.0+y_pol[i])
if x_pol[i+1] == x_pol[i]:
k_m.append(1e8) # A very big number
else:
k_m.append(-(y_pol[i+1]-y_pol[i])/(x_pol[i+1]-x_pol[i])) # Slope of perpendicular lines
x_m = np.array(x_m)
y_m = np.array(y_m)
k_m = np.array(k_m)
#Calculate equidistant points
y_eq = d*np.sqrt(1.0/(1+k_m**2))
x_eq = np.zeros_like(y_eq)
if d >= 0:
for i in range(len(x_m)):
if k_m[i] < 0:
y_eq[i] = y_m[i]-abs(y_eq[i])
else:
y_eq[i] = y_m[i]+abs(y_eq[i])
x_eq[i] = (x_m[i]-k_m[i]*y_m[i])+k_m[i]*y_eq[i]
else:
for i in range(len(x_m)):
if k_m[i] < 0:
y_eq[i] = y_m[i]+abs(y_eq[i])
else:
y_eq[i] = y_m[i]-abs(y_eq[i])
x_eq[i] = (x_m[i]-k_m[i]*y_m[i])+k_m[i]*y_eq[i]
y_eq /= IMAGE_H # Convert y coordinates back to [0..1] scale
y_pol /= IMAGE_H
y_m /= IMAGE_H
pol_eq = np.polyfit(y_eq, x_eq, len(pol)-1) # Fit equidistant with a polynomial
if plot:
plt.plot(x_pol, y_pol, color='red', linewidth=1, label = 'Original line') #Original line
plt.plot(x_eq, y_eq, color='green', linewidth=1, label = 'Equidistant') #Equidistant
plt.plot(pol_calc(pol_eq, y_pol), y_pol, color='blue',
linewidth=1, label = 'Approximation') #Approximation
plt.legend()
for i in range(len(x_m)):
plt.plot([x_m[i],x_eq[i]], [y_m[i],y_eq[i]], color='black', linewidth=1) #Draw connection lines
plt.savefig('readme_img/equid.jpg')
return pol_eq
DEV_POL = 2 # Max mean squared error of the approximation
MSE_DEV = 1.1 # Minimum mean squared error ratio to consider higher order of the polynomial
def best_pol_ord(x, y):
pol1 = np.polyfit(y,x,1)
pred1 = pol_calc(pol1, y)
mse1 = mean_squared_error(x, pred1)
if mse1 < DEV_POL:
return pol1, mse1
pol2 = np.polyfit(y,x,2)
pred2 = pol_calc(pol2, y)
mse2 = mean_squared_error(x, pred2)
if mse2 < DEV_POL or mse1/mse2 < MSE_DEV:
return pol2, mse2
else:
pol3 = np.polyfit(y,x,3)
pred3 = pol_calc(pol3, y)
mse3 = mean_squared_error(x, pred3)
if mse2/mse3 < MSE_DEV:
return pol2, mse2
else:
return pol3, mse3
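# Minimal sketch of the order selection (sample points invented here): data
# lying on a straight line is already fitted within DEV_POL by a first-order
# polynomial, so no higher order is tried.
# >>> y = np.linspace(0.0, 1.0, 20)
# >>> pol, mse = best_pol_ord(3.0*y + 5.0, y)
# >>> len(pol) - 1, mse < DEV_POL
# (1, True)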
# Smooth polynomial functions of different degrees
def smooth_dif_ord(pol_p, x, y, new_ord):
x_p = pol_calc(pol_p, y)
x_new = (x+x_p)/2.0
return np.polyfit(y, x_new, new_ord)
# Calculate threshold for the left line
def thres_l_calc(sens):
thres = -0.0045*sens**2+1.7581*sens-115.0
if thres < 25*(382.0-sens)/382.0+5:
thres = 25*(382.0-sens)/382.0+5
return thres
# Calculate threshold for the right line
def thres_r_calc(sens):
thres = -0.0411*sens**2+9.1708*sens-430.0
if sens<210:
if thres < sens/6:
thres = sens/6
else:
if thres < 20:
thres = 20
return thres
WINDOW_SIZE = 15 # Half of the sensor span
DEV = 7 # Maximum of the point deviation from the sensor center
SPEED = 2 / IMAGE_H # Pixels shift per frame
POL_ORD = 2 # Default polynomial order
RANGE = 0.0 # Fraction of the image to skip
def find(img, left=True, p_ord=POL_ORD, pol = np.zeros(POL_ORD+1), max_n = 0):
x_pos = []
y_pos = []
max_l = img.shape[0] # Number of rows in the image
for i in range(max_l-int(max_l*RANGE)):
y = max_l-i #Line number
y_01 = y / float(max_l) #y in [0..1] scale
if abs(pol[-1]) > 0: # If it is not a still image or the first video frame
if y_01 >= max_n + SPEED: # If we can use pol to find center of the virtual sensor from the previous frame
cent = int(pol_calc(pol, y_01-SPEED))
if y == max_l:
if left:
cent = 605
else:
cent = 690
else: # Prolong the pol tangentially
k = pol_d(pol, max_n)
b = pol_calc(pol, max_n)-k*max_n
cent = int(k*y_01+b)
if cent > 1280-WINDOW_SIZE:
cent = 1280-WINDOW_SIZE
if cent < WINDOW_SIZE:
cent = WINDOW_SIZE
else: #If it is a still image
if len(x_pos) > 0: # If there are some points detected
cent = x_pos[-1] # Use the previous point as a sensor center
else: #Initial guess on line position
if left:
cent = 605
else:
cent = 690
if left: #Subsample image
sens = 0.5*s_hls(img[max_l-1-i:max_l-i,cent-WINDOW_SIZE:cent+WINDOW_SIZE,:])\
+img[max_l-1-i:max_l-i,cent-WINDOW_SIZE:cent+WINDOW_SIZE,2]
else:
sens = img[max_l-1-i:max_l-i,cent-WINDOW_SIZE:cent+WINDOW_SIZE,2]
if len(sens[0,:]) < WINDOW_SIZE: # If we are out of the image
break
x_max = max(sens[0,:]) #Find maximal value on the sensor
sens_mean = np.mean(sens[0,:])
# Get threshold
if left:
loc_thres = thres_l_calc(sens_mean)
loc_dev = DEV
else:
loc_thres = thres_r_calc(sens_mean)
loc_dev = DEV
if len(x_pos) == 0:
loc_dev = WINDOW_SIZE
if (x_max-sens_mean) > loc_thres and (x_max>100 or left):
if left:
x = list(reversed(sens[0,:])).index(x_max)
x = cent+WINDOW_SIZE-x
else:
x = list(sens[0,:]).index(x_max)
x = cent-WINDOW_SIZE+x
if x-1 < 569.0*y_01 or x+1 > 569.0*y_01+711 or np.count_nonzero(sens[0,:]) < WINDOW_SIZE: # if the sensor touches the black triangle
break # We are done
if abs(pol[-1]) < 1e-4: # If no polynomial is provided
x_pos.append(x)
y_pos.append(y_01)
else:
if abs(x-cent) < loc_dev:#*14.206*r_curv(pol, max_l)**-0.2869:
x_pos.append(x)
y_pos.append(y_01)
if len(x_pos) > 1:
return x_pos, y_pos
else:
return [0], [0.0]
RANGE = 0.0
def get_lane(img, plot=False):
warp = transform(img, M)
img = undistort(img)
ploty = np.linspace(0, 1, num=warp.shape[0])
x2, y2 = find(warp)
x, y = find(warp, False)
right_fitx = pol_calc(best_pol_ord(x,y)[0], ploty)
left_fitx = pol_calc(best_pol_ord(x2,y2)[0], ploty)
y2 = np.int16(np.array(y2)*223.0) # Convert into [0..223] scale
y = np.int16(np.array(y)*223.0)
if plot:
for i in range(len(x)): # Plot points
cv2.circle(warp, (x[i], y[i]), 1, (255,50,255))
for i in range(len(x2)):
cv2.circle(warp, (x2[i], y2[i]), 1, (255,50,250))
show_img(warp)
plt.axis('off')
plt.plot(left_fitx, ploty*IMAGE_H, color='green', linewidth=1)
plt.plot(right_fitx, ploty*IMAGE_H, color='green', linewidth=1)
cv2.imwrite('img.jpg', warp)
return img, left_fitx, right_fitx, ploty*IMAGE_H
def draw_lane_img_p(img_path):
return cv2.imread(img_path)
def draw_lane(img, video=False):
if video:
img, left_fitx, right_fitx, ploty, left, right = get_lane_video(img)
else:
img, left_fitx, right_fitx, ploty = get_lane(img, False)
warp_zero = np.zeros((IMAGE_H,IMAGE_W)).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(img, 1.0, newwarp, 0.6, 0)
if video:
# Add text information on the frame
font = cv2.FONT_HERSHEY_SIMPLEX
text_pos = 'Pos of the car: '+str(np.round(lane_offset(left, right),2))+ ' m'
radius = np.round(lane_curv(left, right),2)
if radius >= MAX_RADIUS:
radius = 'Inf'
else:
radius = str(radius)
text_rad = 'Radius: '+radius+ ' m'
cv2.putText(result,text_pos,(10,25), font, 1,(255,255,255),2)
cv2.putText(result,text_rad,(10,75), font, 1,(255,255,255),2)
return(result)
right_fit_p = np.zeros(POL_ORD+1)
left_fit_p = np.zeros(POL_ORD+1)
r_len = 0
l_len = 0
lane_w_p = 90
MIN = 60 # Minimal line separation (in px)
MAX = 95 # Maximal line separation (in px)
MIN_POINTS = 10 #Minimal points to consider a line
MAX_N = 5 # Maximal frames without line detected to use previous frame
n_count = 0 # Frame counter
r_n = 0 # Number of frames with unsuccessful line detection
l_n = 0
def get_lane_video(img):
global right_fit_p, left_fit_p, r_len, l_len, n_count, r_n, l_n
sw = False
warp = transform(img, M)
img = undistort(img)
if l_n < MAX_N and n_count > 0:
x, y = find(warp, pol = left_fit_p, max_n = l_len)
else:
x, y = find(warp)
if len(x) > MIN_POINTS:
left_fit, mse_l = best_pol_ord(x,y)
if mse_l > DEV_POL*9 and n_count > 0:
left_fit = left_fit_p
l_n += 1
else:
l_n /= 2
else:
left_fit = left_fit_p
l_n += 1
if r_n < MAX_N and n_count > 0:
x2, y2 = find(warp, False, pol = right_fit_p, max_n = r_len)
else:
x2, y2 = find(warp, False)
if len(x2) > MIN_POINTS:
right_fit, mse_r = best_pol_ord(x2, y2)
if mse_r > DEV_POL*9 and n_count > 0:
right_fit = right_fit_p
r_n += 1
else:
r_n /= 2
else:
right_fit = right_fit_p
r_n += 1
if n_count > 0: # if not the first video frame
# Apply filter
if len(left_fit_p) == len(left_fit): # If the new and previous polynomials have the same order
left_fit = pol_shift(left_fit_p, -SPEED)*(1.0-len(x)/((1.0-RANGE)*IMAGE_H))+left_fit*(len(x)/((1.0-RANGE)*IMAGE_H))
else:
left_fit = smooth_dif_ord(left_fit_p, x, y, len(left_fit)-1)
l_len = y[-1]
if len(right_fit_p) == len(right_fit):
right_fit = pol_shift(right_fit_p, -SPEED)*(1.0-len(x2)/((1.0-RANGE)*IMAGE_H))+right_fit*(len(x2)/((1.0-RANGE)*IMAGE_H))
else:
right_fit = smooth_dif_ord(right_fit_p, x2, y2, len(right_fit)-1)
r_len = y2[-1]
if len(x) > MIN_POINTS and len(x2) <= MIN_POINTS: # If we have only left line
lane_w = pol_calc(right_fit_p, 1.0)-pol_calc(left_fit_p, 1.0)
right_fit = smooth_dif_ord(right_fit_p, pol_calc(equidistant(left_fit, lane_w, max_l=l_len), y),
y, len(left_fit)-1)
r_len = l_len
r_n /=2
if len(x2) > MIN_POINTS and len(x) <= MIN_POINTS: # If we have only right line
lane_w = pol_calc(right_fit_p, 1.0)-pol_calc(left_fit_p, 1.0)
#print(lane_w)
left_fit = smooth_dif_ord(left_fit_p, pol_calc(equidistant(right_fit, -lane_w, max_l=r_len), y2),
y2, len(right_fit)-1)
l_len = r_len
l_n /=2
if (l_n < MAX_N and r_n < MAX_N):
max_y = max(RANGE, l_len, r_len)
else:
max_y = 1.0#max(RANGE, l_len, r_len)
sw = True
d1 = pol_calc(right_fit, 1.0)-pol_calc(left_fit, 1.0)
dm = pol_calc(right_fit, max_y)-pol_calc(left_fit, max_y)
if (d1 > MAX or d1 < 60 or dm < 0):
left_fit = left_fit_p
right_fit = right_fit_p
l_n += 1
r_n += 1
ploty = np.linspace(max_y, 1, num=IMAGE_H)
left_fitx = pol_calc(left_fit, ploty)
right_fitx = pol_calc(right_fit, ploty)
right_fit_p = np.copy(right_fit)
left_fit_p = np.copy(left_fit)
n_count += 1
return img, left_fitx, right_fitx, ploty*223.0, left_fit, right_fit
def init_params(ran):
global right_fit_p, left_fit_p, n_count, RANGE, MIN_POINTS
right_fit_p = np.zeros(POL_ORD+1)
left_fit_p = np.zeros(POL_ORD+1)
n_count = 0
RANGE = ran
MIN_POINTS = 25-15*ran
| mit |
trankmichael/scipy | doc/source/tutorial/stats/plots/kde_plot3.py | 132 | 1229 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
np.random.seed(12456)
x1 = np.random.normal(size=200) # random data, normal distribution
xs = np.linspace(x1.min()-1, x1.max()+1, 200)
kde1 = stats.gaussian_kde(x1)
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(211)
ax1.plot(x1, np.zeros(x1.shape), 'b+', ms=12) # rug plot
ax1.plot(xs, kde1(xs), 'k-', label="Scott's Rule")
ax1.plot(xs, kde2(xs), 'b-', label="Silverman's Rule")
ax1.plot(xs, stats.norm.pdf(xs), 'r--', label="True PDF")
ax1.set_xlabel('x')
ax1.set_ylabel('Density')
ax1.set_title("Normal (top) and Student's T$_{df=5}$ (bottom) distributions")
ax1.legend(loc=1)
x2 = stats.t.rvs(5, size=200) # random data, T distribution
xs = np.linspace(x2.min() - 1, x2.max() + 1, 200)
kde3 = stats.gaussian_kde(x2)
kde4 = stats.gaussian_kde(x2, bw_method='silverman')
ax2 = fig.add_subplot(212)
ax2.plot(x2, np.zeros(x2.shape), 'b+', ms=12) # rug plot
ax2.plot(xs, kde3(xs), 'k-', label="Scott's Rule")
ax2.plot(xs, kde4(xs), 'b-', label="Silverman's Rule")
ax2.plot(xs, stats.t.pdf(xs, 5), 'r--', label="True PDF")
ax2.set_xlabel('x')
ax2.set_ylabel('Density')
plt.show()
| bsd-3-clause |
CalebBell/ht | ht/conv_free_immersed.py | 1 | 58245 | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
from math import exp, log
__all__ = ['Nu_vertical_plate_Churchill',
'Nu_free_vertical_plate',
'Nu_free_vertical_plate_methods',
'Nu_horizontal_plate_McAdams',
'Nu_horizontal_plate_VDI',
'Nu_horizontal_plate_Rohsenow',
'Nu_free_horizontal_plate',
'Nu_free_horizontal_plate_methods',
'Nu_sphere_Churchill',
'Nu_vertical_cylinder_Griffiths_Davis_Morgan',
'Nu_vertical_cylinder_Jakob_Linke_Morgan',
'Nu_vertical_cylinder_Carne_Morgan',
'Nu_vertical_cylinder_Eigenson_Morgan',
'Nu_vertical_cylinder_Touloukian_Morgan',
'Nu_vertical_cylinder_McAdams_Weiss_Saunders',
'Nu_vertical_cylinder_Kreith_Eckert',
'Nu_vertical_cylinder_Hanesian_Kalish_Morgan',
'Nu_vertical_cylinder_Al_Arabi_Khamis',
'Nu_vertical_cylinder_Popiel_Churchill',
'Nu_vertical_cylinder',
'Nu_vertical_cylinder_methods',
'Nu_horizontal_cylinder_Churchill_Chu',
'Nu_horizontal_cylinder_Kuehn_Goldstein',
'Nu_horizontal_cylinder_Morgan',
'Nu_horizontal_cylinder',
'Nu_horizontal_cylinder_methods',
'Nu_coil_Xin_Ebadian']
def Nu_vertical_plate_Churchill(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a vertical
plate according to the Churchill-Chu [1]_ correlation, also presented in
[2]_. Plate must be isothermal; an alternate expression exists for constant
heat flux.
.. math::
Nu_{L}=\left[0.825+\frac{0.387Ra_{L}^{1/6}}
{[1+(0.492/Pr)^{9/16}]^{8/27}}\right]^2
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
Returns
-------
Nu : float
Nusselt number with respect to height, [-]
Notes
-----
Although transition from laminar to turbulent is discrete in reality, this
equation provides a smooth transition in value from laminar to turbulent.
Checked with the original source.
Can be applied to vertical cylinders as well, subject to the criteria below:
.. math::
\frac{D}{L}\ge \frac{35}{Gr_L^{1/4}}
Examples
--------
From [2]_, Example 9.2, matches:
>>> Nu_vertical_plate_Churchill(0.69, 2.63E9)
147.16185223770603
References
----------
.. [1] Churchill, Stuart W., and Humbert H. S. Chu. "Correlating Equations
for Laminar and Turbulent Free Convection from a Vertical Plate."
International Journal of Heat and Mass Transfer 18, no. 11
(November 1, 1975): 1323-29. doi:10.1016/0017-9310(75)90243-4.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
Wiley, 2011.
'''
Ra = Pr*Gr
term = (0.825 + (0.387*Ra**(1/6.)*(1.0 + (Pr/0.492)**(-0.5625))**(-8.0/27.0)))
return term*term
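# Illustrative sketch (property values below are rough, assumed air-like
# numbers, not taken from the ht documentation): the Grashof number expected
# by this correlation can be built from the usual definition
# Gr = g*beta*dT*L**3/nu**2 before calling the function.
# >>> g, beta, dT, L, nu = 9.81, 3.3e-3, 30.0, 0.5, 1.6e-5
# >>> Gr = g*beta*dT*L**3/nu**2
# >>> Nu = Nu_vertical_plate_Churchill(Pr=0.71, Gr=Gr)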
Nu_free_vertical_plate_all_methods = ["Churchill"]
def Nu_free_vertical_plate_methods(Pr, Gr, H=None, W=None, check_ranges=True):
r'''This function returns a list of methods for calculating heat transfer
coefficient for external free convection from a verical plate.
Requires at a minimum a fluid's Prandtl number `Pr`, and the Grashof
number `Gr` for the system fluid (which require T and P to obtain).
`H` and `W` are not used by any correlations presently, but are included
for future support.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
H : float, optional
Height of vertical plate, [m]
W : float, optional
Width of the vertical plate, [m]
check_ranges : bool, optional
Whether or not to return only correlations suitable for the provided
data, [-]
Returns
-------
methods : list[str]
List of methods which can be used to calculate `Nu` with the given
inputs, [-]
Examples
--------
>>> Nu_free_vertical_plate_methods(0.69, 2.63E9)
['Churchill']
'''
return Nu_free_vertical_plate_all_methods
def Nu_free_vertical_plate(Pr, Gr, buoyancy=None, H=None, W=None, Method=None):
r'''This function calculates the heat transfer coefficient for external
free convection from a verical plate.
Requires at a minimum a fluid's Prandtl number `Pr`, and the Grashof
number `Gr` for the system fluid (which require T and P to obtain).
`H` and `W` are not used by any correlations presently, but are included
for future support.
If no correlation's name is provided as `Method`, the 'Churchill'
correlation is selected.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
H : float, optional
Height of vertical plate, [m]
W : float, optional
Width of the vertical plate, [m]
Returns
-------
Nu : float
Nusselt number with respect to plate height, [-]
Other Parameters
----------------
Method : string, optional
A string of the function name to use;
one of ('Churchill', ).
Examples
--------
Turbulent example
>>> Nu_free_vertical_plate(0.69, 2.63E9, False)
147.16185223770603
'''
if Method is None:
Method2 = 'Churchill'
else:
Method2 = Method
if Method2 == 'Churchill':
return Nu_vertical_plate_Churchill(Pr, Gr)
else:
raise ValueError("Correlation name not recognized; see the "
"documentation for the available options.")
def Nu_horizontal_plate_McAdams(Pr, Gr, buoyancy=True):
r'''Calculates the Nusselt number for natural convection above a horizontal
plate according to the McAdams [1]_ correlations. The plate must be
isothermal. Four different equations are used, two each for laminar and
turbulent; the two sets of correlations are required because if the plate
is hot, buoyancy lifts the fluid off the plate and enhances free convection
whereas if the plate is cold, the cold fluid above it settles on it and
decreases the free convection.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
Returns
-------
Nu : float
Nusselt number with respect to length, [-]
Notes
-----
Examples
--------
>>> Nu_horizontal_plate_McAdams(5.54, 3.21e8, buoyancy=True)
181.73121274384457
>>> Nu_horizontal_plate_McAdams(5.54, 3.21e8, buoyancy=False)
55.44564799362829
>>> Nu_horizontal_plate_McAdams(.01, 3.21e8, buoyancy=True)
22.857041558492334
>>> Nu_horizontal_plate_McAdams(.01, 3.21e8, buoyancy=False)
11.428520779246167
References
----------
.. [1] McAdams, William Henry. Heat Transmission. 3E. Malabar, Fla:
Krieger Pub Co, 1985.
'''
Ra = Pr*Gr
if buoyancy:
if Ra <= 1E7:
Nu = .54*Ra**0.25
else:
Nu = 0.15*Ra**(1.0/3.0)
else:
if Ra <= 1E10:
Nu = .27*Ra**0.25
else:
Nu = .15*Ra**(1.0/3.0)
return Nu
def Nu_horizontal_plate_VDI(Pr, Gr, buoyancy=True):
r'''Calculates the Nusselt number for natural convection above a horizontal
plate according to the VDI [1]_ correlations. The plate must be
isothermal. Three different equations are used: one each for the laminar and
turbulent regimes of heat transfer at the upper surface, and one for heat
transfer at the lower surface. The lower surface correlation is recommended
for the laminar flow regime.
The two different sets of correlations are required because if the plate
is hot, buoyancy lifts the fluid off the plate and enhances free convection
whereas if the plate is cold, the cold fluid above it settles on it and
decreases the free convection.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
Returns
-------
Nu : float
Nusselt number with respect to length, [-]
Notes
-----
The characteristic length suggested for use is as follows, with `a` and
`b` being the length and width of the plate.
.. math::
L = \frac{ab}{2(a+b)}
The buoyancy enhanced cases are from [2]_; the other is said to be from
[3]_, although the equations there not quite the same and do not include
the Prandtl number correction.
Examples
--------
>>> Nu_horizontal_plate_VDI(5.54, 3.21e8, buoyancy=True)
203.89681224927565
>>> Nu_horizontal_plate_VDI(5.54, 3.21e8, buoyancy=False)
39.16864971535617
References
----------
.. [1] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd ed. 2010 edition.
Berlin ; New York: Springer, 2010.
.. [2] Stewartson, Keith. "On the Free Convection from a Horizontal Plate."
Zeitschrift Für Angewandte Mathematik Und Physik ZAMP 9, no. 3
(September 1, 1958): 276-82. https://doi.org/10.1007/BF02033031.
.. [3] Schlunder, Ernst U, and International Center for Heat and Mass
Transfer. Heat Exchanger Design Handbook. Washington:
Hemisphere Pub. Corp., 1987.
'''
Ra = Pr*Gr
if buoyancy:
f2 = (1.0 + (0.322/Pr)**(0.55))**(20.0/11.0)
if Ra*f2 < 7e4:
return 0.766*(Ra*f2)**0.2
else:
return 0.15*(Ra*f2)**(1.0/3.0)
else:
f1 = (1.0 + (0.492/Pr)**(9.0/16.0))**(-16.0/9.0)
return 0.6*(Ra*f1)**0.2
def Nu_horizontal_plate_Rohsenow(Pr, Gr, buoyancy=True):
r'''Calculates the Nusselt number for natural convection above a horizontal
plate according to the Rohsenow, Hartnett, and Cho (1998) [1]_ correlations.
The plate must be isothermal. Three different equations are used: one each
for the laminar and turbulent regimes of heat transfer at the upper surface,
and one for heat transfer at the lower surface.
The lower surface correlation is recommended for the laminar flow regime.
The two different sets of correlations are required because if the plate
is hot, buoyancy lifts the fluid off the plate and enhances free convection
whereas if the plate is cold, the cold fluid above it settles on it and
decreases the free convection.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
Returns
-------
Nu : float
Nusselt number with respect to length, [-]
Notes
-----
The characteristic length suggested for use is as follows, with `a` and
`b` being the length and width of the plate.
.. math::
L = \frac{ab}{2(a+b)}
Examples
--------
>>> Nu_horizontal_plate_Rohsenow(5.54, 3.21e8, buoyancy=True)
175.91054716322836
>>> Nu_horizontal_plate_Rohsenow(5.54, 3.21e8, buoyancy=False)
35.95799244863986
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
'''
Ra = Pr*Gr
if buoyancy:
C_tU = 0.14*((1.0 + 0.01707*Pr)/(1.0 + 0.01*Pr))
C_tV = 0.13*Pr**0.22/(1.0 + 0.61*Pr**0.81)**0.42
        t1 = 1.0 # Ah/A # Heated to non-heated area ratio
        t2 = 0.0 # Lf*P/A # Lf is the vertical distance between the lowest and highest points on the body
        # P is perimeter, A is area
Cl = (0.0972 - (0.0157 + 0.462*C_tV)*t1
+ (0.615*C_tV - 0.0548 - 6e-6*Pr)*t2)
Nu_T = 0.835*Cl*Ra**0.25 # average Cl
Nu_l = 1.4/(log(1.0 + 1.4/Nu_T))
Nu_t = C_tU*Ra**(1.0/3.0)
m = 10.0
Nu = ((Nu_l)**m + Nu_t**m)**(1.0/m)
return Nu
else:
# No friction/C term
Nu_T = 0.527*Ra**0.2/(1.0 + (1.9/Pr)**0.9)**(2.0/9.0)
Nu_l = 2.5/(log(1.0 + 2.5/Nu_T))
return Nu_l
conv_free_horizontal_plate_all_methods = {
'McAdams': (Nu_horizontal_plate_McAdams, ('Pr', 'Gr', 'buoyancy')),
'VDI': (Nu_horizontal_plate_VDI, ('Pr', 'Gr', 'buoyancy')),
'Rohsenow': (Nu_horizontal_plate_Rohsenow, ('Pr', 'Gr', 'buoyancy')),
}
Nu_free_horizontal_plate_all_methods = ["VDI", "McAdams", "Rohsenow"]
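# Editor's note -- an illustrative sketch, not part of the original module: the
# registry above pairs each correlation with its argument names, so the three
# horizontal-plate methods can be compared with identical inputs, e.g. for a
# hot plate in air (Pr = 0.7, Gr = 1e9; values are arbitrary illustrations):
#
#     for name, (func, _args) in conv_free_horizontal_plate_all_methods.items():
#         print(name, func(Pr=0.7, Gr=1e9, buoyancy=True))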
def Nu_free_horizontal_plate_methods(Pr, Gr, buoyancy, L=None, W=None,
check_ranges=True):
    r'''This function returns a list of methods for calculating the heat
    transfer coefficient for external free convection from a horizontal plate.
Requires at a minimum a fluid's Prandtl number `Pr`, and the Grashof
number `Gr` for the system fluid, temperatures, and geometry.
`L` and `W` are not used by any correlations presently, but are included
for future support.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
L : float, optional
Length of horizontal plate, [m]
W : float, optional
Width of the horizontal plate, [m]
check_ranges : bool, optional
Whether or not to return only correlations suitable for the provided
data, [-]
Returns
-------
methods : list[str]
List of methods which can be used to calculate `Nu` with the given
inputs, [-]
Examples
--------
>>> Nu_free_horizontal_plate_methods(0.69, 2.63E9, True)
['VDI', 'McAdams', 'Rohsenow']
'''
return Nu_free_horizontal_plate_all_methods
def Nu_free_horizontal_plate(Pr, Gr, buoyancy, L=None, W=None,
Method=None):
r'''This function calculates the heat transfer coefficient for external
free convection from a horizontal plate.
Requires at a minimum a fluid's Prandtl number `Pr`, and the Grashof
number `Gr` for the system fluid, temperatures, and geometry.
`L` and `W` are not used by any correlations presently, but are included
for future support.
If no correlation's name is provided as `Method`, the 'VDI' correlation is
selected.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
L : float, optional
Length of horizontal plate, [m]
W : float, optional
Width of the horizontal plate, [m]
Returns
-------
Nu : float
Nusselt number with respect to plate length, [-]
Other Parameters
----------------
    Method : string, optional
        A string of the function name to use, as in the dictionary
        conv_free_horizontal_plate_all_methods
Examples
--------
Turbulent example
>>> Nu_free_horizontal_plate(5.54, 3.21e8, buoyancy=True)
203.89681224927565
>>> Nu_free_horizontal_plate(5.54, 3.21e8, buoyancy=True, Method='McAdams')
181.73121274384457
'''
if Method is None:
Method2 = "VDI"
else:
Method2 = Method
if Method2 == 'VDI':
return Nu_horizontal_plate_VDI(Pr=Pr, Gr=Gr, buoyancy=buoyancy)
if Method2 == 'McAdams':
return Nu_horizontal_plate_McAdams(Pr=Pr, Gr=Gr, buoyancy=buoyancy)
if Method2 == 'Rohsenow':
return Nu_horizontal_plate_Rohsenow(Pr=Pr, Gr=Gr, buoyancy=buoyancy)
else:
raise ValueError("Correlation name not recognized; see the "
"documentation for the available options.")
def Nu_sphere_Churchill(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a sphere
according to the Churchill [1]_ correlation. Sphere must be isothermal.
.. math::
Nu_D=2+\frac{0.589Ra_D^{1/4}} {\left[1+(0.469/Pr)^{9/16}\right]^{4/9}}
\cdot\left\{1 + \frac{7.44\times 10^{-8}Ra}
{[1+(0.469/Pr)^{9/16}]^{16/9}}\right\}^{1/12}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Although transition from laminar to turbulent is discrete in reality, this
equation provides a smooth transition in value from laminar to turbulent.
Checked with the original source.
Good for Ra < 1E13. Limit of Nu is 2 at low Grashof numbers.
Examples
--------
>>> Nu_sphere_Churchill(.7, 1E7)
25.670869440317578
References
----------
.. [1] Schlunder, Ernst U, and International Center for Heat and Mass
Transfer. Heat Exchanger Design Handbook. Washington:
Hemisphere Pub. Corp., 1987.
'''
Ra = Pr*Gr
Nu = 2 + (0.589*Ra**0.25/(1 + (0.469/Pr)**(9/16.))**(4/9.)*(
1 + 7.44E-8*Ra/(1 + (0.469/Pr)**(9/16.))**(16/9.))**(1/12.))
return Nu
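# Editor's note -- an illustrative check, not part of the original module: the
# additive constant 2 is the pure-conduction limit, so at very low Rayleigh
# numbers the correlation returns values only slightly above 2, e.g.
#
#     Nu_sphere_Churchill(0.7, 1e-3)   # ~2.1, approaching 2 as Gr decreases further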
### Vertical cylinders
def Nu_vertical_cylinder_Griffiths_Davis_Morgan(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by [2]_, as
presented in [3]_ and [4]_.
.. math::
Nu_H = 0.67 Ra_H^{0.25},\; 10^{7} < Ra < 10^{9}
.. math::
Nu_H = 0.0782 Ra_H^{0.357}, \; 10^{9} < Ra < 10^{11}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Cylinder of diameter 17.43 cm, length from 4.65 to 263.5 cm. Air as fluid.
Transition between ranges is not smooth.
If outside of range, no warning is given.
Examples
--------
>>> Nu_vertical_cylinder_Griffiths_Davis_Morgan(.7, 2E10)
327.6230596100138
References
----------
.. [1] Griffiths, Ezer, A. H. Davis, and Great Britain. The Transmission of
Heat by Radiation and Convection. London: H. M. Stationery off., 1922.
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 1E9 and turbulent is None):
Nu = 0.0782*Ra**0.357
else:
Nu = 0.67*Ra**0.25
return Nu
def Nu_vertical_cylinder_Jakob_Linke_Morgan(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by [2]_, as
presented in [3]_ and [4]_.
.. math::
Nu_H = 0.555 Ra_H^{0.25},\; 10^{4} < Ra < 10^{8}
.. math::
Nu_H = 0.129 Ra_H^{1/3},\; 10^{8} < Ra < 10^{12}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Cylinder of diameter 3.5 cm, length from L/D = 4.3. Air as fluid.
Transition between ranges is not smooth.
If outside of range, no warning is given. Results are presented rounded in
[4]_, and the second range is not shown in [3]_.
Examples
--------
>>> Nu_vertical_cylinder_Jakob_Linke_Morgan(.7, 2E10)
310.90835207860454
References
----------
.. [1] Jakob, M., and Linke, W., Warmeubergang beim Verdampfen von
Flussigkeiten an senkrechten und waagerechten Flaschen, Phys. Z.,
vol. 36, pp. 267-280, 1935.
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 1E8 and turbulent is None):
Nu = 0.129*Ra**(1/3.)
else:
Nu = 0.555*Ra**0.25
return Nu
def Nu_vertical_cylinder_Carne_Morgan(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by [2]_, as
presented in [3]_ and [4]_.
.. math::
Nu_H = 1.07 Ra_H^{0.28},\; 2\times 10^{6} < Ra < 2\times 10^{8}
.. math::
Nu_H = 0.152 Ra_H^{0.38},\; 2\times 10^{8} < Ra < 2\times 10^{11}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Cylinder of diameters 0.475 cm to 7.62 cm, L/D from 8 to 127. Isothermal
boundary condition was assumed, but not verified. Transition between ranges
    is not smooth. If outside of range, no warning is given. The higher range
    of [1]_ is not shown in [3]_, and the formula [3]_ gives for the first
    range is actually that of the second.
Examples
--------
>>> Nu_vertical_cylinder_Carne_Morgan(.7, 2E8)
204.31470629065677
References
----------
.. [1] J. B. Carne. "LIX. Heat Loss by Natural Convection from Vertical
Cylinders." The London, Edinburgh, and Dublin Philosophical Magazine and
Journal of Science 24, no. 162 (October 1, 1937): 634-53.
doi:10.1080/14786443708565140.
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 2E8 and turbulent is None):
return 0.152*Ra**0.38
else:
return 1.07*Ra**0.28
def Nu_vertical_cylinder_Eigenson_Morgan(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by [2]_,
presented in [3]_ and in more detail in [4]_.
.. math::
Nu_H = 0.48 Ra_H^{0.25},\; 10^{9} < Ra
.. math::
Nu_H = 51.5 + 0.0000726 Ra_H^{0.63},\; 10^{9} < Ra < 1.69 \times 10^{10}
.. math::
Nu_H = 0.148 Ra_H^{1/3} - 127.6 ,\; 1.69 \times 10^{10} < Ra
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Author presents results as appropriate for both flat plates and cylinders.
Height of 2.5 m with diameters of 2.4, 7.55, 15, 35, and 50 mm. Another
experiment of diameter 58 mm and length of 6.5 m was considered.
    Cylinder of diameters 0.475 cm to 7.62 cm, L/D from 8 to 127. Transition
between ranges is not smooth. If outside of range, no warning is given.
Formulas are presented similarly in [3]_ and [4]_, but only [4]_ shows
the transition formula.
Examples
--------
>>> Nu_vertical_cylinder_Eigenson_Morgan(0.7, 2E10)
230.55946525499715
References
----------
.. [1] Eigenson L (1940). Les lois gouvernant la transmission de la chaleur
aux gaz biatomiques par les parois des cylindres verticaux dans le cas
de convection naturelle. Dokl Akad Nauk SSSR 26:440-444
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 1.69E10 and turbulent is None):
return 0.148*Ra**(1/3.) - 127.6
elif 1E9 < Ra < 1.69E10 and turbulent is not False:
return 51.5 + 0.0000726*Ra**0.63
else:
return 0.48*Ra**0.25
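# Editor's note -- an illustrative sketch, not part of the original module: the
# three branches above do not meet, so evaluating either side of the upper
# transition (Ra = 1.69e10) shows the discontinuity noted in the docstring
# (inputs are arbitrary illustrations):
#
#     Nu_vertical_cylinder_Eigenson_Morgan(0.7, 1.6e10/0.7)   # transition branch
#     Nu_vertical_cylinder_Eigenson_Morgan(0.7, 1.8e10/0.7)   # turbulent branch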
def Nu_vertical_cylinder_Touloukian_Morgan(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by [2]_, as
presented in [3]_ and [4]_.
.. math::
Nu_H = 0.726 Ra_H^{0.25},\; 2\times 10^{8} < Ra < 4\times 10^{10}
.. math::
Nu_H = 0.0674 (Gr_H Pr^{1.29})^{1/3},\; 4\times 10^{10} < Ra < 9\times 10^{11}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Cylinder of diameters 2.75 inch, with heights of 6, 18, and 36.25 inch.
Temperature was controlled via multiple separately controlled heating
sections. Fluids were water and ethylene-glycol. Transition between ranges
is not smooth. If outside of range, no warning is given. [2]_, [3]_, and
[4]_ are in complete agreement about this formulation.
Examples
--------
>>> Nu_vertical_cylinder_Touloukian_Morgan(.7, 2E10)
249.72879961097854
References
----------
.. [1] Touloukian, Y. S, George A Hawkins, and Max Jakob. Heat Transfer by
Free Convection from Heated Vertical Surfaces to Liquids.
Trans. ASME 70, 13-18 (1948).
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 4E10 and turbulent is None):
return 0.0674*(Gr*Pr**1.29)**(1/3.)
else:
return 0.726*Ra**0.25
def Nu_vertical_cylinder_McAdams_Weiss_Saunders(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ and [2]_ correlated by
[3]_, as presented in [4]_, [5]_, and [6]_.
.. math::
Nu_H = 0.59 Ra_H^{0.25},\; 10^{4} < Ra < 10^{9}
.. math::
Nu_H = 0.13 Ra_H^{1/3.},\; 10^{9} < Ra < 10^{12}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Transition between ranges is not smooth. If outside of range, no warning is
    given. For ranges under 10^4, a graph is provided but is not included here.
Examples
--------
>>> Nu_vertical_cylinder_McAdams_Weiss_Saunders(.7, 2E10)
313.31849434277973
References
----------
.. [1] Weise, Rudolf. "Warmeubergang durch freie Konvektion an
quadratischen Platten." Forschung auf dem Gebiet des Ingenieurwesens
A 6, no. 6 (November 1935): 281-92. doi:10.1007/BF02592565.
.. [2] Saunders, O. A. "The Effect of Pressure Upon Natural Convection in
Air." Proceedings of the Royal Society of London A: Mathematical,
Physical and Engineering Sciences 157, no. 891 (November 2, 1936):
278-91. doi:10.1098/rspa.1936.0194.
.. [3] McAdams, William Henry. Heat Transmission. 3E. Malabar, Fla:
Krieger Pub Co, 1985.
.. [4] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [5] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [6] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 1E9 and turbulent is None):
return 0.13*Ra**(1/3.)
else:
return 0.59*Ra**0.25
def Nu_vertical_cylinder_Kreith_Eckert(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by
[2]_, also as presented in [3]_, [4]_, and [5]_.
.. math::
Nu_H = 0.555 Ra_H^{0.25},\; 10^{5} < Ra < 10^{9}
.. math::
Nu_H = 0.021 Ra_H^{0.4},\; 10^{9} < Ra < 10^{12}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Transition between ranges is not smooth. If outside of range, no warning is
given.
Examples
--------
>>> Nu_vertical_cylinder_Kreith_Eckert(.7, 2E10)
240.25393473033196
References
----------
.. [1] Eckert, E. R. G., Thomas W. Jackson, and United States. Analysis of
Turbulent Free-Convection Boundary Layer on Flat Plate. National
Advisory Committee for Aeronautics, no. 2207. Washington, D.C.: National
       Advisory Committee for Aeronautics, 1950.
.. [2] Kreith, Frank, Raj Manglik, and Mark Bohn. Principles of Heat
Transfer. Cengage, 2010.
.. [3] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [4] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [5] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 1E9 and turbulent is None):
return 0.021*Ra**0.4
else:
return 0.555*Ra**0.25
def Nu_vertical_cylinder_Hanesian_Kalish_Morgan(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by
[2]_, also as presented in [3]_ and [4]_.
.. math::
Nu_H = 0.48 Ra_H^{0.23},\; 10^{6} < Ra < 10^{8}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
For air and fluoro-carbons. If outside of range, no warning is given.
Laminar range only!
Examples
--------
>>> Nu_vertical_cylinder_Hanesian_Kalish_Morgan(.7, 1E7)
18.014150492696604
References
----------
.. [1] Hanesian, D. and Kalish, R. "Heat Transfer by Natural Convection
with Fluorocarbon Gases." IEEE Transactions on Parts, Materials and
Packaging 6, no. 4 (December 1970): 147-148.
doi:10.1109/TPMP.1970.1136270.
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
return 0.48*Ra**0.23
### Vertical cylinders, more complex correlations
def Nu_vertical_cylinder_Al_Arabi_Khamis(Pr, Gr, L, D, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to [1]_, also as presented in [2]_ and [3]_.
.. math::
Nu_H = 2.9Ra_H^{0.25}/Gr_D^{1/12},\; 9.88 \times 10^7 \le Ra_H \le 2.7\times10^{9}
.. math::
Nu_H = 0.47 Ra_H^{0.333}/Gr_D^{1/12},\; 2.7 \times 10^9 \le Ra_H \le 2.95\times10^{10}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number with respect to cylinder height [-]
L : float
Length of vertical cylinder, [m]
D : float
Diameter of cylinder, [m]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection, [-]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
For air. Local Nusselt number results also given in [1]_. D from 12.75 to
51 mm; H from 300 to 2000 mm. Temperature kept constant by steam condensing.
If outside of range, no warning is given. Applies for range of:
.. math::
1.08 \times 10^4 \le Gr_D \le 6.9 \times 10^5
Examples
--------
>>> Nu_vertical_cylinder_Al_Arabi_Khamis(.71, 2E10, 10, 1)
280.39793209114765
References
----------
.. [1] Al-Arabi, M., and M. Khamis. "Natural Convection Heat Transfer from
Inclined Cylinders." International Journal of Heat and Mass Transfer 25,
no. 1 (January 1982): 3-15. doi:10.1016/0017-9310(82)90229-0.
.. [2] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [3] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Gr_D = Gr/L**3*D**3
Ra = Pr*Gr
if turbulent or (Ra > 2.6E9 and turbulent is None):
return 0.47*Ra**(1/3.)*Gr_D**(-1/12.)
else:
return 2.9*Ra**0.25*Gr_D**(-1/12.)
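# Editor's note -- an illustrative sketch, not part of the original module: the
# diameter-based Grashof number used in the Gr_D**(-1/12) correction is just
# the height-based value rescaled by (D/L)**3; for the docstring example above:
#
#     Gr, L, D = 2e10, 10.0, 1.0
#     Gr_D = Gr*(D/L)**3   # = 2e7; outside the stated Gr_D range, and no warning is raised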
def Nu_vertical_cylinder_Popiel_Churchill(Pr, Gr, L, D):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to [1]_, also presented in [2]_.
.. math::
\frac{Nu}{Nu_{L,fp}} = 1 + B\left[32^{0.5}Gr_L^{-0.25}\frac{L}{D}\right]^C
.. math::
B = 0.0571322 + 0.20305 Pr^{-0.43}
.. math::
C = 0.9165 - 0.0043Pr^{0.5} + 0.01333\ln Pr + 0.0004809/Pr
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number with respect to cylinder height [-]
L : float
Length of vertical cylinder, [m]
D : float
Diameter of cylinder, [m]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
For 0.01 < Pr < 100. Requires a vertical flat plate correlation.
    Both [2]_ and [3]_ present a power of 2 instead of 0.5 on the 32 in the
    equation, but the original source has the correct form.
Examples
--------
>>> Nu_vertical_cylinder_Popiel_Churchill(0.7, 1E10, 2.5, 1)
228.89790055149896
References
----------
.. [1] Popiel, C. O., J. Wojtkowiak, and K. Bober. "Laminar Free Convective
Heat Transfer from Isothermal Vertical Slender Cylinder." Experimental
Thermal and Fluid Science 32, no. 2 (November 2007): 607-613.
doi:10.1016/j.expthermflusci.2007.07.003.
.. [2] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [3] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
B = 0.0571322 + 0.20305*Pr**-0.43
C = 0.9165 - 0.0043*Pr**0.5 + 0.01333*log(Pr) + 0.0004809/Pr
Nu_fp = Nu_vertical_plate_Churchill(Pr, Gr)
return Nu_fp*(1 + B*(32**0.5*Gr**-0.25*L/D)**C)
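# Editor's note -- an illustrative sketch, not part of the original module: the
# bracketed term acts as a slenderness correction on the flat-plate result, so
# for a squat cylinder (small L/D) the value collapses onto the plate value
# (inputs are arbitrary illustrations):
#
#     Nu_vertical_cylinder_Popiel_Churchill(0.7, 1e10, 2.5, 1)   # slender; correction matters
#     Nu_vertical_cylinder_Popiel_Churchill(0.7, 1e10, 1, 100)   # L/D = 0.01; ~flat plate
#     Nu_vertical_plate_Churchill(0.7, 1e10)                     # flat-plate reference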
# Nice Name : (function_call, does_turbulent, does_laminar, transition_Ra, is_only_Pr_Gr)
vertical_cylinder_correlations = {
'Churchill Vertical Plate': (Nu_vertical_plate_Churchill, True, True, None, True),
'Griffiths, Davis, & Morgan': (Nu_vertical_cylinder_Griffiths_Davis_Morgan, True, True, 1.00E+009, True),
'Jakob, Linke, & Morgan': (Nu_vertical_cylinder_Jakob_Linke_Morgan, True, True, 1.00E+008, True),
'Carne & Morgan': (Nu_vertical_cylinder_Carne_Morgan, True, True, 2.00E+008, True),
'Eigenson & Morgan': (Nu_vertical_cylinder_Eigenson_Morgan, True, True, 6.90E+011, True),
'Touloukian & Morgan': (Nu_vertical_cylinder_Touloukian_Morgan, True, True, 4.00E+010, True),
'McAdams, Weiss & Saunders': (Nu_vertical_cylinder_McAdams_Weiss_Saunders, True, True, 1.00E+009, True),
'Kreith & Eckert': (Nu_vertical_cylinder_Kreith_Eckert, True, True, 1.00E+009, True),
'Hanesian, Kalish & Morgan': (Nu_vertical_cylinder_Hanesian_Kalish_Morgan, False, True, 1.00E+008, True),
'Al-Arabi & Khamis': (Nu_vertical_cylinder_Al_Arabi_Khamis, True, True, 2.60E+009, False),
'Popiel & Churchill': (Nu_vertical_cylinder_Popiel_Churchill, False, True, 1.00E+009, False),
}
def Nu_vertical_cylinder_methods(Pr, Gr, L=None, D=None, check_ranges=True):
    r'''This function returns a list of correlation names for free convection
    to a vertical cylinder.
    The preferred methods are 'Popiel & Churchill' for fully defined
    geometries, and 'McAdams, Weiss & Saunders' otherwise.
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number with respect to cylinder height [-]
L : float, optional
Length of vertical cylinder, [m]
D : float, optional
Diameter of cylinder, [m]
check_ranges : bool, optional
Whether or not to return only correlations suitable for the provided
data, [-]
Returns
-------
methods : list[str]
List of methods which can be used to calculate `Nu` with the given
inputs
Examples
--------
>>> Nu_vertical_cylinder_methods(0.72, 1E7)[0]
'McAdams, Weiss & Saunders'
'''
if L is None or D is None:
return ['McAdams, Weiss & Saunders', 'Churchill Vertical Plate',
'Griffiths, Davis, & Morgan', 'Jakob, Linke, & Morgan', 'Carne & Morgan',
'Eigenson & Morgan', 'Touloukian & Morgan', 'Kreith & Eckert', 'Hanesian, Kalish & Morgan']
else:
return ['Popiel & Churchill', 'Churchill Vertical Plate', 'Griffiths, Davis, & Morgan',
'Jakob, Linke, & Morgan', 'Carne & Morgan', 'Eigenson & Morgan', 'Touloukian & Morgan',
'McAdams, Weiss & Saunders', 'Kreith & Eckert', 'Hanesian, Kalish & Morgan',
'Al-Arabi & Khamis']
def Nu_vertical_cylinder(Pr, Gr, L=None, D=None, Method=None):
r'''This function handles choosing which vertical cylinder free convection
correlation is used. Generally this is used by a helper class, but can be
    used directly. Will automatically select the correlation to use if none is
    provided.
Preferred functions are 'Popiel & Churchill' for fully defined geometries,
and 'McAdams, Weiss & Saunders' otherwise.
Examples
--------
>>> Nu_vertical_cylinder(0.72, 1E7)
30.562236756513943
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number with respect to cylinder height [-]
L : float, optional
Length of vertical cylinder, [m]
D : float, optional
Diameter of cylinder, [m]
Returns
-------
Nu : float
Nusselt number, [-]
Other Parameters
----------------
Method : string, optional
A string of the function name to use, as in the dictionary
vertical_cylinder_correlations
'''
if Method is None:
if L is None or D is None:
Method2 = 'McAdams, Weiss & Saunders'
else:
Method2 = 'Popiel & Churchill'
else:
Method2 = Method
if Method2 == 'Churchill Vertical Plate':
return Nu_vertical_plate_Churchill(Pr=Pr, Gr=Gr)
elif Method2 == 'Griffiths, Davis, & Morgan':
return Nu_vertical_cylinder_Griffiths_Davis_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'Jakob, Linke, & Morgan':
return Nu_vertical_cylinder_Jakob_Linke_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'Carne & Morgan':
return Nu_vertical_cylinder_Carne_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'Eigenson & Morgan':
return Nu_vertical_cylinder_Eigenson_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'Touloukian & Morgan':
return Nu_vertical_cylinder_Touloukian_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'McAdams, Weiss & Saunders':
return Nu_vertical_cylinder_McAdams_Weiss_Saunders(Pr=Pr, Gr=Gr)
elif Method2 == 'Kreith & Eckert':
return Nu_vertical_cylinder_Kreith_Eckert(Pr=Pr, Gr=Gr)
elif Method2 == 'Hanesian, Kalish & Morgan':
return Nu_vertical_cylinder_Hanesian_Kalish_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'Al-Arabi & Khamis':
return Nu_vertical_cylinder_Al_Arabi_Khamis(Pr=Pr, Gr=Gr, L=L, D=D)
elif Method2 == 'Popiel & Churchill':
return Nu_vertical_cylinder_Popiel_Churchill(Pr=Pr, Gr=Gr, L=L, D=D)
else:
raise ValueError("Correlation name not recognized; see the "
"documentation for the available options.")
#import matplotlib.pyplot as plt
#import numpy as np
##L, D = 1.5, 0.1
#Pr, Gr = 0.72, 1E8
#methods = Nu_vertical_cylinder_methods(Pr, Gr)
#Grs = np.logspace(2, 12, 10000)
#
#for method in methods:
# Nus = [Nu_vertical_cylinder(Pr=Pr, Gr=i, Method=method) for i in Grs]
# plt.loglog(Grs, Nus, label=method)
#plt.legend()
#plt.show()
### Horizontal Cylinders
def Nu_horizontal_cylinder_Churchill_Chu(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a horizontal
cylinder according to the Churchill-Chu [1]_ correlation, also presented in
[2]_. Cylinder must be isothermal; an alternate expression exists for
constant heat flux.
.. math::
Nu_{D}=\left[0.60+\frac{0.387Ra_{D}^{1/6}}
{[1+(0.559/Pr)^{9/16}]^{8/27}}\right]^2
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number with respect to cylinder diameter, [-]
Returns
-------
Nu : float
Nusselt number with respect to cylinder diameter, [-]
Notes
-----
Although transition from laminar to turbulent is discrete in reality, this
equation provides a smooth transition in value from laminar to turbulent.
Checked with the original source, which has its powers unsimplified but
is equivalent.
[1]_ recommends 1E-5 as the lower limit for Ra, but no upper limit. [2]_
suggests an upper limit of 1E12.
Examples
--------
From [2]_, Example 9.2, matches:
>>> Nu_horizontal_cylinder_Churchill_Chu(0.69, 2.63E9)
139.13493970073597
References
----------
.. [1] Churchill, Stuart W., and Humbert H. S. Chu. "Correlating Equations
for Laminar and Turbulent Free Convection from a Horizontal Cylinder."
International Journal of Heat and Mass Transfer 18, no. 9
(September 1975): 1049-53. doi:10.1016/0017-9310(75)90222-7.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
Wiley, 2011.
'''
Ra = Pr*Gr
return (0.6 + 0.387*Ra**(1/6.)/(1. + (0.559/Pr)**(9/16.))**(8/27.))**2
def Nu_horizontal_cylinder_Kuehn_Goldstein(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a horizontal
cylinder according to the Kuehn-Goldstein [1]_ correlation, also shown in
[2]_. Cylinder must be isothermal.
.. math::
\frac{2}{Nu_D} = \ln\left[1 + \frac{2}{\left[\left\{0.518Ra_D^{0.25}
\left[1 + \left(\frac{0.559}{Pr}\right)^{3/5}\right]^{-5/12}
\right\}^{15} + (0.1Ra_D^{1/3})^{15}\right]^{1/15}}\right]
Parameters
----------
Pr : float
Prandtl number with respect to film temperature [-]
Gr : float
Grashof number with respect to cylinder diameter, [-]
Returns
-------
Nu : float
Nusselt number with respect to cylinder diameter, [-]
Notes
-----
[1]_ suggests this expression is valid for all cases except low-Pr fluids.
[2]_ suggests no restrictions.
Examples
--------
>>> Nu_horizontal_cylinder_Kuehn_Goldstein(0.69, 2.63E9)
122.99323525628186
References
----------
.. [1] Kuehn, T. H., and R. J. Goldstein. "Correlating Equations for
Natural Convection Heat Transfer between Horizontal Circular Cylinders."
International Journal of Heat and Mass Transfer 19, no. 10
(October 1976): 1127-34. doi:10.1016/0017-9310(76)90145-9
.. [2] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
return 2./log(1 + 2./((0.518*Ra**0.25*(1. + (0.559/Pr)**0.6)**(-5/12.))**15
+ (0.1*Ra**(1/3.))**15)**(1/15.))
def Nu_horizontal_cylinder_Morgan(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a horizontal
cylinder according to the Morgan [1]_ correlations, a product of a very
large review of the literature. Sufficiently common as to be shown in [2]_.
Cylinder must be isothermal.
.. math::
Nu_D = C Ra_D^n
    +----------+----------+-------+-------+
    | Ra min   | Ra max   | C     | n     |
    +==========+==========+=======+=======+
    | 1E-10    | 1E-2     | 0.675 | 0.058 |
    +----------+----------+-------+-------+
    | 1E-2     | 1E2      | 1.02  | 0.148 |
    +----------+----------+-------+-------+
    | 1E2      | 1E4      | 0.850 | 0.188 |
    +----------+----------+-------+-------+
    | 1E4      | 1E7      | 0.480 | 0.250 |
    +----------+----------+-------+-------+
    | 1E7      | 1E12     | 0.125 | 0.333 |
    +----------+----------+-------+-------+
Parameters
----------
Pr : float
Prandtl number with respect to film temperature [-]
Gr : float
Grashof number with respect to cylinder diameter, [-]
Returns
-------
Nu : float
Nusselt number with respect to cylinder diameter, [-]
Notes
-----
    The most comprehensive review to date, with a newly proposed equation.
    Discontinuous at the jumps between ranges. Blindly extrapolates outside the
    upper and lower limits without warning.
Examples
--------
>>> Nu_horizontal_cylinder_Morgan(0.69, 2.63E9)
151.3881997228419
References
----------
.. [1] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [2] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if Ra < 1E-2:
C, n = 0.675, 0.058
elif Ra < 1E2:
C, n = 1.02, 0.148
elif Ra < 1E4:
C, n = 0.850, 0.188
elif Ra < 1E7:
C, n = 0.480, 0.250
else:
# up to 1E12
C, n = 0.125, 0.333
return C*Ra**n
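# Editor's note -- an illustrative sketch, not part of the original module: the
# piecewise fit jumps at each range boundary, e.g. straddling Ra = 1e7 (with
# Pr = 1 so that Ra = Gr; values are arbitrary illustrations):
#
#     Nu_horizontal_cylinder_Morgan(1.0, 9.9e6)   # 0.480*Ra**0.250 branch
#     Nu_horizontal_cylinder_Morgan(1.0, 1.1e7)   # 0.125*Ra**0.333 branch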
horizontal_cylinder_correlations = {
'Churchill-Chu': (Nu_horizontal_cylinder_Churchill_Chu),
'Kuehn & Goldstein': (Nu_horizontal_cylinder_Kuehn_Goldstein),
'Morgan': (Nu_horizontal_cylinder_Morgan)
}
def Nu_horizontal_cylinder_methods(Pr, Gr, check_ranges=True):
    r'''This function returns a list of correlation names for free convection
    to a horizontal cylinder.
Preferred functions are 'Morgan' when discontinuous results are acceptable
and 'Churchill-Chu' otherwise.
Parameters
----------
Pr : float
Prandtl number with respect to film temperature [-]
Gr : float
Grashof number with respect to cylinder diameter, [-]
check_ranges : bool, optional
Whether or not to return only correlations suitable for the provided
data, [-]
Returns
-------
methods : list[str]
List of methods which can be used to calculate `Nu` with the given
inputs
Examples
--------
>>> Nu_horizontal_cylinder_methods(0.72, 1E7)[0]
'Morgan'
'''
return ['Morgan', 'Churchill-Chu', 'Kuehn & Goldstein']
def Nu_horizontal_cylinder(Pr, Gr, Method=None):
r'''This function handles choosing which horizontal cylinder free convection
correlation is used. Generally this is used by a helper class, but can be
    used directly. Will automatically select the correlation to use if none is
    provided.
Preferred functions are 'Morgan' when discontinuous results are acceptable
and 'Churchill-Chu' otherwise.
Parameters
----------
Pr : float
Prandtl number with respect to film temperature [-]
Gr : float
Grashof number with respect to cylinder diameter, [-]
Returns
-------
Nu : float
Nusselt number with respect to cylinder diameter, [-]
Other Parameters
----------------
Method : string, optional
A string of the function name to use, as in the dictionary
horizontal_cylinder_correlations
Notes
-----
All fluid properties should be evaluated at the film temperature, the
average between the outer surface temperature of the solid, and the fluid
temperature far away from the heat transfer interface - normally the same
as the temperature before any cooling or heating occurs.
.. math::
T_f = (T_{\text{surface}} + T_\infty)/2
Examples
--------
>>> Nu_horizontal_cylinder(0.72, 1E7)
24.864192615468973
'''
if Method is None:
Method2 = 'Morgan'
else:
Method2 = Method
if Method2 == 'Churchill-Chu':
return Nu_horizontal_cylinder_Churchill_Chu(Pr=Pr, Gr=Gr)
elif Method2 == 'Kuehn & Goldstein':
return Nu_horizontal_cylinder_Kuehn_Goldstein(Pr=Pr, Gr=Gr)
elif Method2 == 'Morgan':
return Nu_horizontal_cylinder_Morgan(Pr=Pr, Gr=Gr)
else:
raise ValueError("Correlation name not recognized; see the "
"documentation for the available options.")
#import matplotlib.pyplot as plt
#import numpy as np
#Pr, Gr = 0.72, 1E8
#methods = Nu_horizontal_cylinder_methods(Pr, Gr)
#Grs = np.logspace(-2, 2.5, 10000)
#
#for method in methods:
# Nus = [Nu_horizontal_cylinder(Pr=Pr, Gr=i, Method=method) for i in Grs]
# plt.semilogx(Grs, Nus, label=method)
#plt.legend()
#plt.show()
def Nu_coil_Xin_Ebadian(Pr, Gr, horizontal=False):
r'''Calculates Nusselt number for natural convection around a vertical
or horizontal helical coil suspended in a fluid without
forced convection.
For horizontal cases:
.. math::
Nu_D = 0.318 Ra_D^{0.293},\; 5 \times {10}^{3} < Ra < 1 \times {10}^5
For vertical cases:
.. math::
Nu_D = 0.290 Ra_D^{0.293},\; 5 \times {10}^{3} < Ra < 1 \times {10}^5
Parameters
----------
    Pr : float
        Prandtl number calculated at the film temperature (the average of the
        wall temperature and the fluid temperature far from the coil), [-]
    Gr : float
        Grashof number calculated at the film temperature (as above), using
        the outer diameter of the coil [-]
horizontal : bool, optional
Whether the coil is horizontal or vertical, [-]
Returns
-------
Nu : float
Nusselt number using the outer diameter of the coil
and the film temperature, [-]
Notes
-----
This correlation is also reviewed in [2]_.
Examples
--------
>>> Nu_coil_Xin_Ebadian(0.7, 2E4, horizontal=False)
4.755689726250451
>>> Nu_coil_Xin_Ebadian(0.7, 2E4, horizontal=True)
5.2148597687849785
References
----------
.. [1] Xin, R. C., and M. A. Ebadian. "Natural Convection Heat Transfer
from Helicoidal Pipes." Journal of Thermophysics and Heat Transfer 10,
no. 2 (1996): 297-302.
.. [2] Prabhanjan, Devanahalli G., Timothy J. Rennie, and G. S. Vijaya
Raghavan. "Natural Convection Heat Transfer from Helical Coiled Tubes."
International Journal of Thermal Sciences 43, no. 4 (April 1, 2004):
359-65.
'''
Ra = Pr*Gr
if horizontal:
return 0.318*Ra**0.293
else:
return 0.290*Ra**0.293 | mit |
jayflo/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 121 | 3429 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
    # Non-regression: before fit, there should be no fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
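# Editor's note -- an illustrative sketch, not part of the original test suite:
# the estimator workflow exercised by these tests is simply
#
#     bandwidth = estimate_bandwidth(X, n_samples=200)
#     ms = MeanShift(bandwidth=bandwidth, bin_seeding=True).fit(X)
#     ms.cluster_centers_, ms.labels_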
| bsd-3-clause |
alexpeattie/wethepeopletoolkit | wethepeopletoolkit/clusterer.py | 1 | 2828 | import pandas
import numpy as np
import click
from bitstring import BitArray
from base58 import b58encode_int, b58decode_int
class Clusterer:
def __init__(self):
pass
def cluster(self, n, state_processor, pca = False, model_type = 'kmeans', z_score_exclude = 0.0, seed = None, quiet = False):
from sklearn.cluster import FeatureAgglomeration, KMeans, SpectralClustering
from scipy import stats
model_types = {
'feature-agglomeration': FeatureAgglomeration,
'kmeans': KMeans,
'spectral': SpectralClustering,
}
states = state_processor.states(two_d = pca)
excluded_states, labels = [], []
if z_score_exclude > 0:
if not model_type == 'kmeans':
raise click.UsageError("--z-score-exclude can only be used when --model-type is 'kmeans'")
states_2d = state_processor.states(two_d = True)
excluded_states = states[-(np.abs(stats.zscore(states_2d)) < z_score_exclude).all(axis=1)]
states = states[(np.abs(stats.zscore(states_2d)) < z_score_exclude).all(axis=1)]
seed = seed or np.random.randint(0, 10 ** 6)
np.random.seed(seed)
if not quiet:
click.echo("Clustering with seed %d..." % seed)
self.model = model_types[model_type](n_clusters = n)
self.data = states.as_matrix()
self.model.fit(self.data)
labels = self.model.labels_
self.results = pandas.DataFrame([states.index, self.model.labels_]).T.sort_values(by=0)
if any(excluded_states):
excluded_results = pandas.DataFrame([excluded_states.index, self.model.predict(excluded_states)]).T
self.results = pandas.DataFrame(np.concatenate([self.results, excluded_results]))
def cluster_ids(self):
labels = self.results[1]
sorted_labels = sorted(labels.unique())
ids = map(lambda l: b58encode_int(BitArray((labels == l).astype(int).tolist()).uint), sorted_labels)
return zip(sorted_labels, ids)
def cluster_id_to_states(self, cluster_id):
states = np.array(['AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA', 'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD', 'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ', 'NM', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WA', 'WV', 'WI', 'WY'])
return states[list(BitArray(uint = b58decode_int(cluster_id), length = 50))]
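  # Editor's note -- an illustrative sketch, not part of the original class: a
  # cluster id is a base58-encoded 50-bit membership mask over the alphabetical
  # state list above, so it can be decoded without access to a fitted model:
  #
  #     mask = [1, 1] + [0] * 48                  # hypothetical cluster: AL and AK only
  #     cid = b58encode_int(BitArray(mask).uint)
  #     Clusterer().cluster_id_to_states(cid)     # -> array(['AL', 'AK'], ...)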
def evaluate(self, metric, distance = None):
from sklearn.metrics import silhouette_score, calinski_harabaz_score
if metric == 'silhouette':
return silhouette_score(self.data, self.model.labels_, metric = distance)
if metric == 'calinski_harabaz':
return calinski_harabaz_score(self.data, self.model.labels_)
if metric == 'inertia':
return self.model.inertia_
def results_dict(self):
return self.results.set_index(0)[1].to_dict() | mit |
pbvarga1/qimage2ndarray | qimage2ndarray/__init__.py | 1 | 14973 | import sys as _sys
import numpy as _np
from .dynqt import QtGui as _qt
from .dynqt import qt as _qt_driver
if _qt_driver.name() == 'PythonQt':
from .qimageview import QImage2ndarray as _temp
_qimageview = _temp.qimageview
else:
from .qimageview_python import qimageview as _qimageview
__version__ = "1.5"
if _sys.byteorder == 'little':
_bgra = (0, 1, 2, 3)
else:
_bgra = (3, 2, 1, 0)
_bgra_fields = {'b': (_np.uint8, _bgra[0], 'blue'),
'g': (_np.uint8, _bgra[1], 'green'),
'r': (_np.uint8, _bgra[2], 'red'),
'a': (_np.uint8, _bgra[3], 'alpha')}
bgra_dtype = _np.dtype(_bgra_fields)
"""Complex dtype offering the named fields 'r','g','b', and 'a' and
corresponding long names, conforming to QImage_'s 32-bit memory layout."""
try:
_basestring = basestring
except NameError:
# 'basestring' undefined, must be Python 3
_basestring = str
def _qimage_or_filename_view(qimage):
if isinstance(qimage, _basestring):
qimage = _qt.QImage(qimage)
return _qimageview(qimage)
def raw_view(qimage):
"""Returns raw 2D view of the given QImage_'s memory. The result
will be a 2-dimensional numpy.ndarray with an appropriately sized
    integral dtype. (This function is not intended to be used
directly, but used internally by the other -- more convenient --
view creation functions.)
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_
:rtype: numpy.ndarray_ with shape (height, width)"""
return _qimage_or_filename_view(qimage)
def byte_view(qimage, byteorder = 'little'):
"""Returns raw 3D view of the given QImage_'s memory. This will
always be a 3-dimensional numpy.ndarray with dtype numpy.uint8.
Note that for 32-bit images, the last dimension will be in the
[B,G,R,A] order (if little endian) due to QImage_'s memory layout
(the alpha channel will be present for Format_RGB32 images, too).
For 8-bit (indexed) images, the array will still be 3-dimensional,
i.e. shape will be (height, width, 1).
The order of channels in the last axis depends on the `byteorder`,
which defaults to 'little', i.e. BGRA order. You may set the
argument `byteorder` to 'big' to get ARGB, or use None which means
sys.byteorder here, i.e. return native order for the machine the
code is running on.
For your convenience, `qimage` may also be a filename, see
`Loading and Saving Images`_ in the documentation.
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_
:param byteorder: specify order of channels in last axis
:rtype: numpy.ndarray_ with shape (height, width, 1 or 4) and dtype uint8"""
raw = _qimage_or_filename_view(qimage)
result = raw.view(_np.uint8).reshape(raw.shape + (-1, ))
if byteorder and byteorder != _sys.byteorder:
result = result[...,::-1]
return result
def rgb_view(qimage, byteorder = 'big'):
"""Returns RGB view of a given 32-bit color QImage_'s memory.
Similarly to byte_view(), the result is a 3D numpy.uint8 array,
but reduced to the rgb dimensions (without alpha), and reordered
(using negative strides in the last dimension) to have the usual
[R,G,B] order. The image must have 32 bit pixel size, i.e. be
RGB32, ARGB32, or ARGB32_Premultiplied. (Note that in the latter
case, the values are of course premultiplied with alpha.)
The order of channels in the last axis depends on the `byteorder`,
which defaults to 'big', i.e. RGB order. You may set the argument
`byteorder` to 'little' to get BGR, or use None which means
sys.byteorder here, i.e. return native order for the machine the
code is running on.
For your convenience, `qimage` may also be a filename, see
`Loading and Saving Images`_ in the documentation.
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_ with 32-bit pixel type
:param byteorder: specify order of channels in last axis
:rtype: numpy.ndarray_ with shape (height, width, 3) and dtype uint8"""
if byteorder is None:
byteorder = _sys.byteorder
bytes = byte_view(qimage, byteorder)
if bytes.shape[2] != 4:
raise ValueError("For rgb_view, the image must have 32 bit pixel size (use RGB32, ARGB32, or ARGB32_Premultiplied)")
if byteorder == 'little':
return bytes[...,:3] # strip A off BGRA
else:
return bytes[...,1:] # strip A off ARGB
def alpha_view(qimage):
"""Returns alpha view of a given 32-bit color QImage_'s memory.
The result is a 2D numpy.uint8 array, equivalent to
byte_view(qimage)[...,3]. The image must have 32 bit pixel size,
i.e. be RGB32, ARGB32, or ARGB32_Premultiplied. Note that it is
not enforced that the given qimage has a format that actually
*uses* the alpha channel -- for Format_RGB32, the alpha channel
usually contains 255 everywhere.
For your convenience, `qimage` may also be a filename, see
`Loading and Saving Images`_ in the documentation.
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_ with 32-bit pixel type
:rtype: numpy.ndarray_ with shape (height, width) and dtype uint8"""
bytes = byte_view(qimage, byteorder = None)
if bytes.shape[2] != 4:
raise ValueError("For alpha_view, the image must have 32 bit pixel size (use RGB32, ARGB32, or ARGB32_Premultiplied)")
return bytes[...,_bgra[3]]
def recarray_view(qimage):
"""Returns recarray_ view of a given 32-bit color QImage_'s
memory.
The result is a 2D array with a complex record dtype, offering the
named fields 'r','g','b', and 'a' and corresponding long names.
Thus, each color components can be accessed either via string
indexing or via attribute lookup (through numpy.recarray_):
For your convenience, `qimage` may also be a filename, see
`Loading and Saving Images`_ in the documentation.
>>> from PyQt4.QtGui import QImage, qRgb
>>> qimg = QImage(320, 240, QImage.Format_ARGB32)
>>> qimg.fill(qRgb(12,34,56))
>>>
>>> import qimage2ndarray
>>> v = qimage2ndarray.recarray_view(qimg)
>>>
>>> red = v["r"]
>>> red[10,10]
12
>>> pixel = v[10,10]
>>> pixel["r"]
12
>>> (v.g == v["g"]).all()
True
>>> (v.alpha == 255).all()
True
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_ with 32-bit pixel type
:rtype: numpy.ndarray_ with shape (height, width) and dtype :data:`bgra_dtype`"""
raw = _qimage_or_filename_view(qimage)
if raw.itemsize != 4:
raise ValueError("For rgb_view, the image must have 32 bit pixel size (use RGB32, ARGB32, or ARGB32_Premultiplied)")
return raw.view(bgra_dtype, _np.recarray)
def _normalize255(array, normalize, clip = (0, 255)):
if normalize:
if normalize is True:
normalize = array.min(), array.max()
if clip == (0, 255):
clip = None
elif _np.isscalar(normalize):
normalize = (0, normalize)
nmin, nmax = normalize
if nmin:
array = array - nmin
if nmax != nmin:
scale = 255. / (nmax - nmin)
if scale != 1.0:
array = array * scale
if clip:
low, high = clip
_np.clip(array, low, high, array)
return array
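# Editor's note -- an illustrative sketch, not part of the original module:
# _normalize255 maps the range given by `normalize` onto 0..255 and clips, e.g.
#
#     _normalize255(_np.array([-0.5, 0.0, 0.5, 1.0, 2.0]), normalize=(0.0, 1.0))
#     # -> array([   0. ,    0. ,  127.5,  255. ,  255. ])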
def gray2qimage(gray, normalize = False):
"""Convert the 2D numpy array `gray` into a 8-bit, indexed QImage_
with a gray colormap. The first dimension represents the vertical
image axis.
The parameter `normalize` can be used to normalize an image's
value range to 0..255:
`normalize` = (nmin, nmax):
scale & clip image values from nmin..nmax to 0..255
`normalize` = nmax:
lets nmin default to zero, i.e. scale & clip the range 0..nmax
to 0..255
`normalize` = True:
scale image values to 0..255 (same as passing (gray.min(),
gray.max()))
If the source array `gray` contains masked values, the result will
have only 255 shades of gray, and one color map entry will be used
to make the corresponding pixels transparent.
A full alpha channel cannot be supported with indexed images;
instead, use `array2qimage` to convert into a 32-bit QImage.
:param gray: image data which should be converted (copied) into a QImage_
:type gray: 2D or 3D numpy.ndarray_ or `numpy.ma.array <masked arrays>`_
:param normalize: normalization parameter (see above, default: no value changing)
:type normalize: bool, scalar, or pair
:rtype: QImage_ with RGB32 or ARGB32 format"""
if _np.ndim(gray) != 2:
raise ValueError("gray2QImage can only convert 2D arrays" +
" (try using array2qimage)" if _np.ndim(gray) == 3 else "")
h, w = gray.shape
result = _qt.QImage(w, h, _qt.QImage.Format_Indexed8)
if not _np.ma.is_masked(gray):
for i in range(256):
result.setColor(i, _qt.qRgb(i,i,i))
_qimageview(result)[:] = _normalize255(gray, normalize)
else:
# map gray value 1 to gray value 0, in order to make room for
# transparent colormap entry:
result.setColor(0, _qt.qRgb(0,0,0))
for i in range(2, 256):
result.setColor(i-1, _qt.qRgb(i,i,i))
_qimageview(result)[:] = _normalize255(gray, normalize, clip = (1, 255)) - 1
result.setColor(255, 0)
_qimageview(result)[gray.mask] = 255
return result
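# Editor's note -- an illustrative sketch, not part of the original module
# (assumes a supported Qt binding is importable):
#
#     import numpy as np
#     gradient = np.linspace(0, 1, 64 * 256).reshape(64, 256)
#     qimg = gray2qimage(gradient, normalize=True)   # 8-bit indexed, gray colormap
#     qimg.save("gradient.png")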
def array2qimage(array, normalize = False):
"""Convert a 2D or 3D numpy array into a 32-bit QImage_. The
first dimension represents the vertical image axis; the optional
third dimension is supposed to contain 1-4 channels:
========= ===================
#channels interpretation
========= ===================
1 scalar/gray
2 scalar/gray + alpha
3 RGB
4 RGB + alpha
========= ===================
Scalar data will be converted into corresponding gray RGB triples;
if you want to convert to an (indexed) 8-bit image instead, use
`gray2qimage` (which cannot support an alpha channel though).
The parameter `normalize` can be used to normalize an image's
value range to 0..255:
`normalize` = (nmin, nmax):
scale & clip image values from nmin..nmax to 0..255
`normalize` = nmax:
lets nmin default to zero, i.e. scale & clip the range 0..nmax
to 0..255
`normalize` = True:
scale image values to 0..255 (same as passing (array.min(),
array.max()))
If `array` contains masked values, the corresponding pixels will
be transparent in the result. Thus, the result will be of
QImage.Format_ARGB32 if the input already contains an alpha
channel (i.e. has shape (H,W,4)) or if there are masked pixels,
and QImage.Format_RGB32 otherwise.
:param array: image data which should be converted (copied) into a QImage_
:type array: 2D or 3D numpy.ndarray_ or `numpy.ma.array <masked arrays>`_
:param normalize: normalization parameter (see above, default: no value changing)
:type normalize: bool, scalar, or pair
:rtype: QImage_ with RGB32 or ARGB32 format"""
if _np.ndim(array) == 2:
array = array[...,None]
elif _np.ndim(array) != 3:
raise ValueError("array2qimage can only convert 2D or 3D arrays (got %d dimensions)" % _np.ndim(array))
if array.shape[2] not in (1, 2, 3, 4):
raise ValueError("array2qimage expects the last dimension to contain exactly one (scalar/gray), two (gray+alpha), three (R,G,B), or four (R,G,B,A) channels")
h, w, channels = array.shape
hasAlpha = _np.ma.is_masked(array) or channels in (2, 4)
fmt = _qt.QImage.Format_ARGB32 if hasAlpha else _qt.QImage.Format_RGB32
result = _qt.QImage(w, h, fmt)
array = _normalize255(array, normalize)
if channels >= 3:
rgb_view(result)[:] = array[...,:3]
else:
rgb_view(result)[:] = array[...,:1] # scalar data
alpha = alpha_view(result)
if channels in (2, 4):
alpha[:] = array[...,-1]
else:
alpha[:] = 255
if _np.ma.is_masked(array):
alpha[:] *= _np.logical_not(_np.any(array.mask, axis = -1))
return result
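# Editor's note -- an illustrative sketch, not part of the original module
# (assumes a supported Qt binding is importable):
#
#     import numpy as np
#     rgb = np.zeros((32, 32, 3), dtype=np.uint8)
#     rgb[..., 0] = 255                                # solid red square
#     qimg = array2qimage(rgb)                         # Format_RGB32
#     rgba = np.dstack([rgb, np.full((32, 32), 128, dtype=np.uint8)])
#     qimg_alpha = array2qimage(rgba)                  # Format_ARGB32, half-transparent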
def imread(filename, masked = False):
"""Convenience function that uses the QImage_ constructor to read an
image from the given file and return an `rgb_view` of the result.
This is intentionally similar to scipy.ndimage.imread (which uses
PIL), scipy.misc.imread, or matplotlib.pyplot.imread (using PIL
for non-PNGs).
For grayscale images, return 2D array (even if it comes from a 32-bit
representation; this is a consequence of the QImage API).
For images with an alpha channel, the resulting number of channels
will be 2 (grayscale+alpha) or 4 (RGB+alpha). Alternatively, one may
pass `masked = True' in order to get `numpy.ma.array <masked
arrays>`_ back. Note that only fully transparent pixels are masked
(and that masked arrays only support binary masks). The value of
`masked` is ignored when the loaded image has no alpha channel
(i.e., one would not get a masked array in that case).
This function has been added in version 1.3.
"""
qImage = _qt.QImage(filename)
isGray = qImage.isGrayscale()
if isGray and qImage.depth() == 8:
return byte_view(qImage)[...,0]
hasAlpha = qImage.hasAlphaChannel()
if hasAlpha:
targetFormat = _qt.QImage.Format_ARGB32
else:
targetFormat = _qt.QImage.Format_RGB32
if qImage.format() != targetFormat:
qImage = qImage.convertToFormat(targetFormat)
result = rgb_view(qImage)
if isGray:
result = result[...,0]
if hasAlpha:
if masked:
mask = (alpha_view(qImage) == 0)
if _np.ndim(result) == 3:
mask = _np.repeat(mask[...,None], 3, axis = 2)
result = _np.ma.masked_array(result, mask)
else:
result = _np.dstack((result, alpha_view(qImage)))
return result
def imsave(filename, image, normalize = False, format = None, quality = -1):
"""Convenience function that uses QImage.save to save an image to the
given file. This is intentionally similar to scipy.misc.imsave.
However, it supports different optional arguments:
:param normalize: see :func:`array2qimage` (which is used internally)
:param format: image filetype (e.g. 'PNG'), (default: check filename's suffix)
:param quality: see QImage.save (0 = small .. 100 = uncompressed, -1 = default compression)
:returns: boolean success, see QImage.save
This function has been added in version 1.4.
"""
qImage = array2qimage(image, normalize = normalize)
return qImage.save(filename, format, quality)
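# Illustrative round-trip sketch combining imread and imsave (comment only;
# "input.png" and "output.png" are made-up file names):
#
#     arr = imread("input.png", masked=True)     # masked where fully transparent
#     ok = imsave("output.png", arr, normalize=True, quality=90)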
| bsd-3-clause |
CDSFinance/zipline | zipline/utils/factory.py | 6 | 11228 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Factory functions to prepare useful data.
"""
import pytz
import random
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from zipline.protocol import Event, DATASOURCE_TYPE
from zipline.sources import (SpecificEquityTrades,
DataFrameSource,
DataPanelSource)
from zipline.finance.trading import SimulationParameters, TradingEnvironment
from zipline.sources.test_source import create_trade
# For backwards compatibility
from zipline.data.loader import (load_from_yahoo,
load_bars_from_yahoo)
__all__ = ['load_from_yahoo', 'load_bars_from_yahoo']
def create_simulation_parameters(year=2006, start=None, end=None,
capital_base=float("1.0e5"),
num_days=None, load=None,
data_frequency='daily',
emission_rate='daily',
env=None):
"""Construct a complete environment with reasonable defaults"""
if env is None:
env = TradingEnvironment(load=load)
if start is None:
start = datetime(year, 1, 1, tzinfo=pytz.utc)
if end is None:
if num_days:
start_index = env.trading_days.searchsorted(
start)
end = env.trading_days[start_index + num_days - 1]
else:
end = datetime(year, 12, 31, tzinfo=pytz.utc)
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=capital_base,
data_frequency=data_frequency,
emission_rate=emission_rate,
env=env,
)
return sim_params
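# Illustrative sketch of using the factory above in a test (comment only; it
# assumes the default TradingEnvironment can load benchmark/treasury data):
#
#     sim_params = create_simulation_parameters(year=2006, num_days=30,
#                                               capital_base=1e4)
#     assert sim_params.period_start.year == 2006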
def create_random_simulation_parameters():
env = TradingEnvironment()
treasury_curves = env.treasury_curves
for n in range(100):
random_index = random.randint(
0,
len(treasury_curves) - 1
)
start_dt = treasury_curves.index[random_index]
end_dt = start_dt + timedelta(days=365)
now = datetime.utcnow().replace(tzinfo=pytz.utc)
if end_dt <= now:
break
assert end_dt <= now, """
failed to find a suitable daterange after 100 attempts. please double
check treasury and benchmark data in findb, and re-run the test."""
sim_params = SimulationParameters(
period_start=start_dt,
period_end=end_dt,
env=env,
)
return sim_params, start_dt, end_dt
def get_next_trading_dt(current, interval, env):
next_dt = pd.Timestamp(current).tz_convert(env.exchange_tz)
while True:
# Convert the timestamp to naive before adding the interval; otherwise,
# when stepping over EDT an hour is added.
next_dt = pd.Timestamp(next_dt.replace(tzinfo=None))
next_dt = next_dt + interval
next_dt = pd.Timestamp(next_dt, tz=env.exchange_tz)
next_dt_utc = next_dt.tz_convert('UTC')
if env.is_market_hours(next_dt_utc):
break
next_dt = next_dt_utc.tz_convert(env.exchange_tz)
return next_dt_utc
def create_trade_history(sid, prices, amounts, interval, sim_params, env,
source_id="test_factory"):
trades = []
current = sim_params.first_open
oneday = timedelta(days=1)
use_midnight = interval >= oneday
for price, amount in zip(prices, amounts):
if use_midnight:
trade_dt = current.replace(hour=0, minute=0)
else:
trade_dt = current
trade = create_trade(sid, price, amount, trade_dt, source_id)
trades.append(trade)
current = get_next_trading_dt(current, interval, env)
assert len(trades) == len(prices)
return trades
def create_dividend(sid, payment, declared_date, ex_date, pay_date):
div = Event({
'sid': sid,
'gross_amount': payment,
'net_amount': payment,
'payment_sid': None,
'ratio': None,
'declared_date': pd.tslib.normalize_date(declared_date),
'ex_date': pd.tslib.normalize_date(ex_date),
'pay_date': pd.tslib.normalize_date(pay_date),
'type': DATASOURCE_TYPE.DIVIDEND,
'source_id': 'MockDividendSource'
})
return div
def create_stock_dividend(sid, payment_sid, ratio, declared_date,
ex_date, pay_date):
return Event({
'sid': sid,
'payment_sid': payment_sid,
'ratio': ratio,
'net_amount': None,
'gross_amount': None,
'dt': pd.tslib.normalize_date(declared_date),
'ex_date': pd.tslib.normalize_date(ex_date),
'pay_date': pd.tslib.normalize_date(pay_date),
'type': DATASOURCE_TYPE.DIVIDEND,
'source_id': 'MockDividendSource'
})
def create_split(sid, ratio, date):
return Event({
'sid': sid,
'ratio': ratio,
'dt': date.replace(hour=0, minute=0, second=0, microsecond=0),
'type': DATASOURCE_TYPE.SPLIT,
'source_id': 'MockSplitSource'
})
def create_txn(sid, price, amount, datetime):
txn = Event({
'sid': sid,
'amount': amount,
'dt': datetime,
'price': price,
'type': DATASOURCE_TYPE.TRANSACTION,
'source_id': 'MockTransactionSource'
})
return txn
def create_commission(sid, value, datetime):
txn = Event({
'dt': datetime,
'type': DATASOURCE_TYPE.COMMISSION,
'cost': value,
'sid': sid,
'source_id': 'MockCommissionSource'
})
return txn
def create_txn_history(sid, priceList, amtList, interval, sim_params, env):
txns = []
current = sim_params.first_open
for price, amount in zip(priceList, amtList):
current = get_next_trading_dt(current, interval, env)
txns.append(create_txn(sid, price, amount, current))
current = current + interval
return txns
def create_returns_from_range(sim_params):
return pd.Series(index=sim_params.trading_days,
data=np.random.rand(len(sim_params.trading_days)))
def create_returns_from_list(returns, sim_params):
return pd.Series(index=sim_params.trading_days[:len(returns)],
data=returns)
def create_daily_trade_source(sids, sim_params, env, concurrent=False):
"""
creates trade_count trades for each sid in sids list.
first trade will be on sim_params.period_start, and daily
thereafter for each sid. Thus, two sids should result in two trades per
day.
"""
return create_trade_source(
sids,
timedelta(days=1),
sim_params,
env=env,
concurrent=concurrent,
)
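# Illustrative sketch (comment only; sids 24/25 are arbitrary and the default
# data bundle is assumed to load): with a daily source, two sids yield two
# trade events per trading day starting at sim_params.period_start.
#
#     env = TradingEnvironment()
#     sim_params = create_simulation_parameters(num_days=5, env=env)
#     source = create_daily_trade_source([24, 25], sim_params, env)
#     events = list(source)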
def create_minutely_trade_source(sids, sim_params, env, concurrent=False):
"""
creates trade_count trades for each sid in sids list.
first trade will be on sim_params.period_start, and every minute
thereafter for each sid. Thus, two sids should result in two trades per
minute.
"""
return create_trade_source(
sids,
timedelta(minutes=1),
sim_params,
env=env,
concurrent=concurrent,
)
def create_trade_source(sids, trade_time_increment, sim_params, env,
concurrent=False):
# If the sim_params define an end that is during market hours, that will be
# used as the end of the data source
if env.is_market_hours(sim_params.period_end):
end = sim_params.period_end
# Otherwise, the last_close after the period_end is used as the end of the
# data source
else:
end = sim_params.last_close
args = tuple()
kwargs = {
'sids': sids,
'start': sim_params.first_open,
'end': end,
'delta': trade_time_increment,
'filter': sids,
'concurrent': concurrent,
'env': env,
}
source = SpecificEquityTrades(*args, **kwargs)
return source
def create_test_df_source(sim_params=None, env=None, bars='daily'):
if bars == 'daily':
freq = pd.datetools.BDay()
elif bars == 'minute':
freq = pd.datetools.Minute()
else:
raise ValueError('%s bars not understood.' % bars)
if sim_params and bars == 'daily':
index = sim_params.trading_days
else:
if env is None:
env = TradingEnvironment()
start = pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)
days = env.days_in_range(start, end)
if bars == 'daily':
index = days
if bars == 'minute':
index = pd.DatetimeIndex([], freq=freq)
for day in days:
day_index = env.market_minutes_for_day(day)
index = index.append(day_index)
x = np.arange(1, len(index) + 1)
df = pd.DataFrame(x, index=index, columns=[0])
return DataFrameSource(df), df
def create_test_panel_source(sim_params=None, env=None, source_type=None):
start = sim_params.first_open \
if sim_params else pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
end = sim_params.last_close \
if sim_params else pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)
if env is None:
env = TradingEnvironment()
index = env.days_in_range(start, end)
price = np.arange(0, len(index))
volume = np.ones(len(index)) * 1000
arbitrary = np.ones(len(index))
df = pd.DataFrame({'price': price,
'volume': volume,
'arbitrary': arbitrary},
index=index)
if source_type:
source_types = np.full(len(index), source_type)
df['type'] = source_types
panel = pd.Panel.from_dict({0: df})
return DataPanelSource(panel), panel
def create_test_panel_ohlc_source(sim_params, env):
start = sim_params.first_open \
if sim_params else pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
end = sim_params.last_close \
if sim_params else pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)
index = env.days_in_range(start, end)
price = np.arange(0, len(index)) + 100
high = price * 1.05
low = price * 0.95
open_ = price + .1 * (price % 2 - .5)
volume = np.ones(len(index)) * 1000
arbitrary = np.ones(len(index))
df = pd.DataFrame({'price': price,
'high': high,
'low': low,
'open': open_,
'volume': volume,
'arbitrary': arbitrary},
index=index)
panel = pd.Panel.from_dict({0: df})
return DataPanelSource(panel), panel
| apache-2.0 |
mortonjt/American-Gut | scripts/mod2_pcoa.py | 1 | 14487 | #!/usr/bin/env python
import os
import click
from matplotlib import use
use('Agg') # noqa
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from skbio import read, DistanceMatrix
from skbio.stats import isubsample
from skbio.stats.ordination import OrdinationResults
from collections import defaultdict
from collections import OrderedDict
ALPHA = 1.0
LINE_WIDTH = 0.3
LINE_WIDTH_WHITE = 2.0
LINE_WIDTH_BLACK = 1.0
@click.group()
def mod2_pcoa():
pass
@mod2_pcoa.command()
@click.option('--coords', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Coordinates file')
@click.option('--mapping_file', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Mapping file')
@click.option('--output', required=True, type=click.Path(exists=True,
writable=True, resolve_path=True), help='Output directory')
@click.option('--prefix', required=True, type=str, help='Output file prefix')
@click.option('--samples', required=False, type=str,
help='Comma separated list of samples to print')
def body_site(coords, mapping_file, output, prefix, samples):
"""Generates as many figures as samples in the coordinates file"""
o = read(coords, into=OrdinationResults)
# coordinates
c_df = pd.DataFrame(o.site, o.site_ids)
# mapping file
mf = pd.read_csv(mapping_file, '\t', converters=defaultdict(str),
index_col='#SampleID')
mf = mf.loc[o.site_ids]
if samples is None:
samples = mf.index
else:
samples = set(samples.split(',')).intersection(set(o.site_ids))
samples = mf.loc[samples].index
color_hmp_fecal = sns.color_palette('Paired', 12)[10] # light brown
color_agp_fecal = sns.color_palette('Paired', 12)[11] # dark brown
color_hmp_oral = sns.color_palette('Paired', 12)[0] # light blue
color_agp_oral = sns.color_palette('Paired', 12)[1] # dark blue
color_hmp_skin = sns.color_palette('Paired', 12)[2] # light green
color_agp_skin = sns.color_palette('Paired', 12)[3] # dark green
grp_colors = {'AGP-FECAL': color_agp_fecal,
'AGP-ORAL': color_agp_oral,
'AGP-SKIN': color_agp_skin,
'HMP-FECAL': color_hmp_fecal,
'GG-FECAL': color_hmp_fecal,
'PGP-FECAL': color_hmp_fecal,
'HMP-ORAL': color_hmp_oral,
'PGP-ORAL': color_hmp_oral,
'HMP-SKIN': color_hmp_skin,
'PGP-SKIN': color_hmp_skin}
for sample in samples:
# plot categories as 50 slices with random zorder
for grp, color in grp_colors.iteritems():
sub_coords = c_df[mf.TITLE_BODY_SITE == grp].values
for i in np.array_split(sub_coords, 50):
plt.scatter(i[:, 0], i[:, 1], color=color,
edgecolor=np.asarray(color)*0.6, lw=LINE_WIDTH,
alpha=ALPHA, zorder=np.random.rand())
# plot participant's dot
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=grp_colors[mf.loc[sample]['TITLE_BODY_SITE']],
s=270, edgecolor='w', zorder=1, lw=LINE_WIDTH_WHITE)
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=grp_colors[mf.loc[sample]['TITLE_BODY_SITE']],
s=250, edgecolor=np.asarray(
grp_colors[mf.loc[sample]['TITLE_BODY_SITE']])*0.6,
zorder=2, lw=LINE_WIDTH_BLACK)
plt.axis('off')
my_dpi = 72
figsize = (1000 / my_dpi, 1000 / my_dpi)
out_file = os.path.join(output, '.'.join([prefix, sample, 'pdf']))
plt.savefig(out_file, figsize=figsize, dpi=my_dpi)
plt.close()
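# Example invocation of the body_site subcommand (hypothetical file names):
#
#   python mod2_pcoa.py body_site --coords unweighted_unifrac_pc.txt \
#       --mapping_file mapping.txt --output figures --prefix bodysite \
#       --samples 000001234,000005678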
@mod2_pcoa.command()
@click.option('--distmat', required=True, type=click.Path(resolve_path=True,
readable=True,
exists=True),
help='Input distance matrix to subsample nearest sample')
@click.option('--mapping_file', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Mapping file')
@click.option('--max', required=True, type=int,
help='Max number of samples per category value')
@click.option('--category', required=True, type=str,
help='The category to subsample in (likely COUNTRY)')
@click.option('--output', required=True, type=click.Path(exists=False,
writable=True, resolve_path=True), help='Output file')
def subsample_dm(distmat, mapping_file, max, category, output):
"""Subsample the distmat to max samples per category value"""
mf = pd.read_csv(mapping_file, '\t', converters=defaultdict(str),
index_col='#SampleID')
id_to_cat = dict(mf[category])
def bin_f(x):
return id_to_cat[x]
dm = read(distmat, into=DistanceMatrix)
dm = dm.filter([id for _, id in isubsample(dm.ids, max, bin_f=bin_f)])
dm.to_file(output)
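# Example invocation of the subsample_dm subcommand (hypothetical file names):
#
#   python mod2_pcoa.py subsample_dm --distmat unweighted_unifrac_dm.txt \
#       --mapping_file mapping.txt --max 500 --category COUNTRY \
#       --output subsampled_dm.txt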
@mod2_pcoa.command()
@click.option('--coords', required=True, type=click.Path(resolve_path=True,
readable=True, exists=True), help='Coordinates file')
@click.option('--mapping_file', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Mapping file')
@click.option('--output', required=True, type=click.Path(exists=True,
writable=True, resolve_path=True), help='Output directory')
@click.option('--prefix', required=True, type=str, help='Output file prefix')
@click.option('--samples', required=False, type=str,
help='Comma separated list of samples to print')
@click.option('--distmat', required=True, type=click.Path(resolve_path=True,
readable=True,
exists=True),
help=('Input distance matrix to find nearest sample (if not '
'present in the coordinates'))
def country(coords, mapping_file, output, prefix, samples, distmat):
"""Generates as many figures as samples in the coordinates file"""
o = read(coords, into=OrdinationResults)
o_id_lookup = set(o.site_ids)
dm = read(distmat, into=DistanceMatrix)
dm_id_lookup = {i: idx for idx, i in enumerate(dm.ids)}
coord_samples_in_dm = {idx for idx, i in enumerate(dm.ids)
if i in o_id_lookup}
# we'll be computing min values, so we need to avoid catching the diagonal
np.fill_diagonal(dm._data, np.inf)
x, y = o.site[:, 0], o.site[:, 1]
# coordinates
c_df = pd.DataFrame(o.site, o.site_ids)
# mapping file
mf = pd.read_csv(mapping_file, '\t', converters=defaultdict(str),
index_col='#SampleID')
# mf = mf.loc[o.site_ids]
if samples is None:
samples = dm.ids[:]
else:
samples = set(samples.split(',')).intersection(set(dm.ids))
samples = mf.loc[samples].index
color_Venezuela = sns.color_palette('Paired', 12)[10]
color_Malawi = sns.color_palette('Paired', 12)[1]
color_Western = sns.color_palette('Paired', 12)[4]
color_Highlight = sns.color_palette('Paired', 12)[5]
color_no_data = (0.5, 0.5, 0.5)
grp_colors = OrderedDict()
grp_colors['no_data'] = color_no_data
grp_colors['Australia'] = color_Western
grp_colors['Belgium'] = color_Western
grp_colors['Canada'] = color_Western
grp_colors['China'] = color_Western
grp_colors['Finland'] = color_Western
grp_colors['France'] = color_Western
grp_colors['Germany'] = color_Western
grp_colors['Great Britain'] = color_Western
grp_colors['Ireland'] = color_Western
grp_colors['Japan'] = color_Western
grp_colors['Netherlands'] = color_Western
grp_colors['New Zealand'] = color_Western
grp_colors['Norway'] = color_Western
grp_colors['Scotland'] = color_Western
grp_colors['Spain'] = color_Western
grp_colors['Switzerland'] = color_Western
grp_colors['Thailand'] = color_Western
grp_colors['United Arab Emirates'] = color_Western
grp_colors['United Kingdom'] = color_Western
grp_colors['United States of America'] = color_Western
grp_colors['Malawi'] = color_Malawi
grp_colors['Venezuela'] = color_Venezuela
for sample_to_plot in samples:
if sample_to_plot in o_id_lookup:
sample = sample_to_plot
else:
# find the closest sample in the distance matrix that is in the
# coordinates data
sample = None
for i in dm[dm_id_lookup[sample_to_plot]].argsort():
if i in coord_samples_in_dm:
sample = dm.ids[i]
break
# this should not ever happen
if sample is None:
raise ValueError("Unable to find a similar sample?")
# contour plot superimposed
sns.kdeplot(x, y, cmap='bone')
sns.set_context(rc={"lines.linewidth": 0.75})
# change participant's country's color to color_Highlight unless
# country is Venezuela or Malawi
if (mf.loc[sample_to_plot]['COUNTRY'] != 'Malawi') & (
mf.loc[sample_to_plot]['COUNTRY'] != 'Venezuela'):
grp_colors[mf.loc[sample_to_plot]['COUNTRY']] = color_Highlight
# plot each country except participant's according to colors above
for grp, color in grp_colors.iteritems():
if grp == mf.loc[sample_to_plot]['COUNTRY']:
continue
sub_coords = c_df[mf.COUNTRY == grp]
plt.scatter(sub_coords[0], sub_coords[1], color=color,
edgecolor=np.asarray(color)*0.6, lw=LINE_WIDTH,
alpha=ALPHA)
# now plot participant's country
grp = mf.loc[sample_to_plot]['COUNTRY']
color = grp_colors[grp]
sub_coords = c_df[mf.COUNTRY == grp]
plt.scatter(sub_coords[0], sub_coords[1], color=color,
edgecolor=np.asarray(color)*0.6, lw=LINE_WIDTH,
alpha=ALPHA)
# plot participant's dot
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=grp_colors[mf.loc[sample_to_plot]['COUNTRY']],
s=270, edgecolor='w', zorder=1, lw=LINE_WIDTH_WHITE)
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=grp_colors[mf.loc[sample_to_plot]['COUNTRY']],
s=250, edgecolor=np.asarray(grp_colors[
mf.loc[sample_to_plot]['COUNTRY']])*0.6,
zorder=2, lw=LINE_WIDTH_BLACK)
# reset participant's country's color to color_Western unless country
# is Venezuela or Malawi
if (mf.loc[sample_to_plot]['COUNTRY'] != 'Malawi') & (
mf.loc[sample_to_plot]['COUNTRY'] != 'Venezuela'):
grp_colors[mf.loc[sample_to_plot]['COUNTRY']] = color_Western
plt.axis('off')
my_dpi = 72
figsize = (1000 / my_dpi, 1000 / my_dpi)
out_file = os.path.join(output, '.'.join([prefix, sample, 'pdf']))
plt.savefig(out_file, figsize=figsize, dpi=my_dpi)
plt.close()
@mod2_pcoa.command()
@click.option('--coords', required=True, type=click.Path(resolve_path=True,
readable=True, exists=True), help='Coordinates file')
@click.option('--mapping_file', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Mapping file')
@click.option('--color', required=True, type=str,
help='Metadata category to set color by')
@click.option('--output', required=True, type=click.Path(exists=True,
writable=True, resolve_path=True), help='Output directory')
@click.option('--prefix', required=True, type=str, help='Output file prefix')
@click.option('--samples', required=False, type=str,
help='Comma separated list of samples to print')
def gradient(coords, mapping_file, color, output, prefix, samples):
"""Generates as many figures as samples in the coordinates file"""
o = read(coords, into=OrdinationResults)
# coordinates
c_df = pd.DataFrame(o.site, o.site_ids)
# mapping file
mf = pd.read_csv(mapping_file, '\t', converters=defaultdict(str),
index_col='#SampleID')
mf = mf.loc[o.site_ids]
mf[color] = mf[color].convert_objects(convert_numeric=True)
if samples is None:
samples = mf.index
else:
samples = set(samples.split(',')).intersection(set(o.site_ids))
samples = mf.loc[samples].index
numeric = mf[~pd.isnull(mf[color])]
non_numeric = mf[pd.isnull(mf[color])]
color_array = plt.cm.RdBu(numeric[color]/max(numeric[color]))
for sample in samples:
# plot numeric metadata as colored gradient
ids = numeric.index
x, y = c_df.loc[ids][0], c_df.loc[ids][1]
plt.scatter(x, y, c=numeric[color], cmap=plt.get_cmap('RdBu'),
alpha=ALPHA, lw=LINE_WIDTH, edgecolor=color_array*0.6)
# plt.colorbar()
# plot non-numeric metadata as gray
ids = non_numeric.index
x, y = c_df.loc[ids][0], c_df.loc[ids][1]
plt.scatter(x, y, c='0.5', alpha=ALPHA, lw=LINE_WIDTH, edgecolor='0.3')
# plot individual's dot
try:
color_index = numeric.index.tolist().index(sample)
except ValueError:
color_index = None
if color_index is None:
_color = (0.5, 0.5, 0.5)
else:
_color = color_array[color_index]
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=_color, s=270, edgecolor='w', lw=LINE_WIDTH_WHITE)
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=_color, s=250, edgecolor=np.asarray(_color)*0.6,
lw=LINE_WIDTH_BLACK)
plt.axis('off')
my_dpi = 72
figsize = (1000 / my_dpi, 1000 / my_dpi)
out_file = os.path.join(output, '.'.join([prefix, sample, 'pdf']))
plt.savefig(out_file, figsize=figsize, dpi=my_dpi)
plt.close()
if __name__ == '__main__':
mod2_pcoa()
| bsd-3-clause |
KarlTDebiec/Ramaplot | PDistDataset.py | 1 | 20943 | # -*- coding: utf-8 -*-
# ramaplot.PDistDataset.py
#
# Copyright (C) 2015 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Manages probability distribution datasets.
"""
################################### MODULES ###################################
from __future__ import absolute_import,division,print_function,unicode_literals
from .myplotspec.Dataset import Dataset
################################### CLASSES ###################################
class PDistDataset(Dataset):
"""
Manages probability distribution datasets.
Generates probability distribution from series of Φ/Ψ values,
representing either the probability of Φ/Ψ, or the expectation value
of a selected measurement (e.g. energy) at that Φ/Ψ.
Input data should be provided in a whitespace-delimited text file
including columns for Φ, Ψ, and any additional data, such as this
output from `cpptraj`'s `multidihedral` command::
#Frame phi:2 psi:2 chip:2 ...
1 -62.1431 144.6768 72.2964 ...
2 -63.2487 151.6551 71.9101 ...
... ... ... ...
"""
@classmethod
def get_cache_key(cls, infile, phikey="phi", psikey="psi",
zkey="free energy", mode="hist", bins=72, bandwidth=5, wrap=True,
mask_cutoff=None,
calc_populations=False, plot_populations=False,
*args, **kwargs):
"""
Generates tuple of arguments to be used as key for dataset
cache.
Arguments documented under :func:`__init__`.
"""
from os.path import expandvars
if zkey in ["free energy", "probability"]:
x_bins, y_bins = cls.process_bins_arg(bins, dim=2)
bins = (tuple(x_bins), tuple(y_bins))
else:
x_bins, y_bins, z_bins = cls.process_bins_arg(bins, dim=3)
bins = (tuple(x_bins), tuple(y_bins), tuple(z_bins))
if mode == "hist":
return (cls, expandvars(infile), phikey, psikey, zkey, mode, bins,
wrap, mask_cutoff, calc_populations, plot_populations)
elif mode == "kde":
return (cls, expandvars(infile), phikey, psikey, zkey, mode, bins,
bandwidth, wrap, mask_cutoff, calc_populations,
plot_populations)
@staticmethod
def process_bins_arg(bins, dim=2):
"""
Processes bin argument.
Arguments:
bins (int, list, ndarray): Bins to use for histogram or grid
to use for kernel density estimate; if int, number of bins
or grid points between -180° and 180° in Φ and Ψ, if list
or ndarray, bins or grid directly
Returns:
out_bins (tuple): Processed bins
"""
import numpy as np
if dim == 2:
if isinstance(bins, int):
x_bins = y_bins = np.linspace(-180, 180, bins + 1)
elif isinstance(bins, list):
if len(bins) == 2:
if isinstance(bins[0], int):
x_bins = np.linspace(-180, 180, bins[0] + 1)
elif isinstance(bins[0], list):
x_bins = np.array(bins[0])
if isinstance(bins[1], int):
y_bins = np.linspace(-180, 180, bins[1] + 1)
elif isinstance(bins[1], list):
y_bins = np.array(bins[1])
else:
x_bins = y_bins = np.array(bins)
elif isinstance(bins, np.ndarray):
x_bins = y_bins = bins
return x_bins, y_bins
elif dim == 3:
if isinstance(bins, int):
x_bins = y_bins = z_bins = np.linspace(-180, 180, bins + 1)
elif isinstance(bins, list):
if len(bins) == 2:
if isinstance(bins[0], int):
x_bins = y_bins = np.linspace(-180, 180, bins[0] + 1)
elif (isinstance(bins[0], list)
or isinstance(bins[0], np.ndarray)):
x_bins = y_bins = np.array(bins[0])
if isinstance(bins[1], int):
z_bins = np.linspace(-180, 180, bins[1] + 1)
elif (isinstance(bins[1], list)
or isinstance(bins[1], np.ndarray)):
z_bins = np.array(bins[1])
elif len(bins) == 3:
if isinstance(bins[0], int):
x_bins = np.linspace(-180, 180, bins[0] + 1)
elif (isinstance(bins[0], list)
or isinstance(bins[0], np.ndarray)):
x_bins = np.array(bins[0])
if isinstance(bins[1], int):
y_bins = np.linspace(-180, 180, bins[1] + 1)
elif (isinstance(bins[1], list)
or isinstance(bins[1], np.ndarray)):
y_bins = np.array(bins[1])
if isinstance(bins[2], int):
z_bins = np.linspace(-180, 180, bins[2] + 1)
elif (isinstance(bins[2], list)
or isinstance(bins[2], np.ndarray)):
z_bins = np.array(bins[2])
else:
x_bins = y_bins = z_bins = np.array(bins)
elif isinstance(bins, np.ndarray):
x_bins = y_bins = z_bins = bins
return x_bins, y_bins, z_bins
else:
raise TypeError()
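# Illustrative sketch of how the bins argument is interpreted (comment only):
#
#     x_bins, y_bins = PDistDataset.process_bins_arg(72)        # 72 uniform bins per axis
#     x_bins, y_bins = PDistDataset.process_bins_arg([36, 72])  # 36 phi bins, 72 psi bins
#     x_b, y_b, z_b = PDistDataset.process_bins_arg([72, 60], dim=3)  # phi/psi share 72 bins, z gets 60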
def __init__(self, phikey="phi", psikey="psi", zkey="free energy",
mode="hist", bins=72, bandwidth=5, wrap=True, mask_cutoff=None,
calc_populations=False, plot_populations=False,
verbose=1, debug=0, **kwargs):
"""
Arguments:
infile (str): Path to text input file, may contain environment
variables
phikey (str): Key from which to load Φ
psikey (str): Key from which to load Ψ
zkey (str): Key from which to load distribution; if 'free
energy' or 'probability', the 2D probability density of Φ
and Ψ will be calculated and the selected representation
returned; for other values a third dimension will be loaded
from the `zkey` column of `infile`, the 3D probability
density of Φ, Ψ, and `zkey` will be calculated, and the
expectation value of `zkey` as a function of Φ and Ψ will be
returned
mode (str): Method of calculating probability distribution;
may be either 'hist', to use a histogram, or 'kde', to use a
kernel density estimate
bins (int, list, ndarray): Bins to use for histogram or grid
to use for kernel density estimate; if int, number of bins
or gride points between -180° and 180° in Φ and Ψ, if list
or ndarray, bins or grid directly
bandwidth (float, optional): Bandwidth to use for kernel
density estimate
wrap (bool): Wrap x and y coordinates between 180° and 360° to
between -180° and 0°
wrap_z (bool): Wrap z coordinates between -180° and 0° to between 180°
and 360°; probably only useful for plotting ω
mask_cutoff (float): Cutoff beyond which distribution is
masked, if `zkey` is 'free energy', this is a the maximum
free energy above which the mask will be set, and if `zkey`
is 'probability', this is the minimum probability below
which the mask will be set
hist_kw: Keyword arguments passed to numpy.histogram2d or
numpy.histogramdd
kde_kw: Keyword arguments passed to
sklearn.neighbors.KernelDensity
verbose (int): Level of verbose output
debug (int): Level of debug output
kwargs (dict): Additional keyword arguments
.. todo:
- Fix and validate 3D KDE
- Auto-detect phikey and psikey
- Support periodic kernel density estimate
- Support variable bandwidth kernel density estimate
"""
import numpy as np
import pandas as pd
from .myplotspec import multi_get_copy
# Manage arguments
if str(mode.lower()) not in ["hist", "kde", "none"]:
raise ValueError("Argument 'mode' does not support provided " +
"value '{0}', may be 'hist', 'kde', or 'none'".format(mode))
read_csv_kw = dict(delim_whitespace=True, index_col=0)
read_csv_kw.update(kwargs.pop("read_csv_kw", {}))
# Load data
dataframe = self.load_dataset(verbose=verbose, debug=debug,
read_csv_kw=read_csv_kw, **kwargs).dataframe
if wrap:
dataframe[phikey][dataframe[phikey] > 180] -= 360
dataframe[psikey][dataframe[psikey] > 180] -= 360
# Option 0: Store Φ, Ψ
if mode == "none":
# Store data in instance variable
self.x = dataframe[phikey]
self.y = dataframe[psikey]
# Option 1: Calculate probability and free energy of Φ, Ψ
elif zkey in ["free energy", "probability"]:
x_bins, y_bins = self.process_bins_arg(bins, dim=2)
x_centers = (x_bins[:-1] + x_bins[1:]) / 2
y_centers = (y_bins[:-1] + y_bins[1:]) / 2
x_width = np.mean(x_centers[1:] - x_centers[:-1])
y_width = np.mean(y_centers[1:] - y_centers[:-1])
# Option 1a: Use a histogram (fast but noisy)
if mode == "hist":
if verbose >= 1:
print("calculating probability distribution of " +
"'{0}' and '{1}' using a ".format(phikey, psikey) +
"histogram")
hist_kw = dict(normed=False)
hist_kw.update(kwargs.get("hist_kw", {}))
hist_kw["bins"] = hist_kw.get("bins", [x_bins, y_bins])
probability, _, _ = np.histogram2d(
dataframe[phikey], dataframe[psikey], **hist_kw)
# Option 1b: Use a kernel density estimate (smooth but slow)
elif mode == "kde":
if verbose >= 1:
print("calculating probability distribution of " +
"'{0}' and '{1}' using a ".format(phikey, psikey) +
"kernel density estimate")
from sklearn.neighbors import KernelDensity
kde_kw = multi_get_copy("kde_kw", kwargs, {})
kde_kw["bandwidth"] = kde_kw.get("bandwidth", bandwidth)
xg, yg = np.meshgrid(x_centers, y_centers)
xyg = np.vstack([yg.ravel(), xg.ravel()]).T
samples = np.column_stack((dataframe[phikey],
dataframe[psikey]))
kde = KernelDensity(**kde_kw)
kde.fit(samples)
probability_series = np.exp(kde.score_samples(xyg))
probability = np.zeros((x_centers.size, y_centers.size))
for phi, psi, p in np.column_stack((xyg, probability_series)):
x_index = np.where(x_centers == phi)[0][0]
y_index = np.where(y_centers == psi)[0][0]
probability[x_index, y_index] = p
# Normalize and calculate free energy
probability /= np.nansum(probability)
free_energy = -1 * np.log(probability)
free_energy[np.isinf(free_energy)] = np.nan
free_energy -= np.nanmin(free_energy)
# Store data in instance variable
self.x_centers = x_centers
self.y_centers = y_centers
self.x_width = x_width
self.y_width = y_width
self.x_bins = x_bins
self.y_bins = y_bins
self.x = dataframe[phikey]
self.y = dataframe[psikey]
# Option 2: Calculate mean value of a third observable as a
# function of Φ, Ψ
else:
x_bins, y_bins, z_bins = self.process_bins_arg(bins, dim=3)
x_centers = (x_bins[:-1] + x_bins[1:]) / 2
y_centers = (y_bins[:-1] + y_bins[1:]) / 2
z_centers = (z_bins[:-1] + z_bins[1:]) / 2
x_width = np.mean(x_centers[1:] - x_centers[:-1])
y_width = np.mean(y_centers[1:] - y_centers[:-1])
if kwargs.get("wrap_z"):
dataframe[zkey][dataframe[zkey] < 0] += 360
# Option 2a: Use a histogram (fast but noisy)
if mode == "hist":
if verbose >= 1:
print("calculating mean value of '{0}'".format(zkey) +
"as a function of '{0}' and ".format(phikey) +
"'{0}' using a histogram".format(psikey))
hist_kw = dict(normed=True)
hist_kw.update(kwargs.get("hist_kw", {}))
hist_kw["bins"] = hist_kw.get("bins", [x_bins, y_bins, z_bins])
prob_xyz, _ = np.histogramdd(np.column_stack(
(dataframe[phikey], dataframe[psikey], dataframe[zkey])),
**hist_kw)
probability = np.sum(prob_xyz, axis=2)
prob_z_given_xy = prob_xyz / probability[:,:,np.newaxis]
weighted_z = prob_z_given_xy*z_centers[np.newaxis,np.newaxis,:]
mean_z = np.sum(weighted_z, axis=2)
# Option 2b: Use a kernel density estimate (smooth but slow)
elif mode == "kde":
raise NotImplementedError("3D KDE mode has not been validated; "
"see the class TODO ('Fix and validate 3D KDE')")
from copy import copy
from sklearn.neighbors import KernelDensity
kde_kw = multi_get_copy("kde_kw", kwargs, {})
kde_kw["bandwidth"] = kde_kw.get("bandwidth", bandwidth)
# Only a single bandwidth is supported; scale z
# dimension to span range of 120-240
# scale_range = 340
z = copy(dataframe[zkey])
# z -= z.min() # shift bottom to 0
# z_range = z.max() # save max
# z *= (scale_range / z_range)
# z += (360 - scale_range) / 2 # Give buffer on top and bottom
xg, yg, zg = np.meshgrid(x_centers, y_centers, z_centers)
xyzg = np.vstack([xg.ravel(), yg.ravel(), zg.ravel()]).T
samples = np.column_stack((dataframe[phikey],
dataframe[psikey], z))
kde = KernelDensity(**kde_kw)
kde.fit(samples)
probability_series = np.exp(kde.score_samples(xyzg))
prob_xyz = np.zeros((x_centers.size, y_centers.size,
z_centers.size), np.float) * np.nan
for phi,psi,z,p in np.column_stack((xyzg, probability_series)):
x_index = np.where(x_centers == phi)[0][0]
y_index = np.where(y_centers == psi)[0][0]
z_index = np.where(z_centers == z)[0][0]
prob_xyz[x_index, y_index, z_index] = p
prob_xyz /= np.sum(prob_xyz)
probability = np.sum(prob_xyz, axis=2)
prob_z_given_xy = prob_xyz / probability[:,:,np.newaxis]
weighted_z = prob_z_given_xy*z_centers[np.newaxis,np.newaxis,:]
mean_z = np.sum(weighted_z, axis=2)
# mean_z -= (360 - scale_range) / 2 # Shift back down
# mean_z *= (z_range / scale_range) # Back from degrees to E
# free_energy *= 627.503 # Convert to kcal/mol
# Normalize and calculate free energy
probability /= np.nansum(probability)
free_energy = -1 * np.log(probability)
free_energy[np.isinf(free_energy)] = np.nan
free_energy -= np.nanmin(free_energy)
# Store data in instance variable
self.x_centers = x_centers
self.y_centers = y_centers
self.x_width = x_width
self.y_width = y_width
self.x_bins = x_bins
self.y_bins = y_bins
self.x = dataframe[phikey]
self.y = dataframe[psikey]
# Prepare mask
if mode == "none":
pass
elif zkey == "probability":
self.dist = probability
if mask_cutoff is not None:
self.mask = np.ma.masked_where(np.logical_and(
probability >= mask_cutoff,
np.logical_not(np.isnan(probability))),
np.ones_like(probability))
else:
self.mask = np.ma.masked_where(
np.logical_not(np.isnan(free_energy)),
np.ones_like(free_energy))
elif zkey == "free energy":
self.dist = free_energy
if mask_cutoff is not None:
self.mask = np.ma.masked_where(np.logical_and(
free_energy <= mask_cutoff,
np.logical_not(np.isnan(free_energy))),
np.ones_like(free_energy))
else:
self.mask = np.ma.masked_where(
np.logical_not(np.isnan(free_energy)),
np.ones_like(free_energy))
else:
self.dist = mean_z
if mask_cutoff is not None:
self.mask = np.ma.masked_where(np.logical_and(
free_energy <= mask_cutoff,
np.logical_not(np.isnan(free_energy))),
np.ones_like(free_energy))
else:
self.mask = np.ma.masked_where(
np.logical_not(np.isnan(free_energy)),
np.ones_like(free_energy))
# Calculate state populations
if calc_populations:
states = kwargs.get("states", [
("β", -151, 151),
("PPII", -66, 140),
("ξ", -145, 55),
("γ'", -81, 65),
("α", -70, -25),
("$L_α$", 55, 45),
("γ", 73, -35),
("PPII'", 56, -124),
("plateau", -100, -130)])
state_radius = kwargs.get("state_radius", 45)
distances = np.zeros((len(states), len(x_centers), len(y_centers)))
xs = []
ys = []
for i, (state, x, y) in enumerate(states):
xs += [x]
ys += [y]
# There must be a better way to do this, but this works
for j, xc in enumerate(x_centers):
for k, yc in enumerate(y_centers):
# Periodic (wrap-around) distance on the torus; only the magnitude
# matters because the components are squared below.
dx = abs(xc - x)
if dx > 180:
dx = 360 - dx
dy = abs(yc - y)
if dy > 180:
dy = 360 - dy
distances[i,j,k] = np.sqrt(dx**2 + dy**2)
assignments = np.argmin(distances, axis=0)
assignments[np.min(distances, axis=0) >= state_radius] = \
len(states) + 1
index, state_populations = [], []
for i, (state, x, y) in enumerate(states):
index += [state]
state_populations += [(x, y,
np.nansum(probability[assignments==i]))]
state_populations = pd.DataFrame(state_populations, index=index,
columns=["Φ center", "Ψ center", "population"])
self.state_populations = state_populations
if verbose >= 1:
print(state_populations)
if plot_populations:
self.dist = assignments
self.mask = np.ma.masked_where(
np.logical_not(assignments == len(states) + 1),
np.ones_like(assignments))
self.x = np.array(xs)
self.y = np.array(ys)
label, label_kw = [], []
from .myplotspec import multi_get_copy
default_label_kw = multi_get_copy(["default_label_kw",
"label_kw"], kwargs, {})
for index, row in state_populations.iterrows():
label += ["{0}\n{1:2d}%".format(index,
int(row["population"]*100))]
label_kw += [default_label_kw.copy()]
label_kw[-1]["x"] = row["Φ center"]
label_kw[-1]["y"] = row["Ψ center"]
self.label = label
self.label_kw = label_kw
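# Illustrative usage sketch (comment only; "dihedrals.dat" and the column
# names are assumptions about a cpptraj multidihedral output file, and the
# infile keyword is forwarded to the parent Dataset loader):
#
#     dataset = PDistDataset(infile="dihedrals.dat", phikey="phi:2",
#                            psikey="psi:2", zkey="free energy",
#                            mode="hist", bins=72, mask_cutoff=5)
#     # dataset.dist holds the free-energy surface on the grid given by
#     # dataset.x_centers / dataset.y_centers; dataset.mask marks bins
#     # above the cutoff.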
| bsd-3-clause |
bikong2/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 160 | 6028 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
hhj0325/pystock | com/hhj/sogou/countByTime.py | 1 | 1082 | """
Select articles published in 2018 and count them by month.
"""
import numpy as np
import pandas as pd
from pyecharts import Bar
df = pd.read_csv('sg_articles.csv', header=None, names=["title", "article", "name", "date"])
list1 = []
list2 = []
for j in df['date']:
# Extract the publication year and month of each article
time_1 = j.split('-')[0]
time_2 = j.split('-')[1]
list1.append(time_1)
list2.append(time_2)
df['year'] = list1
df['month'] = list2
# Keep only articles published in 2018 and group them by month
df = df.loc[df['year'] == '2018']
month_message = df.groupby(['month'])
month_com = month_message['month'].agg(['count'])
month_com.reset_index(inplace=True)
month_com_last = month_com.sort_index()
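# NOTE: the label list below covers January-November only (range(1, 12) yields
# 11 entries); if the scraped data also includes December, extend it to
# range(1, 13) so the labels line up with the monthly counts.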
attr = ["{}".format(str(i) + '月') for i in range(1, 12)]
v1 = np.array(month_com_last['count'])
v1 = ["{}".format(int(i)) for i in v1]
bar = Bar("微信文章发布时间分布", title_pos='center', title_top='18', width=800, height=400)
bar.add("", attr, v1, is_stack=True, is_label_show=True)
bar.render("微信文章发布时间分布.html") | apache-2.0 |
szarvas/anc-field | examples/ie224-impulse.py | 1 | 5152 | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal, stats
import sys
sys.path.append('..')
from anc_field_py.ancfield import *
from anc_field_py.ancutil import *
def add_microphones(ancObject):
# error_mic
ancObject.AddMic([4,1.6,5.3])
# reference_front
ancObject.AddMic([4,1.6,4.5])
# reference_back
ancObject.AddMic([4,1.6,6.5])
# reference_left
ancObject.AddMic([3,1.6,5.5])
# reference_right
ancObject.AddMic([5,1.6,5.5])
# reference_bottom
ancObject.AddMic([4,0.6,5.5])
# reference_top
ancObject.AddMic([4,2.6,5.5])
return ancObject
# =========================================================================
# SIMULATION 1
# Calculating noise to microphone paths
# =========================================================================
#
# Trying to run this simulation on a CPU failed on an i7-3770: compiling the
# lrs_1.cl file fails. It may be because the scene's size is too large for
# the CPU. Compiling it for the built-in integrated GPU worked, though.
#
# We create a simulation and immediately add microphones to it
anc = add_microphones(AncField('gpu', 'models/ie224'))
# noise_source
anc.AddSource([4,1.6,1.0], 5*impulse(2000, 6*32000, 32000))
anc.Visualize(1.6)
(x,y_noise) = anc.Run(4)
# Saving the impulse responses
np.savetxt('ie224-noise-to-error.dat', y_noise[0,:])
np.savetxt('ie224-noise-to-reference_front.dat', y_noise[1,:])
np.savetxt('ie224-noise-to-reference_back.dat', y_noise[2,:])
np.savetxt('ie224-noise-to-reference_left.dat', y_noise[3,:])
np.savetxt('ie224-noise-to-reference_right.dat', y_noise[4,:])
np.savetxt('ie224-noise-to-reference_bottom.dat', y_noise[5,:])
np.savetxt('ie224-noise-to-reference_top.dat', y_noise[6,:])
# =========================================================================
# SIMULATION 2
# Calculating actuator to microphone paths
# =========================================================================
#
# We create a simulation and immediately add microphones to it
anc = add_microphones(AncField('gpu', 'models/ie224'))
# actuator
anc.AddSource([4,1.6,5.5], 5*impulse(2000, 6*32000, 32000))
anc.Visualize(1.6)
(x,y_actuator) = anc.Run(4)
# Saving the impulse responses
np.savetxt('ie224-actuator-to-error.dat', y_actuator[0,:])
np.savetxt('ie224-actuator-to-reference_front.dat', y_actuator[1,:])
np.savetxt('ie224-actuator-to-reference_back.dat', y_actuator[2,:])
np.savetxt('ie224-actuator-to-reference_left.dat', y_actuator[3,:])
np.savetxt('ie224-actuator-to-reference_right.dat', y_actuator[4,:])
np.savetxt('ie224-actuator-to-reference_bottom.dat', y_actuator[5,:])
np.savetxt('ie224-actuator-to-reference_top.dat', y_actuator[6,:])
# =========================================================================
# GENERATING IMAGES FOR THE REPORT
# Calculating actuator to microphone paths
# =========================================================================
#
# Saving figures for the field simulation report
fig, ax = plt.subplots()
ax.plot(y_noise[0,:])
plt.title('ie224-noise-to-error')
fig.savefig('ie224-noise-to-error.png')
fig, ax = plt.subplots()
ax.plot(y_noise[1,:])
plt.title('ie224-noise-to-reference_front')
fig.savefig('ie224-noise-to-reference_front.png')
fig, ax = plt.subplots()
ax.plot(y_noise[2,:])
plt.title('ie224-noise-to-reference_back')
fig.savefig('ie224-noise-to-reference_back.png')
fig, ax = plt.subplots()
ax.plot(y_noise[3,:])
plt.title('ie224-noise-to-reference_left')
fig.savefig('ie224-noise-to-reference_left.png')
fig, ax = plt.subplots()
ax.plot(y_noise[4,:])
plt.title('ie224-noise-to-reference_right')
fig.savefig('ie224-noise-to-reference_right.png')
fig, ax = plt.subplots()
ax.plot(y_noise[5,:])
plt.title('ie224-noise-to-reference_bottom')
fig.savefig('ie224-noise-to-reference_bottom.png')
fig, ax = plt.subplots()
ax.plot(y_noise[6,:])
plt.title('ie224-noise-to-reference_top')
fig.savefig('ie224-noise-to-reference_top.png')
# Saving figures for the field simulation report
fig, ax = plt.subplots()
ax.plot(y_actuator[0,:])
plt.title('ie224-actuator-to-error')
fig.savefig('ie224-actuator-to-error.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[1,:])
plt.title('ie224-actuator-to-reference_front')
fig.savefig('ie224-actuator-to-reference_front.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[2,:])
plt.title('ie224-actuator-to-reference_back')
fig.savefig('ie224-actuator-to-reference_back.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[3,:])
plt.title('ie224-actuator-to-reference_left')
fig.savefig('ie224-actuator-to-reference_left.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[4,:])
plt.title('ie224-actuator-to-reference_right')
fig.savefig('ie224-actuator-to-reference_right.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[5,:])
plt.title('ie224-actuator-to-reference_bottom')
fig.savefig('ie224-actuator-to-reference_bottom.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[6,:])
plt.title('ie224-actuator-to-reference_top')
fig.savefig('ie224-actuator-to-reference_top.png')
| gpl-3.0 |
aerosara/thesis | notebooks_archive_10112014/pycse Examples.py | 1 | 2176 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=3>
# Example from pycse 1
# <codecell>
# copied from http://kitchingroup.cheme.cmu.edu/blog/tag/events/
from pycse import odelay
import matplotlib.pyplot as plt
import numpy as np
def ode(Y,x):
y1, y2 = Y
dy1dx = y2
dy2dx = -y1
return [dy1dx, dy2dx]
def event1(Y, x):
y1, y2 = Y
value = y2 - (-1.0)
isterminal = True
direction = 0
return value, isterminal, direction
def event2(Y, x):
dy1dx, dy2dx = ode(Y,x)
value = dy1dx - 0.0
isterminal = False
direction = -1 # derivative is decreasing towards a maximum
return value, isterminal, direction
Y0 = [2.0, 1.0]
xspan = np.linspace(0, 5)
X, Y, XE, YE, IE = odelay(ode, Y0, xspan, events=[event1, event2])
plt.plot(X, Y)
for ie,xe,ye in zip(IE, XE, YE):
if ie == 1: #this is the second event
y1,y2 = ye
plt.plot(xe, y1, 'ro')
plt.legend(['$y_1$', '$y_2$'], loc='best')
#plt.savefig('images/odelay-mult-eq.png')
plt.show()
# <headingcell level=3>
# Example from pycse 2
# <codecell>
# copied from: http://kitchingroup.cheme.cmu.edu/pycse/pycse.html#sec-10-1-8
# 10.1.8 Stopping the integration of an ODE at some condition
from pycse import *
import numpy as np
k = 0.23
Ca0 = 2.3
def dCadt(Ca, t):
return -k * Ca**2
def stop(Ca, t):
isterminal = True
direction = 0
value = 1.0 - Ca
return value, isterminal, direction
tspan = np.linspace(0.0, 10.0)
t, CA, TE, YE, IE = odelay(dCadt, Ca0, tspan, events=[stop])
print 'At t = {0:1.2f} seconds the concentration of A is {1:1.2f} mol/L.'.format(t[-1], float(CA[-1]))
# <headingcell level=3>
# fsolve example
# <codecell>
from math import cos
def func(x):
return x + 2*cos(x) # finds where this is zero
def func2(x):
out = [x[0]*cos(x[1]) - 4]
out.append(x[1]*x[0] - x[1] - 5)
return out # finds where both elements of this array are zero
from scipy.optimize import fsolve
x0 = fsolve(func, 0.3) # initial guess
print x0
print func(x0)
#-1.02986652932
x02 = fsolve(func2, [1, 1]) # initial guesses
print x02
print func2(x02)
#[ 6.50409711 0.90841421]
| mit |
victorpoughon/master-thesis | python/outlier_analysis.py | 1 | 1365 | #!/usr/bin/env python3
import os
import os.path
import sys
import numpy as np
import matplotlib.pyplot as plt
from features_common import match_angle, base_plot
def outlier_frequency_plot(path, angles, threshold):
f, ax = base_plot()
ax.plot(100 * np.cumsum(np.abs(angles) > threshold) / angles.size)
ax.set_xlabel("Match number")
ax.set_ylabel("Outlier fraction (%)")
ax.set_ylim([0, 100])
f.savefig(path, bbox_inches='tight')
plt.close(f)
if __name__ == "__main__":
if len(sys.argv) < 2:
path = "."
else:
path = sys.argv[1]
# Produce outlier plots for all directories containing outlier_threshold.txt
for root, subdirs, files in os.walk(path):
if "matches.txt" in files:
shape = np.loadtxt(os.path.join(root, "shape.txt"))
matches = np.loadtxt(os.path.join(root, "matches.txt"), comments="#")
threshold = np.loadtxt(os.path.join(root, "outlier_threshold.txt"))
if threshold.size == 1:
print("outlier_analysis.py: " + root)
# Compute matches angles
angles = match_angle(matches, shape)
outlier_frequency_plot(os.path.join(root, "plot_outliers.pdf"), angles, threshold)
else:
print("outlier_analysis.py: " + root + " --- empty outlier_threshold.txt")
| mit |
SamHames/scikit-image | skimage/viewer/canvastools/base.py | 1 | 5472 | import numpy as np
try:
from matplotlib import lines
except ImportError:
pass
__all__ = ['CanvasToolBase', 'ToolHandles']
def _pass(*args):
pass
class CanvasToolBase(object):
"""Base canvas tool for matplotlib axes.
Parameters
----------
ax : :class:`matplotlib.axes.Axes`
Matplotlib axes where tool is displayed.
on_move : function
Function called whenever a control handle is moved.
This function must accept the end points of the line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
useblit : bool
If True, update canvas by blitting, which is much faster than normal
redrawing (turn off for debugging purposes).
"""
def __init__(self, ax, on_move=None, on_enter=None, on_release=None,
useblit=True):
self.ax = ax
self.canvas = ax.figure.canvas
self.img_background = None
self.cids = []
self._artists = []
self.active = True
if useblit:
self.connect_event('draw_event', self._blit_on_draw_event)
self.useblit = useblit
self.callback_on_move = _pass if on_move is None else on_move
self.callback_on_enter = _pass if on_enter is None else on_enter
self.callback_on_release = _pass if on_release is None else on_release
self.connect_event('key_press_event', self._on_key_press)
def connect_event(self, event, callback):
"""Connect callback with an event.
This should be used in lieu of `figure.canvas.mpl_connect` since this
function stores callback ids for later cleanup.
"""
cid = self.canvas.mpl_connect(event, callback)
self.cids.append(cid)
def disconnect_events(self):
"""Disconnect all events created by this widget."""
for c in self.cids:
self.canvas.mpl_disconnect(c)
def ignore(self, event):
"""Return True if event should be ignored.
This method (or a version of it) should be called at the beginning
of any event callback.
"""
return not self.active
def set_visible(self, val):
for artist in self._artists:
artist.set_visible(val)
def _blit_on_draw_event(self, event=None):
self.img_background = self.canvas.copy_from_bbox(self.ax.bbox)
self._draw_artists()
def _draw_artists(self):
for artist in self._artists:
self.ax.draw_artist(artist)
def remove(self):
"""Remove artists and events from axes.
Note that the naming here mimics the interface of Matplotlib artists.
"""
#TODO: For some reason, RectangleTool doesn't get properly removed
self.disconnect_events()
for a in self._artists:
a.remove()
def redraw(self):
"""Redraw image and canvas artists.
This method should be called by subclasses when artists are updated.
"""
if self.useblit and self.img_background is not None:
self.canvas.restore_region(self.img_background)
self._draw_artists()
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
def _on_key_press(self, event):
if event.key == 'enter':
self.callback_on_enter(self.geometry)
self.set_visible(False)
self.redraw()
@property
def geometry(self):
"""Geometry information that gets passed to callback functions."""
return None
class ToolHandles(object):
"""Control handles for canvas tools.
Parameters
----------
ax : :class:`matplotlib.axes.Axes`
Matplotlib axes where tool handles are displayed.
x, y : 1D arrays
Coordinates of control handles.
marker : str
Shape of marker used to display handle. See `matplotlib.pyplot.plot`.
marker_props : dict
Additional marker properties. See :class:`matplotlib.lines.Line2D`.
"""
def __init__(self, ax, x, y, marker='o', marker_props=None):
self.ax = ax
props = dict(marker=marker, markersize=7, mfc='w', ls='none',
alpha=0.5, visible=False)
props.update(marker_props if marker_props is not None else {})
self._markers = lines.Line2D(x, y, animated=True, **props)
self.ax.add_line(self._markers)
self.artist = self._markers
@property
def x(self):
return self._markers.get_xdata()
@property
def y(self):
return self._markers.get_ydata()
def set_data(self, pts, y=None):
"""Set x and y positions of handles"""
if y is not None:
x = pts
pts = np.array([x, y])
self._markers.set_data(pts)
def set_visible(self, val):
self._markers.set_visible(val)
def set_animated(self, val):
self._markers.set_animated(val)
def draw(self):
self.ax.draw_artist(self._markers)
def closest(self, x, y):
"""Return index and pixel distance to closest index."""
pts = np.transpose((self.x, self.y))
# Transform data coordinates to pixel coordinates.
pts = self.ax.transData.transform(pts)
diff = pts - ((x, y))
dist = np.sqrt(np.sum(diff**2, axis=1))
return np.argmin(dist), np.min(dist)
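# --- Illustrative sketch (not part of the original module) ------------------
# Minimal standalone use of the ToolHandles class above: place a few handles
# on an axes and query the one closest to a pixel position. The helper name
# is hypothetical and exists only for illustration; it relies only on
# matplotlib (imported locally) and the module-level ``np`` import.
def _toolhandles_demo():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    handles = ToolHandles(ax, x=[0.2, 0.5, 0.8], y=[0.2, 0.5, 0.8])
    handles.set_visible(True)
    # Convert a data point to pixel coordinates, then find the nearest handle.
    px, py = ax.transData.transform((0.5, 0.5))
    index, dist = handles.closest(px, py)
    return index, dist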
| bsd-3-clause |
abele/bokeh | bokeh/charts/tests/test_data_adapter.py | 37 | 3285 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import DataAdapter
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestDataAdapter(unittest.TestCase):
def setUp(self):
self._values = OrderedDict()
self._values['first'] = [2., 5., 3.]
self._values['second'] = [4., 1., 4.]
self._values['third'] = [6., 4., 3.]
def test_list(self):
values = list(self._values.values())
da = DataAdapter(values)
self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['0', '1', '2'])
self.assertEqual(da.keys(), ['0', '1', '2'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_array(self):
values = np.array(list(self._values.values()))
da = DataAdapter(values)
assert_array_equal(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['0', '1', '2'])
self.assertEqual(da.keys(), ['0', '1', '2'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_pandas(self):
values = pd.DataFrame(self._values)
da = DataAdapter(values)
# TODO: THIS SHOULD BE FIXED..
#self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
# We expect data adapter index to be the same as the underlying pandas
# object and not the default created by DataAdapter
self.assertEqual(da.index, [0, 1, 2])
def test_ordered_dict(self):
da = DataAdapter(self._values)
self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_blaze_data_no_fields(self):
import blaze
valuesdf = pd.DataFrame(self._values)
values = blaze.Data(valuesdf)
da = DataAdapter(values)
assert_array_equal(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
self.assertEqual(da.index, [0, 1, 2])
xs, _values = DataAdapter.get_index_and_data(values, None)
assert_array_equal([0,1,2], xs)
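# --- Illustrative sketch (not part of the original test suite) --------------
# The behaviour asserted in test_ordered_dict above, outside of unittest: a
# DataAdapter built from an OrderedDict keeps the column order and generates
# a default 'a', 'b', 'c' index. The helper name is hypothetical.
def _data_adapter_demo():
    values = OrderedDict([('first', [2., 5., 3.]),
                          ('second', [4., 1., 4.]),
                          ('third', [6., 4., 3.])])
    da = DataAdapter(values)
    return da.columns, da.keys(), da.index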
| bsd-3-clause |
kaichogami/sympy | sympy/physics/quantum/state.py | 58 | 29186 | """Dirac notation for states."""
from __future__ import print_function, division
from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,
Tuple)
from sympy.core.compatibility import u, range
from sympy.printing.pretty.stringpict import stringPict
from sympy.physics.quantum.qexpr import QExpr, dispatch_method
__all__ = [
'KetBase',
'BraBase',
'StateBase',
'State',
'Ket',
'Bra',
'TimeDepState',
'TimeDepBra',
'TimeDepKet',
'Wavefunction'
]
#-----------------------------------------------------------------------------
# States, bras and kets.
#-----------------------------------------------------------------------------
# ASCII brackets
_lbracket = "<"
_rbracket = ">"
_straight_bracket = "|"
# Unicode brackets
# MATHEMATICAL ANGLE BRACKETS
_lbracket_ucode = u("\N{MATHEMATICAL LEFT ANGLE BRACKET}")
_rbracket_ucode = u("\N{MATHEMATICAL RIGHT ANGLE BRACKET}")
# LIGHT VERTICAL BAR
_straight_bracket_ucode = u("\N{LIGHT VERTICAL BAR}")
# Other options for unicode printing of <, > and | for Dirac notation.
# LEFT-POINTING ANGLE BRACKET
# _lbracket = u"\u2329"
# _rbracket = u"\u232A"
# LEFT ANGLE BRACKET
# _lbracket = u"\u3008"
# _rbracket = u"\u3009"
# VERTICAL LINE
# _straight_bracket = u"\u007C"
class StateBase(QExpr):
"""Abstract base class for general abstract states in quantum mechanics.
All other state classes defined will need to inherit from this class. It
carries the basic structure for all other states such as dual, _eval_adjoint
and label.
This is an abstract base class and you should not instantiate it directly,
instead use State.
"""
@classmethod
def _operators_to_state(self, ops, **options):
""" Returns the eigenstate instance for the passed operators.
This method should be overridden in subclasses. It will handle being
passed either an Operator instance or set of Operator instances. It
should return the corresponding state INSTANCE or simply raise a
NotImplementedError. See cartesian.py for an example.
"""
raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
def _state_to_operators(self, op_classes, **options):
""" Returns the operators which this state instance is an eigenstate
of.
This method should be overridden in subclasses. It will be called on
state instances and be passed the operator classes that we wish to make
into instances. The state instance will then transform the classes
appropriately, or raise a NotImplementedError if it cannot return
        operator instances. See cartesian.py for examples.
"""
raise NotImplementedError(
"Cannot map this state to operators. Method not implemented!")
@property
def operators(self):
"""Return the operator(s) that this state is an eigenstate of"""
from .operatorset import state_to_operators # import internally to avoid circular import errors
return state_to_operators(self)
def _enumerate_state(self, num_states, **options):
raise NotImplementedError("Cannot enumerate this state!")
def _represent_default_basis(self, **options):
return self._represent(basis=self.operators)
#-------------------------------------------------------------------------
# Dagger/dual
#-------------------------------------------------------------------------
@property
def dual(self):
"""Return the dual state of this one."""
return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
@classmethod
def dual_class(self):
"""Return the class used to construt the dual."""
raise NotImplementedError(
'dual_class must be implemented in a subclass'
)
def _eval_adjoint(self):
"""Compute the dagger of this state using the dual."""
return self.dual
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _pretty_brackets(self, height, use_unicode=True):
# Return pretty printed brackets for the state
# Ideally, this could be done by pform.parens but it does not support the angled < and >
# Setup for unicode vs ascii
if use_unicode:
lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode
slash, bslash, vert = u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT}'), \
u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT}'), \
u('\N{BOX DRAWINGS LIGHT VERTICAL}')
else:
lbracket, rbracket = self.lbracket, self.rbracket
slash, bslash, vert = '/', '\\', '|'
# If height is 1, just return brackets
if height == 1:
return stringPict(lbracket), stringPict(rbracket)
# Make height even
height += (height % 2)
brackets = []
for bracket in lbracket, rbracket:
# Create left bracket
if bracket in set([_lbracket, _lbracket_ucode]):
bracket_args = [ ' ' * (height//2 - i - 1) +
slash for i in range(height // 2)]
bracket_args.extend(
[ ' ' * i + bslash for i in range(height // 2)])
# Create right bracket
elif bracket in set([_rbracket, _rbracket_ucode]):
bracket_args = [ ' ' * i + bslash for i in range(height // 2)]
bracket_args.extend([ ' ' * (
height//2 - i - 1) + slash for i in range(height // 2)])
# Create straight bracket
elif bracket in set([_straight_bracket, _straight_bracket_ucode]):
bracket_args = [vert for i in range(height)]
else:
raise ValueError(bracket)
brackets.append(
stringPict('\n'.join(bracket_args), baseline=height//2))
return brackets
def _sympystr(self, printer, *args):
contents = self._print_contents(printer, *args)
return '%s%s%s' % (self.lbracket, contents, self.rbracket)
def _pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
# Get brackets
pform = self._print_contents_pretty(printer, *args)
lbracket, rbracket = self._pretty_brackets(
pform.height(), printer._use_unicode)
# Put together state
pform = prettyForm(*pform.left(lbracket))
pform = prettyForm(*pform.right(rbracket))
return pform
def _latex(self, printer, *args):
contents = self._print_contents_latex(printer, *args)
        # The extra {} brackets are needed for matplotlib's latex renderer
        # to render this properly.
return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex)
class KetBase(StateBase):
"""Base class for Kets.
This class defines the dual property and the brackets for printing. This is
an abstract base class and you should not instantiate it directly, instead
use Ket.
"""
lbracket = _straight_bracket
rbracket = _rbracket
lbracket_ucode = _straight_bracket_ucode
rbracket_ucode = _rbracket_ucode
lbracket_latex = r'\left|'
rbracket_latex = r'\right\rangle '
@classmethod
def default_args(self):
return ("psi",)
@classmethod
def dual_class(self):
return BraBase
def __mul__(self, other):
"""KetBase*other"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, BraBase):
return OuterProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*KetBase"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, BraBase):
return InnerProduct(other, self)
else:
return Expr.__rmul__(self, other)
#-------------------------------------------------------------------------
# _eval_* methods
#-------------------------------------------------------------------------
def _eval_innerproduct(self, bra, **hints):
"""Evaluate the inner product betweeen this ket and a bra.
This is called to compute <bra|ket>, where the ket is ``self``.
This method will dispatch to sub-methods having the format::
``def _eval_innerproduct_BraClass(self, **hints):``
Subclasses should define these methods (one for each BraClass) to
teach the ket how to take inner products with bras.
"""
return dispatch_method(self, '_eval_innerproduct', bra, **hints)
def _apply_operator(self, op, **options):
"""Apply an Operator to this Ket.
This method will dispatch to methods having the format::
``def _apply_operator_OperatorName(op, **options):``
Subclasses should define these methods (one for each OperatorName) to
teach the Ket how operators act on it.
Parameters
==========
op : Operator
The Operator that is acting on the Ket.
options : dict
A dict of key/value pairs that control how the operator is applied
to the Ket.
"""
return dispatch_method(self, '_apply_operator', op, **options)
class BraBase(StateBase):
"""Base class for Bras.
This class defines the dual property and the brackets for printing. This
is an abstract base class and you should not instantiate it directly,
instead use Bra.
"""
lbracket = _lbracket
rbracket = _straight_bracket
lbracket_ucode = _lbracket_ucode
rbracket_ucode = _straight_bracket_ucode
lbracket_latex = r'\left\langle '
rbracket_latex = r'\right|'
@classmethod
def _operators_to_state(self, ops, **options):
state = self.dual_class().operators_to_state(ops, **options)
return state.dual
def _state_to_operators(self, op_classes, **options):
return self.dual._state_to_operators(op_classes, **options)
def _enumerate_state(self, num_states, **options):
dual_states = self.dual._enumerate_state(num_states, **options)
return [x.dual for x in dual_states]
@classmethod
def default_args(self):
return self.dual_class().default_args()
@classmethod
def dual_class(self):
return KetBase
def __mul__(self, other):
"""BraBase*other"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, KetBase):
return InnerProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*BraBase"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, KetBase):
return OuterProduct(other, self)
else:
return Expr.__rmul__(self, other)
def _represent(self, **options):
"""A default represent that uses the Ket's version."""
from sympy.physics.quantum.dagger import Dagger
return Dagger(self.dual._represent(**options))
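# --- Illustrative sketch (not part of the original module) ------------------
# The ``_eval_innerproduct_<BraClass>`` and ``_apply_operator_<OperatorName>``
# hooks documented in KetBase are resolved by method name via
# ``dispatch_method``. The pair of classes below is hypothetical and shows the
# naming convention only; it is not part of the public API.
class DemoBra(BraBase):
    @classmethod
    def dual_class(self):
        return DemoKet
class DemoKet(KetBase):
    @classmethod
    def dual_class(self):
        return DemoBra
    def _eval_innerproduct_DemoBra(self, bra, **hints):
        # <bra|ket> is 1 when the labels match and 0 otherwise, i.e. the demo
        # kets are treated as an orthonormal set.
        from sympy import Integer
        return Integer(1) if bra.label == self.label else Integer(0)
# InnerProduct(DemoBra('a'), DemoKet('a')).doit() would then dispatch to the
# method above (assuming, as usual, that InnerProduct.doit defers to the ket).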
class State(StateBase):
"""General abstract quantum state used as a base class for Ket and Bra."""
pass
class Ket(State, KetBase):
"""A general time-independent Ket in quantum mechanics.
Inherits from State and KetBase. This class should be used as the base
class for all physical, time-independent Kets in a system. This class
and its subclasses will be the main classes that users will use for
expressing Kets in Dirac notation [1]_.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
    Create a simple Ket and look at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> k = Ket('psi')
>>> k
|psi>
>>> k.hilbert_space
H
>>> k.is_commutative
False
>>> k.label
(psi,)
    Kets know about their associated bra::
>>> k.dual
<psi|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.Bra'>
Take a linear combination of two kets::
>>> k0 = Ket(0)
>>> k1 = Ket(1)
>>> 2*I*k0 - 4*k1
2*I*|0> - 4*|1>
Compound labels are passed as tuples::
>>> n, m = symbols('n,m')
>>> k = Ket(n,m)
>>> k
|nm>
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Bra
class Bra(State, BraBase):
"""A general time-independent Bra in quantum mechanics.
Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This
class and its subclasses will be the main classes that users will use for
expressing Bras in Dirac notation.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
Create a simple Bra and look at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> b = Bra('psi')
>>> b
<psi|
>>> b.hilbert_space
H
>>> b.is_commutative
False
    Bras know about their dual Kets::
>>> b.dual
|psi>
>>> b.dual_class()
<class 'sympy.physics.quantum.state.Ket'>
Like Kets, Bras can have compound labels and be manipulated in a similar
manner::
>>> n, m = symbols('n,m')
>>> b = Bra(n,m) - I*Bra(m,n)
>>> b
-I*<mn| + <nm|
Symbols in a Bra can be substituted using ``.subs``::
>>> b.subs(n,m)
<mm| - I*<mm|
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Ket
#-----------------------------------------------------------------------------
# Time dependent states, bras and kets.
#-----------------------------------------------------------------------------
class TimeDepState(StateBase):
"""Base class for a general time-dependent quantum state.
This class is used as a base class for any time-dependent state. The main
difference between this class and the time-independent state is that this
class takes a second argument that is the time in addition to the usual
label argument.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
"""
#-------------------------------------------------------------------------
# Initialization
#-------------------------------------------------------------------------
@classmethod
def default_args(self):
return ("psi", "t")
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def label(self):
"""The label of the state."""
return self.args[:-1]
@property
def time(self):
"""The time of the state."""
return self.args[-1]
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _print_time(self, printer, *args):
return printer._print(self.time, *args)
_print_time_repr = _print_time
_print_time_latex = _print_time
def _print_time_pretty(self, printer, *args):
pform = printer._print(self.time, *args)
return pform
def _print_contents(self, printer, *args):
label = self._print_label(printer, *args)
time = self._print_time(printer, *args)
return '%s;%s' % (label, time)
def _print_label_repr(self, printer, *args):
label = self._print_sequence(self.label, ',', printer, *args)
time = self._print_time_repr(printer, *args)
return '%s,%s' % (label, time)
def _print_contents_pretty(self, printer, *args):
label = self._print_label_pretty(printer, *args)
time = self._print_time_pretty(printer, *args)
return printer._print_seq((label, time), delimiter=';')
def _print_contents_latex(self, printer, *args):
label = self._print_sequence(
self.label, self._label_separator, printer, *args)
time = self._print_time_latex(printer, *args)
return '%s;%s' % (label, time)
class TimeDepKet(TimeDepState, KetBase):
"""General time-dependent Ket in quantum mechanics.
This inherits from ``TimeDepState`` and ``KetBase`` and is the main class
that should be used for Kets that vary with time. Its dual is a
``TimeDepBra``.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
Create a TimeDepKet and look at its attributes::
>>> from sympy.physics.quantum import TimeDepKet
>>> k = TimeDepKet('psi', 't')
>>> k
|psi;t>
>>> k.time
t
>>> k.label
(psi,)
>>> k.hilbert_space
H
TimeDepKets know about their dual bra::
>>> k.dual
<psi;t|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.TimeDepBra'>
"""
@classmethod
def dual_class(self):
return TimeDepBra
class TimeDepBra(TimeDepState, BraBase):
"""General time-dependent Bra in quantum mechanics.
This inherits from TimeDepState and BraBase and is the main class that
should be used for Bras that vary with time. Its dual is a TimeDepBra.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
>>> from sympy.physics.quantum import TimeDepBra
>>> from sympy import symbols, I
>>> b = TimeDepBra('psi', 't')
>>> b
<psi;t|
>>> b.time
t
>>> b.label
(psi,)
>>> b.hilbert_space
H
>>> b.dual
|psi;t>
"""
@classmethod
def dual_class(self):
return TimeDepKet
class Wavefunction(Function):
"""Class for representations in continuous bases
This class takes an expression and coordinates in its constructor. It can
be used to easily calculate normalizations and probabilities.
Parameters
==========
expr : Expr
The expression representing the functional form of the w.f.
coords : Symbol or tuple
The coordinates to be integrated over, and their bounds
Examples
========
Particle in a box, specifying bounds in the more primitive way of using
Piecewise:
>>> from sympy import Symbol, Piecewise, pi, N
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = Symbol('x', real=True)
>>> n = 1
>>> L = 1
>>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True))
>>> f = Wavefunction(g, x)
>>> f.norm
1
>>> f.is_normalized
True
>>> p = f.prob()
>>> p(0)
0
>>> p(L)
0
>>> p(0.5)
2
>>> p(0.85*L)
2*sin(0.85*pi)**2
>>> N(p(0.85*L))
0.412214747707527
Additionally, you can specify the bounds of the function and the indices in
a more compact way:
>>> from sympy import symbols, pi, diff
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> f(L+1)
0
>>> f(L-1)
sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L)
>>> f(-1)
0
>>> f(0.85)
sqrt(2)*sin(0.85*pi*n/L)/sqrt(L)
>>> f(0.85, n=1, L=1)
sqrt(2)*sin(0.85*pi)
>>> f.is_commutative
False
All arguments are automatically sympified, so you can define the variables
as strings rather than symbols:
>>> expr = x**2
>>> f = Wavefunction(expr, 'x')
>>> type(f.variables[0])
<class 'sympy.core.symbol.Symbol'>
Derivatives of Wavefunctions will return Wavefunctions:
>>> diff(f, x)
Wavefunction(2*x, x)
"""
#Any passed tuples for coordinates and their bounds need to be
#converted to Tuples before Function's constructor is called, to
#avoid errors from calling is_Float in the constructor
def __new__(cls, *args, **options):
new_args = [None for i in args]
ct = 0
for arg in args:
if isinstance(arg, tuple):
new_args[ct] = Tuple(*arg)
else:
new_args[ct] = arg
ct += 1
return super(Function, cls).__new__(cls, *new_args, **options)
def __call__(self, *args, **options):
var = self.variables
if len(args) != len(var):
raise NotImplementedError(
"Incorrect number of arguments to function!")
ct = 0
#If the passed value is outside the specified bounds, return 0
for v in var:
lower, upper = self.limits[v]
#Do the comparison to limits only if the passed symbol is actually
#a symbol present in the limits;
#Had problems with a comparison of x > L
if isinstance(args[ct], Expr) and \
not (lower in args[ct].free_symbols
or upper in args[ct].free_symbols):
continue
if (args[ct] < lower) == True or (args[ct] > upper) == True:
return 0
ct += 1
expr = self.expr
#Allows user to make a call like f(2, 4, m=1, n=1)
for symbol in list(expr.free_symbols):
if str(symbol) in options.keys():
val = options[str(symbol)]
expr = expr.subs(symbol, val)
return expr.subs(zip(var, args))
def _eval_derivative(self, symbol):
expr = self.expr
deriv = expr._eval_derivative(symbol)
return Wavefunction(deriv, *self.args[1:])
def _eval_conjugate(self):
return Wavefunction(conjugate(self.expr), *self.args[1:])
def _eval_transpose(self):
return self
@property
def free_symbols(self):
return self.expr.free_symbols
@property
def is_commutative(self):
"""
Override Function's is_commutative so that order is preserved in
represented expressions
"""
return False
@classmethod
def eval(self, *args):
return None
@property
def variables(self):
"""
Return the coordinates which the wavefunction depends on
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x,y = symbols('x,y')
>>> f = Wavefunction(x*y, x, y)
>>> f.variables
(x, y)
>>> g = Wavefunction(x*y, x)
>>> g.variables
(x,)
"""
var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]]
return tuple(var)
@property
def limits(self):
"""
        Return the limits of the coordinates which the w.f. depends on. If no
limits are specified, defaults to ``(-oo, oo)``.
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, (x, 0, 1))
>>> f.limits
{x: (0, 1)}
>>> f = Wavefunction(x**2, x)
>>> f.limits
{x: (-oo, oo)}
>>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2))
>>> f.limits
{x: (-oo, oo), y: (-1, 2)}
"""
limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo)
for g in self._args[1:]]
return dict(zip(self.variables, tuple(limits)))
@property
def expr(self):
"""
Return the expression which is the functional form of the Wavefunction
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, x)
>>> f.expr
x**2
"""
return self._args[0]
@property
def is_normalized(self):
"""
Returns true if the Wavefunction is properly normalized
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.is_normalized
True
"""
return (self.norm == 1.0)
@property
@cacheit
def norm(self):
"""
Return the normalization of the specified functional form.
This function integrates over the coordinates of the Wavefunction, with
the bounds specified.
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
sqrt(2)*sqrt(L)/2
"""
exp = self.expr*conjugate(self.expr)
var = self.variables
limits = self.limits
for v in var:
curr_limits = limits[v]
exp = integrate(exp, (v, curr_limits[0], curr_limits[1]))
return sqrt(exp)
def normalize(self):
"""
Return a normalized version of the Wavefunction
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = symbols('x', real=True)
>>> L = symbols('L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.normalize()
Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L))
"""
const = self.norm
if const == oo:
raise NotImplementedError("The function is not normalizable!")
else:
return Wavefunction((const)**(-1)*self.expr, *self.args[1:])
def prob(self):
"""
Return the absolute magnitude of the w.f., `|\psi(x)|^2`
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', real=True)
>>> n = symbols('n', integer=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.prob()
Wavefunction(sin(pi*n*x/L)**2, x)
"""
return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
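# --- Illustrative sketch (not part of the original module) ------------------
# A compact re-run of the particle-in-a-box examples from the docstrings
# above, wrapped in a hypothetical helper so importing the module stays
# side-effect free: f is already normalized, and normalizing g recovers the
# same functional form.
def _wavefunction_demo():
    from sympy import symbols, pi, sin, sqrt
    x, L = symbols('x, L', positive=True)
    n = symbols('n', integer=True, positive=True)
    f = Wavefunction(sqrt(2/L)*sin(n*pi*x/L), (x, 0, L))
    g = Wavefunction(sin(n*pi*x/L), (x, 0, L))
    return f.norm, g.normalize().expr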
| bsd-3-clause |
acimmarusti/isl_exercises | chap4/chap4ex13.py | 1 | 5743 | from __future__ import print_function, division
import matplotlib.pyplot as plt
import numpy as np
import scipy
import pandas as pd
import seaborn as sns
from sklearn.datasets import load_boston
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score
from pandas.tools.plotting import scatter_matrix
import statsmodels.formula.api as smf
import statsmodels.api as sm
#Load boston dataset from sklearn#
boston = load_boston()
#Columns#
#print(boston['feature_names'])
#Description#
#print(boston['DESCR'])
rawdata = pd.DataFrame(boston.data, columns=boston.feature_names)
rawdata['MEDV'] = boston.target
#Convert to NaN#
data = rawdata.replace(to_replace='None', value=np.nan).copy()
#Create the binary variable CRIM01#
data['CRIM01'] = np.where(data['CRIM'] > data['CRIM'].median(), 1, 0)
#Columns#
numcols = list(data.columns)
numcols.remove('CRIM')
#Predictor without target columns#
xcols = list(numcols)
xcols.remove('CRIM01')
#Summary (mean, stdev, range, etc)#
print('\nFull data summary')
print(data.describe())
#Correlations#
print('\nData correlations')
dcorrs = data.corr()
print(dcorrs)
#Pair plot matrix#
#sns.set()
#sns.pairplot(data[numcols], hue='CRIM01')
print('\n\n### LOGISTIC REGRESSION###')
## Logistic regression with statsmodels ##
plr_form = 'CRIM01~' + '+'.join(xcols)
prelogreg = smf.glm(formula=plr_form, data=data, family=sm.families.Binomial()).fit()
#Remove predictors with high P-values from LogReg#
logit_pvals = prelogreg.pvalues
pred_keep = list(logit_pvals[logit_pvals < 0.05].index)
pred_keep.remove('Intercept')
print('\nAfter first LogReg iteration, keeping only: ', pred_keep)
# New LogReg with only low p-value predictors#
lr_form = 'CRIM01~' + '+'.join(pred_keep)
logreg = smf.glm(formula=lr_form, data=data, family=sm.families.Binomial()).fit()
print('\nLogistic regression fit summary')
print(logreg.summary())
#Splitting the data for train/test#
X_data = data[pred_keep]
Y_data = data['CRIM01']
X_train, X_test, Y_train, Y_test = train_test_split(X_data, Y_data, test_size=0.5, random_state=42, stratify=Y_data)
# Initiate logistic regression object
logit_clf = LogisticRegression()
# Fit model. Let X_train = matrix of predictors, Y_train = matrix of variables.
resLogit_clf = logit_clf.fit(X_train, Y_train)
#Predicted values for training set
Y_pred_logit = resLogit_clf.predict(X_test)
#Confusion matrix#
print("\nConfusion matrix logit:")
print(confusion_matrix(Y_test, Y_pred_logit))
#Accuracy, precision and recall#
print('\nAccuracy logit:', np.round(accuracy_score(Y_test, Y_pred_logit), 3))
print("Precision logit:", np.round(precision_score(Y_test, Y_pred_logit, pos_label=1), 3))
print("Recall logit:", np.round(recall_score(Y_test, Y_pred_logit, pos_label=1), 3))
print('\n\n### LINEAR DISCRIMINANT ANALYSIS ###')
# Initiate logistic regression object
lda_clf = LinearDiscriminantAnalysis()
# Fit model. X_train holds the selected predictors, Y_train the response vector.
reslda_clf = lda_clf.fit(X_train, Y_train)
#Predicted values for training set
Y_pred_lda = reslda_clf.predict(X_test)
#Prior probabilities#
print("\nPrior probabilities")
print(reslda_clf.classes_)
print(reslda_clf.priors_)
#Group means#
print("\nGroup means")
#print(reslda_clf.classes_)
print(reslda_clf.means_)
#Coefficients#
print("\nCoefficients")
#print(reslda_clf.classes_)
print(reslda_clf.intercept_)
print(reslda_clf.coef_)
#Confusion matrix#
print("\nConfusion matrix LDA:")
print(confusion_matrix(Y_test, Y_pred_lda))
#Accuracy, precision and recall#
print("\nAccuracy LDA:", np.round(accuracy_score(Y_test, Y_pred_lda), 3))
print("Precision LDA:", np.round(precision_score(Y_test, Y_pred_lda, pos_label=1), 3))
print("Recall LDA:", np.round(recall_score(Y_test, Y_pred_lda, pos_label=1), 3))
print('\n\n### QUADRATIC DISCRIMINANT ANALYSIS ###')
# Initiate QDA object
qda_clf = QuadraticDiscriminantAnalysis()
# Fit model. X_train holds the selected predictors, Y_train the response vector.
resqda_clf = qda_clf.fit(X_train, Y_train)
#Predicted values for training set
Y_pred_qda = resqda_clf.predict(X_test)
#Prior probabilities#
print("\nPrior probabilities")
print(resqda_clf.classes_)
print(resqda_clf.priors_)
#Group means#
print("\nGroup means")
#print(resqda_clf.classes_)
print(resqda_clf.means_)
#Confusion matrix#
print("\nConfusion matrix QDA:")
print(confusion_matrix(Y_test, Y_pred_qda))
#Accuracy, precision and recall#
print("\nAccuracy QDA:", np.round(accuracy_score(Y_test, Y_pred_qda), 3))
print("Precision QDA:", np.round(precision_score(Y_test, Y_pred_qda, pos_label=1), 3))
print("Recall QDA:", np.round(recall_score(Y_test, Y_pred_qda, pos_label=1), 3))
print('\n\n### K NEAREST NEIGHBORS ###')
#K value#
kval = 15
print('\nUsing k = ' + str(kval))
# Initiate KNN object
knn_clf = KNeighborsClassifier(n_neighbors=kval)
# Fit model. X_train holds the selected predictors, Y_train the response vector.
resknn_clf = knn_clf.fit(X_train, Y_train)
#Predicted values for training set
Y_pred_knn = resknn_clf.predict(X_test)
#Confusion matrix#
print("\nConfusion matrix KNN:")
print(confusion_matrix(Y_test, Y_pred_knn))
#Accuracy, precision and recall#
print("\nAccuracy KNN:", np.round(accuracy_score(Y_test, Y_pred_knn), 3))
print("Precision KNN:", np.round(precision_score(Y_test, Y_pred_knn, pos_label=1), 3))
print("Recall KNN:", np.round(recall_score(Y_test, Y_pred_knn, pos_label=1), 3))
#plt.show()
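#--- Optional extension (illustrative, not part of the original exercise) ---#
#A natural follow-up: see how KNN test accuracy changes with k. The k values
#below are arbitrary choices for illustration.
for kv in (1, 5, 10, 20, 50):
    knn_k = KNeighborsClassifier(n_neighbors=kv).fit(X_train, Y_train)
    print('KNN accuracy (k=' + str(kv) + '):',
          np.round(accuracy_score(Y_test, knn_k.predict(X_test)), 3))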
| gpl-3.0 |
hlin117/statsmodels | statsmodels/examples/ex_kernel_regression_dgp.py | 34 | 1202 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 06 09:50:54 2013
Author: Josef Perktold
"""
from __future__ import print_function
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.nonparametric.api import KernelReg
import statsmodels.sandbox.nonparametric.dgp_examples as dgp
seed = np.random.randint(999999)
seed = 430973
print(seed)
np.random.seed(seed)
funcs = [dgp.UnivariateFanGijbels1(),
dgp.UnivariateFanGijbels2(),
dgp.UnivariateFanGijbels1EU(),
#dgp.UnivariateFanGijbels2(distr_x=stats.uniform(-2, 4))
dgp.UnivariateFunc1()
]
res = []
fig = plt.figure()
for i,func in enumerate(funcs):
#f = func()
f = func
model = KernelReg(endog=[f.y], exog=[f.x], reg_type='ll',
var_type='c', bw='cv_ls')
mean, mfx = model.fit()
ax = fig.add_subplot(2, 2, i+1)
f.plot(ax=ax)
ax.plot(f.x, mean, color='r', lw=2, label='est. mean')
ax.legend(loc='upper left')
res.append((model, mean, mfx))
fig.suptitle('Kernel Regression')
fig.show()
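    # Illustrative follow-up (not in the original example): the fitted model
    # objects collected in ``res`` keep the cross-validated bandwidth, which
    # could be inspected with something like
    #     for f, (model, mean, mfx) in zip(funcs, res):
    #         print(f.__class__.__name__, model.bw)
    # (``model.bw`` is assumed here to be KernelReg's bandwidth attribute;
    # treat this as a sketch, not verified API.)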
| bsd-3-clause |
eteq/bokeh | examples/interactions/interactive_bubble/data.py | 49 | 1265 | import numpy as np
from bokeh.palettes import Spectral6
def process_data():
from bokeh.sampledata.gapminder import fertility, life_expectancy, population, regions
# Make the column names ints not strings for handling
columns = list(fertility.columns)
years = list(range(int(columns[0]), int(columns[-1])))
rename_dict = dict(zip(columns, years))
fertility = fertility.rename(columns=rename_dict)
life_expectancy = life_expectancy.rename(columns=rename_dict)
population = population.rename(columns=rename_dict)
regions = regions.rename(columns=rename_dict)
# Turn population into bubble sizes. Use min_size and factor to tweak.
scale_factor = 200
population_size = np.sqrt(population / np.pi) / scale_factor
min_size = 3
population_size = population_size.where(population_size >= min_size).fillna(min_size)
# Use pandas categories and categorize & color the regions
regions.Group = regions.Group.astype('category')
regions_list = list(regions.Group.cat.categories)
def get_color(r):
return Spectral6[regions_list.index(r.Group)]
regions['region_color'] = regions.apply(get_color, axis=1)
return fertility, life_expectancy, population_size, regions, years, regions_list
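# --- Illustrative sketch (not part of the original module) ------------------
# How the processed frames are typically consumed: index each frame by one of
# the integer years returned above. The helper name is hypothetical.
def _one_year_slice(year_index=0):
    fertility, life, pop_size, regions, years, regions_list = process_data()
    year = years[year_index]
    return fertility[year], life[year], pop_size[year], regions.region_color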
| bsd-3-clause |
bnaul/scikit-learn | sklearn/neighbors/_classification.py | 2 | 23284 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from ..utils.validation import _is_arraylike, _num_samples
import warnings
from ._base import _check_weights, _get_weights
from ._base import NeighborsBase, KNeighborsMixin, RadiusNeighborsMixin
from ..base import ClassifierMixin
from ..utils import check_array
from ..utils.validation import _deprecate_positional_args
class KNeighborsClassifier(KNeighborsMixin,
ClassifierMixin,
NeighborsBase):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, default=5
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : {'uniform', 'distance'} or callable, default='uniform'
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : str or callable, default='minkowski'
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of :class:`DistanceMetric` for a
list of available metrics.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit. X may be a :term:`sparse graph`,
in which case only "nonzero" elements may be considered neighbors.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Doesn't affect :meth:`fit` method.
Attributes
----------
classes_ : array of shape (n_classes,)
Class labels known to the classifier
    effective_metric_ : str or callable
The distance metric used. It will be same as the `metric` parameter
or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to
'minkowski' and `p` parameter set to 2.
effective_metric_params_ : dict
Additional keyword arguments for the metric function. For most metrics
will be same with `metric_params` parameter, but may also contain the
`p` parameter value if the `effective_metric_` attribute is set to
'minkowski'.
n_samples_fit_ : int
Number of samples in the fitted data.
outputs_2d_ : bool
False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit
otherwise True.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y)
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def fit(self, X, y):
"""Fit the k-nearest neighbors classifier from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
y : {array-like, sparse matrix} of shape (n_samples,) or \
(n_samples, n_outputs)
Target values.
Returns
-------
self : KNeighborsClassifier
The fitted k-nearest neighbors classifier.
"""
return self._fit(X, y)
def predict(self, X):
"""Predict the class labels for the provided data.
Parameters
----------
X : array-like of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : ndarray of shape (n_queries,) or (n_queries, n_outputs)
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_queries = _num_samples(X)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : ndarray of shape (n_queries, n_classes), or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_queries = _num_samples(X)
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_queries, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
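# --- Illustrative sketch (not part of the original module) ------------------
# The probabilities returned by KNeighborsClassifier.predict_proba are
# normalized neighbor votes; with weights='distance' closer neighbors carry
# more of the vote. Hypothetical helper, mirroring the docstring example:
def _kneighbors_proba_demo():
    X = [[0], [1], [2], [3]]
    y = [0, 0, 1, 1]
    uniform = KNeighborsClassifier(n_neighbors=3).fit(X, y)
    weighted = KNeighborsClassifier(n_neighbors=3, weights='distance').fit(X, y)
    # Each row sums to one; the distance-weighted estimate leans further
    # towards the class of the nearest neighbor.
    return uniform.predict_proba([[0.9]]), weighted.predict_proba([[0.9]])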
class RadiusNeighborsClassifier(RadiusNeighborsMixin,
ClassifierMixin,
NeighborsBase):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, default=1.0
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : {'uniform', 'distance'} or callable, default='uniform'
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : str or callable, default='minkowski'
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of :class:`DistanceMetric` for a
list of available metrics.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit. X may be a :term:`sparse graph`,
in which case only "nonzero" elements may be considered neighbors.
outlier_label : {manual label, 'most_frequent'}, default=None
label for outlier samples (samples with no neighbors in given radius).
- manual label: str or int label (should be the same type as y)
or list of manual labels if multi-output is used.
- 'most_frequent' : assign the most frequent label of y to outliers.
- None : when any outlier is detected, ValueError will be raised.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier.
effective_metric_ : str or callable
The distance metric used. It will be same as the `metric` parameter
or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to
'minkowski' and `p` parameter set to 2.
effective_metric_params_ : dict
Additional keyword arguments for the metric function. For most metrics
will be same with `metric_params` parameter, but may also contain the
`p` parameter value if the `effective_metric_` attribute is set to
'minkowski'.
n_samples_fit_ : int
Number of samples in the fitted data.
outlier_label_ : int or array-like of shape (n_class,)
Label which is given for outlier samples (samples with no neighbors
on given radius).
outputs_2d_ : bool
False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit
otherwise True.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y)
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
>>> print(neigh.predict_proba([[1.0]]))
[[0.66666667 0.33333333]]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
@_deprecate_positional_args
def __init__(self, radius=1.0, *, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def fit(self, X, y):
"""Fit the radius neighbors classifier from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
y : {array-like, sparse matrix} of shape (n_samples,) or \
(n_samples, n_outputs)
Target values.
Returns
-------
self : RadiusNeighborsClassifier
The fitted radius neighbors classifier.
"""
self._fit(X, y)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
if self.outlier_label is None:
outlier_label_ = None
elif self.outlier_label == 'most_frequent':
outlier_label_ = []
# iterate over multi-output, get the most frequent label for each
# output.
for k, classes_k in enumerate(classes_):
label_count = np.bincount(_y[:, k])
outlier_label_.append(classes_k[label_count.argmax()])
else:
if (_is_arraylike(self.outlier_label) and
not isinstance(self.outlier_label, str)):
if len(self.outlier_label) != len(classes_):
raise ValueError("The length of outlier_label: {} is "
"inconsistent with the output "
"length: {}".format(self.outlier_label,
len(classes_)))
outlier_label_ = self.outlier_label
else:
outlier_label_ = [self.outlier_label] * len(classes_)
for classes, label in zip(classes_, outlier_label_):
if (_is_arraylike(label) and
not isinstance(label, str)):
                    # ensure the outlier label for each output is a scalar.
raise TypeError("The outlier_label of classes {} is "
"supposed to be a scalar, got "
"{}.".format(classes, label))
if np.append(classes, label).dtype != classes.dtype:
# ensure the dtype of outlier label is consistent with y.
raise TypeError("The dtype of outlier_label {} is "
"inconsistent with classes {} in "
"y.".format(label, classes))
self.outlier_label_ = outlier_label_
return self
def predict(self, X):
"""Predict the class labels for the provided data.
Parameters
----------
X : array-like of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : ndarray of shape (n_queries,) or (n_queries, n_outputs)
Class labels for each data sample.
"""
probs = self.predict_proba(X)
classes_ = self.classes_
if not self.outputs_2d_:
probs = [probs]
classes_ = [self.classes_]
n_outputs = len(classes_)
n_queries = probs[0].shape[0]
y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype)
for k, prob in enumerate(probs):
# iterate over multi-output, assign labels based on probabilities
# of each output.
max_prob_index = prob.argmax(axis=1)
y_pred[:, k] = classes_[k].take(max_prob_index)
outlier_zero_probs = (prob == 0).all(axis=1)
if outlier_zero_probs.any():
zero_prob_index = np.flatnonzero(outlier_zero_probs)
y_pred[zero_prob_index, k] = self.outlier_label_[k]
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : ndarray of shape (n_queries, n_classes), or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
n_queries = _num_samples(X)
neigh_dist, neigh_ind = self.radius_neighbors(X)
outlier_mask = np.zeros(n_queries, dtype=bool)
outlier_mask[:] = [len(nind) == 0 for nind in neigh_ind]
outliers = np.flatnonzero(outlier_mask)
inliers = np.flatnonzero(~outlier_mask)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
if self.outlier_label_ is None and outliers.size > 0:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'giving a label for outliers, '
'or considering removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
if weights is not None:
weights = weights[inliers]
probabilities = []
# iterate over multi-output, measure probabilities of the k-th output.
for k, classes_k in enumerate(classes_):
pred_labels = np.zeros(len(neigh_ind), dtype=object)
pred_labels[:] = [_y[ind, k] for ind in neigh_ind]
proba_k = np.zeros((n_queries, classes_k.size))
proba_inl = np.zeros((len(inliers), classes_k.size))
# samples have different size of neighbors within the same radius
if weights is None:
for i, idx in enumerate(pred_labels[inliers]):
proba_inl[i, :] = np.bincount(idx,
minlength=classes_k.size)
else:
for i, idx in enumerate(pred_labels[inliers]):
proba_inl[i, :] = np.bincount(idx,
weights[i],
minlength=classes_k.size)
proba_k[inliers, :] = proba_inl
if outliers.size > 0:
_outlier_label = self.outlier_label_[k]
label_index = np.flatnonzero(classes_k == _outlier_label)
if label_index.size == 1:
proba_k[outliers, label_index[0]] = 1.0
else:
warnings.warn('Outlier label {} is not in training '
'classes. All class probabilities of '
'outliers will be assigned with 0.'
''.format(self.outlier_label_[k]))
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
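# --- Illustrative sketch (not part of the original module) ------------------
# How ``outlier_label`` behaves for queries with no neighbors inside
# ``radius``. Hypothetical helper; note that a warning is emitted because -1
# is not one of the training classes, so its probability row stays all zero.
def _radius_outlier_demo():
    X = [[0], [1], [2], [3]]
    y = [0, 0, 1, 1]
    clf = RadiusNeighborsClassifier(radius=0.5, outlier_label=-1).fit(X, y)
    # 1.1 has a neighbor within 0.5 and gets a real class; 10.0 has none and
    # falls back to the outlier label.
    return clf.predict([[1.1], [10.0]])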
| bsd-3-clause |
eickenberg/scikit-learn | sklearn/cluster/bicluster/spectral.py | 1 | 19540 | """Implements spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.externals import six
from sklearn.utils.arpack import svds
from sklearn.utils.arpack import eigsh
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.extmath import make_nonnegative
from sklearn.utils.extmath import norm
from sklearn.utils.validation import assert_all_finite
from sklearn.utils.validation import check_array
from .utils import check_array_ndim
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
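# --- Illustrative sketch (not part of the original module) ------------------
# What _bistochastic_normalize promises: after convergence every row sums to
# one constant and every column to another. Hypothetical helper on a small
# dense matrix:
def _bistochastic_demo():
    X = np.arange(1.0, 7.0).reshape(2, 3)
    X_scaled = _bistochastic_normalize(X)
    # All entries of the first returned array are (approximately) equal, and
    # likewise for the second.
    return X_scaled.sum(axis=1), X_scaled.sum(axis=0)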
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
check_array_ndim(X)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. This parameter is
        passed on to the k-means step (see ``sklearn.cluster.KMeans``).
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
`rows_` : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
`columns_` : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
`row_labels_` : array-like, shape (n_rows,)
The bicluster label of each row.
`column_labels_` : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
        self.rows_ = np.vstack([self.row_labels_ == c
                                for c in range(self.n_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == c
                                   for c in range(self.n_clusters)])
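# Minimal usage sketch for the class above (illustrative only, not part of
# the scikit-learn API surface): fit the co-clustering model on a small
# nonnegative matrix with two blocks on the diagonal and read the row and
# column bicluster labels.
def _example_spectral_coclustering():
    rng = np.random.RandomState(0)
    X = rng.rand(20, 20)
    X[:10, :10] += 10   # first block
    X[10:, 10:] += 10   # second block
    model = SpectralCoclustering(n_clusters=2, random_state=0)
    model.fit(X)
    return model.row_labels_, model.column_labels_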
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. This parameter is
        passed on to the k-means step (see ``sklearn.cluster.KMeans``).
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
`rows_` : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
`columns_` : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
`row_labels_` : array-like, shape (n_rows,)
Row partition labels.
`column_labels_` : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
        self.rows_ = np.vstack([self.row_labels_ == label
                                for label in range(n_row_clusters)
                                for _ in range(n_col_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == label
                                   for _ in range(n_row_clusters)
                                   for label in range(n_col_clusters)])
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
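# Minimal usage sketch for the class above (illustrative only, not part of
# the scikit-learn API surface): fit the checkerboard model with separate
# numbers of row and column clusters and read the partition labels.
def _example_spectral_biclustering():
    rng = np.random.RandomState(0)
    row_part = np.repeat(np.arange(2), 15)   # 2 row partitions
    col_part = np.repeat(np.arange(3), 10)   # 3 column partitions
    means = rng.uniform(1, 10, size=(2, 3))
    # Checkerboard mean structure plus a little noise.
    X = means[row_part][:, col_part] + 0.1 * rng.randn(30, 30)
    model = SpectralBiclustering(n_clusters=(2, 3), method='log',
                                 random_state=0)
    model.fit(X)
    return model.row_labels_, model.column_labels_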
| bsd-3-clause |
Adai0808/BuildingMachineLearningSystemsWithPython | ch07/predict10k_en.py | 22 | 2428 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import numpy as np
from sklearn.datasets import load_svmlight_file
from sklearn.cross_validation import KFold
from sklearn.linear_model import ElasticNetCV, ElasticNet
from sklearn.metrics import mean_squared_error, r2_score
from matplotlib import pyplot as plt
data, target = load_svmlight_file('data/E2006.train')
# Edit the lines below if you want to switch method:
# from sklearn.linear_model import Lasso
# met = Lasso(alpha=0.1)
met = ElasticNet(alpha=0.1)
kf = KFold(len(target), n_folds=5)
pred = np.zeros_like(target)
for train, test in kf:
met.fit(data[train], target[train])
pred[test] = met.predict(data[test])
print('[EN 0.1] RMSE on testing (5 fold), {:.2}'.format(np.sqrt(mean_squared_error(target, pred))))
print('[EN 0.1] R2 on testing (5 fold), {:.2}'.format(r2_score(target, pred)))
print('')
# Construct an ElasticNetCV object (use all available CPUs)
met = ElasticNetCV(n_jobs=-1)
kf = KFold(len(target), n_folds=5)
pred = np.zeros_like(target)
for train, test in kf:
met.fit(data[train], target[train])
pred[test] = met.predict(data[test])
print('[EN CV] RMSE on testing (5 fold), {:.2}'.format(np.sqrt(mean_squared_error(target, pred))))
print('[EN CV] R2 on testing (5 fold), {:.2}'.format(r2_score(target, pred)))
print('')
met.fit(data, target)
pred = met.predict(data)
print('[EN CV] RMSE on training, {:.2}'.format(np.sqrt(mean_squared_error(target, pred))))
print('[EN CV] R2 on training, {:.2}'.format(r2_score(target, pred)))
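# The CV estimator also records the hyperparameters it selected; printing
# them makes it easier to compare against the fixed alpha=0.1 model above.
# (Attribute names as exposed by scikit-learn's ElasticNetCV.)
print('[EN CV] selected alpha {:.3g}, selected l1_ratio {:.2g}'.format(
    met.alpha_, met.l1_ratio_))
print('')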
# Construct an ElasticNetCV object (use all available CPUs)
met = ElasticNetCV(n_jobs=-1, l1_ratio=[.01, .05, .25, .5, .75, .95, .99])
kf = KFold(len(target), n_folds=5)
pred = np.zeros_like(target)
for train, test in kf:
met.fit(data[train], target[train])
pred[test] = met.predict(data[test])
print('[EN CV l1_ratio] RMSE on testing (5 fold), {:.2}'.format(np.sqrt(mean_squared_error(target, pred))))
print('[EN CV l1_ratio] R2 on testing (5 fold), {:.2}'.format(r2_score(target, pred)))
print('')
fig, ax = plt.subplots()
y = target
ax.scatter(y, pred, c='k')
ax.plot([-5,-1], [-5,-1], 'r-', lw=2)
ax.set_xlabel('Actual value')
ax.set_ylabel('Predicted value')
fig.savefig('Figure_10k_scatter_EN_l1_ratio.png')
| mit |
kernc/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 43 | 24671 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from numpy.testing import run_module_suite
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (
chi2, f_classif, f_oneway, f_regression, mutual_info_classif,
mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr,
SelectFdr, SelectFwe, GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: check that it does not raise
    # casting errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
    # for any numeric data type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
    Y[0] = 0.  # make Y have zero mean
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
def test_mutual_info_classif():
X, y = make_classification(n_samples=100, n_features=5,
n_informative=1, n_redundant=1,
n_repeated=0, n_classes=2,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_classif, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_classif, percentile=40)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='percentile', param=40).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
def test_mutual_info_regression():
X, y = make_regression(n_samples=100, n_features=10, n_informative=2,
shuffle=False, random_state=0, noise=10)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_regression, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
mutual_info_regression, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_regression, percentile=20)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(mutual_info_regression, mode='percentile',
param=20).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
rexshihaoren/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
zhoulingjun/zipline | tests/test_algorithm_gen.py | 18 | 7339 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from nose.tools import (
timed,
nottest
)
from datetime import datetime
import pandas as pd
import pytz
from zipline.finance import trading
from zipline.algorithm import TradingAlgorithm
from zipline.finance import slippage
from zipline.utils import factory
from zipline.utils.factory import create_simulation_parameters
from zipline.utils.test_utils import (
setup_logger,
teardown_logger
)
from zipline.protocol import (
Event,
DATASOURCE_TYPE
)
DEFAULT_TIMEOUT = 15 # seconds
EXTENDED_TIMEOUT = 90
class RecordDateSlippage(slippage.FixedSlippage):
def __init__(self, spread):
super(RecordDateSlippage, self).__init__(spread=spread)
self.latest_date = None
def simulate(self, event, open_orders):
self.latest_date = event.dt
result = super(RecordDateSlippage, self).simulate(event, open_orders)
return result
class TestAlgo(TradingAlgorithm):
def __init__(self, asserter, *args, **kwargs):
super(TestAlgo, self).__init__(*args, **kwargs)
self.asserter = asserter
def initialize(self, window_length=100):
self.latest_date = None
self.set_slippage(RecordDateSlippage(spread=0.05))
self.stocks = [self.sid(8229)]
self.ordered = False
self.num_bars = 0
def handle_data(self, data):
self.num_bars += 1
self.latest_date = self.get_datetime()
if not self.ordered:
for stock in self.stocks:
self.order(stock, 100)
self.ordered = True
else:
self.asserter.assertGreaterEqual(
self.latest_date,
self.slippage.latest_date
)
class AlgorithmGeneratorTestCase(TestCase):
def setUp(self):
setup_logger(self)
def tearDown(self):
teardown_logger(self)
@nottest
def test_lse_algorithm(self):
lse = trading.TradingEnvironment(
bm_symbol='^FTSE',
exchange_tz='Europe/London'
)
with lse:
sim_params = factory.create_simulation_parameters(
start=datetime(2012, 5, 1, tzinfo=pytz.utc),
end=datetime(2012, 6, 30, tzinfo=pytz.utc)
)
algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
trade_source = factory.create_daily_trade_source(
[8229],
200,
sim_params
)
algo.set_sources([trade_source])
gen = algo.get_generator()
results = list(gen)
self.assertEqual(len(results), 42)
# May 7, 2012 was an LSE holiday, confirm the 4th trading
# day was May 8.
self.assertEqual(results[4]['daily_perf']['period_open'],
datetime(2012, 5, 8, 8, 31, tzinfo=pytz.utc))
@timed(DEFAULT_TIMEOUT)
def test_generator_dates(self):
"""
        Ensure that the pipeline of generators is in sync, at least as far
        as their current dates.
"""
sim_params = factory.create_simulation_parameters(
start=datetime(2011, 7, 30, tzinfo=pytz.utc),
end=datetime(2012, 7, 30, tzinfo=pytz.utc)
)
algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
trade_source = factory.create_daily_trade_source(
[8229],
sim_params
)
algo.set_sources([trade_source])
gen = algo.get_generator()
self.assertTrue(list(gen))
self.assertTrue(algo.slippage.latest_date)
self.assertTrue(algo.latest_date)
@timed(DEFAULT_TIMEOUT)
def test_handle_data_on_market(self):
"""
Ensure that handle_data is only called on market minutes.
i.e. events that come in at midnight should be processed at market
open.
"""
from zipline.finance.trading import SimulationParameters
sim_params = SimulationParameters(
period_start=datetime(2012, 7, 30, tzinfo=pytz.utc),
period_end=datetime(2012, 7, 30, tzinfo=pytz.utc),
data_frequency='minute'
)
algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
midnight_custom_source = [Event({
'custom_field': 42.0,
'sid': 'custom_data',
'source_id': 'TestMidnightSource',
'dt': pd.Timestamp('2012-07-30', tz='UTC'),
'type': DATASOURCE_TYPE.CUSTOM
})]
minute_event_source = [Event({
'volume': 100,
'price': 200.0,
'high': 210.0,
'open_price': 190.0,
'low': 180.0,
'sid': 8229,
'source_id': 'TestMinuteEventSource',
'dt': pd.Timestamp('2012-07-30 9:31 AM', tz='US/Eastern').
tz_convert('UTC'),
'type': DATASOURCE_TYPE.TRADE
})]
algo.set_sources([midnight_custom_source, minute_event_source])
gen = algo.get_generator()
# Consume the generator
list(gen)
# Though the events had different time stamps, handle data should
# have only been called once, at the market open.
self.assertEqual(algo.num_bars, 1)
@timed(DEFAULT_TIMEOUT)
def test_progress(self):
"""
        Ensure that the progress output reaches 1.0 by the end of the
        simulation.
"""
sim_params = factory.create_simulation_parameters(
start=datetime(2008, 1, 1, tzinfo=pytz.utc),
end=datetime(2008, 1, 5, tzinfo=pytz.utc)
)
algo = TestAlgo(self, sim_params=sim_params)
trade_source = factory.create_daily_trade_source(
[8229],
sim_params
)
algo.set_sources([trade_source])
gen = algo.get_generator()
results = list(gen)
self.assertEqual(results[-2]['progress'], 1.0)
def test_benchmark_times_match_market_close_for_minutely_data(self):
"""
Benchmark dates should be adjusted so that benchmark events are
emitted at the end of each trading day when working with minutely
data.
Verification relies on the fact that there are no trades so
algo.datetime should be equal to the last benchmark time.
See https://github.com/quantopian/zipline/issues/241
"""
sim_params = create_simulation_parameters(num_days=1,
data_frequency='minute')
algo = TestAlgo(self, sim_params=sim_params, identifiers=[8229])
algo.run(source=[], overwrite_sim_params=False)
self.assertEqual(algo.datetime, sim_params.last_close)
| apache-2.0 |
dean0x7d/pybinding | pybinding/support/collections.py | 1 | 3143 | import numpy as np
from matplotlib.collections import Collection
from matplotlib.artist import allow_rasterization
# noinspection PyAbstractClass
class CircleCollection(Collection):
"""Custom circle collection
The default matplotlib `CircleCollection` creates circles based on their
area in screen units. This class uses the radius in data units. It behaves
like a much faster version of a `PatchCollection` of `Circle`.
The implementation is similar to `EllipseCollection`.
"""
def __init__(self, radius, **kwargs):
super().__init__(**kwargs)
from matplotlib import path, transforms
self.radius = np.atleast_1d(radius)
self._paths = [path.Path.unit_circle()]
self.set_transform(transforms.IdentityTransform())
self._transforms = np.empty((0, 3, 3))
def _set_transforms(self):
ax = self.axes
self._transforms = np.zeros((self.radius.size, 3, 3))
self._transforms[:, 0, 0] = self.radius * ax.bbox.width / ax.viewLim.width
self._transforms[:, 1, 1] = self.radius * ax.bbox.height / ax.viewLim.height
self._transforms[:, 2, 2] = 1
@allow_rasterization
def draw(self, renderer):
self._set_transforms()
super().draw(renderer)
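# Minimal usage sketch for the class above (illustrative only, not part of
# the pybinding API): offsets are given in data coordinates and the radius
# in data units, so the circles keep their data-space size when the axes
# limits change. Keyword names follow the matplotlib `Collection` API.
def _example_circle_collection():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    circles = CircleCollection([0.1, 0.2, 0.3],
                               offsets=[(0, 0), (1, 1), (2, 0)],
                               transOffset=ax.transData,
                               facecolors='blue', alpha=0.5)
    ax.add_collection(circles)
    ax.set_xlim(-1, 3)
    ax.set_ylim(-1, 2)
    return fig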
class Circle3DCollection(CircleCollection):
def __init__(self, radius, zs=0, zdir='z', depthshade=True, **kwargs):
super().__init__(radius, **kwargs)
self._depthshade = depthshade
self.set_3d_properties(zs, zdir)
self._A0 = self._A
def set_array(self, array):
self._A0 = array
super().set_array(array)
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
from mpl_toolkits.mplot3d.art3d import juggle_axes
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
def do_3d_projection(self, renderer):
from mpl_toolkits.mplot3d import proj3d
from mpl_toolkits.mplot3d.art3d import zalpha
from matplotlib import colors as mcolors
# transform and sort in z direction
v = np.array(proj3d.proj_transform_clip(*self._offsets3d, M=renderer.M)[:3])
idx = v[2].argsort()[::-1]
vzs = v[2, idx]
self.set_offsets(v[:2, idx].transpose())
super().set_array(self._A0[idx])
fcs = zalpha(self._facecolor3d, vzs) if self._depthshade else self._facecolor3d
fcs = mcolors.colorConverter.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = zalpha(self._edgecolor3d, vzs) if self._depthshade else self._edgecolor3d
ecs = mcolors.colorConverter.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
return min(vzs) if vzs.size > 0 else np.nan
| bsd-2-clause |