repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
balavenkatesan/yellowbrick | yellowbrick/features/radviz.py | 1 | 8495 | # yellowbrick.features.radviz
# Implements radviz for feature analysis.
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Fri Oct 07 13:18:00 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: radviz.py [] benjamin@bengfort.com $
"""
Implements radviz for feature analysis.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from yellowbrick.features.base import DataVisualizer
from yellowbrick.exceptions import YellowbrickTypeError
from yellowbrick.style.colors import resolve_colors, get_color_cycle
##########################################################################
## Quick Methods
##########################################################################
def radviz(X, y=None, ax=None, features=None, classes=None,
color=None, colormap=None, **kwargs):
"""Displays each feature as an axis around a circle surrounding a scatter
plot whose points are each individual instance.
This helper function is a quick wrapper to utilize the RadialVisualizer
(Transformer) for one-off analysis.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
ax : matplotlib axes
The axes to plot the figure on.
features : list of strings
The names of the features or columns
classes : list of strings
The names of the classes in the target
color : list or tuple of colors
Specify the colors for each individual class
colormap : string or matplotlib cmap
Sequential colormap for continuous target
Returns
-------
ax : matplotlib axes
Returns the axes that the radviz plot was drawn on.
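Examples
--------
A minimal usage sketch (illustrative only; assumes ``X`` is a feature
matrix, ``y`` a vector of class labels and ``feature_names`` a list of
column names, none of which are defined in this module)::

    ax = radviz(X, y, features=feature_names)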
"""
# Instantiate the visualizer
visualizer = RadialVisualizer(
ax, features, classes, color, colormap, **kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X, y, **kwargs)
visualizer.transform(X)
# Return the axes object on the visualizer
return visualizer.ax
##########################################################################
## Static RadViz Visualizer
##########################################################################
class RadialVisualizer(DataVisualizer):
"""
RadViz is a multivariate data visualization algorithm that plots each
feature axis uniformly around the circumference of a circle, then plots
points on the interior of the circle such that each point's position
reflects its normalized values along the axes, from the center to each arc.
"""
def __init__(self, ax=None, features=None, classes=None, color=None,
colormap=None, **kwargs):
"""
Initialize the base radviz with many of the options required in order
to make the visualization work.
Parameters
----------
:param ax: the axis to plot the figure on.
:param features: a list of feature names to use
If a DataFrame is passed to fit and features is None, feature
names are selected as the columns of the DataFrame.
:param classes: a list of class names for the legend
If classes is None and a y value is passed to fit then the classes
are selected from the target vector.
:param color: optional list or tuple of colors to colorize lines
Use either color to colorize the lines on a per class basis or
colormap to color them on a continuous scale.
:param colormap: optional string or matplotlib cmap to colorize lines
Use either color to colorize the lines on a per class basis or
colormap to color them on a continuous scale.
:param kwargs: keyword arguments passed to the super class.
These parameters can be influenced later on in the visualization
process, but can and should be set as early as possible.
"""
super(RadialVisualizer, self).__init__(
ax, features, classes, color, colormap, **kwargs
)
@staticmethod
def normalize(X):
"""
MinMax normalization to fit a matrix in the space [0,1] by column.
"""
a = X.min(axis=0)
b = X.max(axis=0)
return (X - a[np.newaxis, :]) / ((b - a)[np.newaxis, :])
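# Worked example (illustrative, not part of the original module): for a
# feature column [2, 4, 6], a = 2 and b = 6, so normalize() maps it to
# [0.0, 0.5, 1.0]. A constant column (b == a) would divide by zero here.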
def draw(self, X, y, **kwargs):
"""
Called from the fit method, this method creates the radviz canvas and
draws each instance as a class or target colored point, whose location
is determined by the feature data set.
"""
# Get the shape of the data
nrows, ncols = X.shape
# Create the axes if they don't exist
if self.ax is None:
self.ax = plt.gca(xlim=[-1,1], ylim=[-1,1])
# Create the colors
# TODO: Allow both colormap, listed colors, and palette definition
# TODO: Make this an independent function or property for override!
# color_values = resolve_colors(
# num_colors=len(self.classes_), colormap=self.colormap, color=self.color
# )
color_values = get_color_cycle()
colors = dict(zip(self.classes_, color_values))
# Create a data structure to hold scatter plot representations
to_plot = {}
for kls in self.classes_:
to_plot[kls] = [[], []]
# Compute the arcs around the circumference for each feature axis
# TODO: make this an independent function for override
s = np.array([
(np.cos(t), np.sin(t))
for t in [
2.0 * np.pi * (i / float(ncols))
for i in range(ncols)
]
])
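# Worked example (illustrative): with ncols = 4 the feature anchors land at
# angles 0, pi/2, pi and 3*pi/2, i.e. approximately (1, 0), (0, 1), (-1, 0)
# and (0, -1) on the unit circle.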
# Compute the locations of the scatter plot for each class
# Normalize the data first to plot along the 0, 1 axis
for i, row in enumerate(self.normalize(X)):
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
xy = (s * row_).sum(axis=0) / row.sum()
kls = self.classes_[y[i]]
to_plot[kls][0].append(xy[0])
to_plot[kls][1].append(xy[1])
# Add the scatter plots from the to_plot function
# TODO: store these plots to add more instances to later
# TODO: make this a separate function
for i, kls in enumerate(self.classes_):
self.ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[kls], label=str(kls), **kwargs)
# Add the circular axis path
# TODO: Make this a separate function (along with labeling)
self.ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
# Add the feature names
for xy, name in zip(s, self.features_):
# Add the patch indicating the location of the axis
self.ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='#777777'))
# Add the feature names offset around the axis marker
if xy[0] < 0.0 and xy[1] < 0.0:
self.ax.text(xy[0] - 0.025, xy[1] - 0.025, name, ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
self.ax.text(xy[0] - 0.025, xy[1] + 0.025, name, ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
self.ax.text(xy[0] + 0.025, xy[1] - 0.025, name, ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
self.ax.text(xy[0] + 0.025, xy[1] + 0.025, name, ha='left', va='bottom', size='small')
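# For example (illustrative): a feature anchored at (0.71, 0.71) falls in
# the upper-right branch above, so its label is drawn at (0.735, 0.735)
# with left/bottom alignment, pointing the text away from the circle.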
self.ax.axis('equal')
def finalize(self, **kwargs):
"""
Finalize executes any subclass-specific axes finalization steps.
The user calls poof and poof calls finalize.
Parameters
----------
kwargs: generic keyword arguments.
"""
# Set the title
self.set_title(
'RadViz for {} Features'.format(len(self.features_))
)
# Remove the ticks from the graph
self.ax.set_yticks([])
self.ax.set_xticks([])
# Add the legend
self.ax.legend(loc='best')
# Alias for RadViz
RadViz = RadialVisualizer
| apache-2.0 |
appapantula/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
rohanp/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 69 | 3894 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import MultiTaskElasticNet
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
name = NoSparseClassifier.__name__
msg = "Estimator " + name + " doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(AdaBoostClassifier)
check_estimator(MultiTaskElasticNet)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
cainiaocome/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
inhuszar/histreg | register-img.py | 1 | 25627 | #!/Users/inhuszar/MND_HistReg/MND_HistReg_Python/bin/python
# 2017-Jun-14
# Fork of the original insert-block.py. Goal: make it a user-friendly utility
# capable of registering multi-channel 2D images using rigid-body transformation
import numpy as np
import matplotlib.pyplot as plt
import tifffile as tiff
from scipy.optimize import minimize, differential_evolution, basinhopping, brute
from scipy.interpolate import RegularGridInterpolator
from math import sin, cos, pi, floor, sqrt
from fractions import Fraction
from args import *
import os
from PIL import Image
from skimage.color import rgb2grey
from mutual_information import mutual_information
CLFLAGS = {'ref': '--ref',
'rgb': '--rgb',
'apply': '--applyxfm',
'maskval': '--maskval',
'initvals': '--initvals',
'bounds': '--bounds',
'steps': '--steps',
'show': '--show',
'out': '--out',
'omat': '--omat',
'tif': '--tif',
'ds': '--ds',
'verbose': '-v'}
N_AFF_PARAMS = 4
# ~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ## FUNCTION DEFINITIONS ## #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def extract_shape(img):
"""Additional flexibility to work with multi-channel images."""
s = img.shape
if len(s) == 2:
s = s + (1,)
if len(s) == 3:
pass
else:
print ('Invalid dimensions for image')
raise ValueError
return s
def downsample(img, factor, bounds_error=False, fill_value=0):
"""Downsample images isotropically by a given scalar. Uses interpolation."""
h, w, ch = extract_shape(img)
dtype = img.dtype
nx = np.linspace(0, w, int(floor(w / factor)) + 1, False)
ny = np.linspace(0, h, int(floor(h / factor)) + 1, False)
nyy, nxx = np.meshgrid(ny, nx, indexing='ij')
newimg = np.zeros((ny.size, nx.size, ch), dtype=dtype)
for c in range(ch):
if ch == 1:
ipol = RegularGridInterpolator((range(h), range(w)), img[:, :],
bounds_error=bounds_error,
fill_value=fill_value)
else:
ipol = RegularGridInterpolator((range(h), range(w)), img[:, :, c],
bounds_error=bounds_error,
fill_value=fill_value)
newimg[:, :, c] = ipol(np.vstack((nyy.ravel(), nxx.ravel())).T) \
.reshape(nxx.shape).astype(dtype)
if ch == 1:
return newimg[:, :, 0]
else:
return newimg
def colour_grad(img, kernelsize=3):
"""Calculates local multi-channel gradients using my harmonic kernel."""
h, w, ch = extract_shape(img)
harmonic = lambda n: sum(Fraction(1, d) for d in range(1, n + 1))
U = np.ones((1, kernelsize))
D = 2.0 / (kernelsize * (kernelsize - 1)) * \
np.array([float(harmonic(i) - harmonic(kernelsize - 1 - i))
for i in range(kernelsize)])
offset = int(kernelsize / 2)
dx = np.zeros((h,w,ch), dtype=np.float64)
dy = np.zeros((h,w,ch), dtype=np.float64)
for c in range(ch):
for y in xrange(offset, h - (kernelsize - offset)):
#print '{}/{} row...'.format(y - offset + 1, h - kernelsize)
for x in xrange(offset, w - (kernelsize - offset)):
if ch == 1:
K = img[y - offset:y - offset + kernelsize,
x - offset:x - offset + kernelsize]
else:
K = img[y - offset:y - offset + kernelsize,
x - offset:x - offset + kernelsize, c]
current_dx = np.dot(np.dot(U, K), D.T)
current_dy = np.dot(D, np.dot(K, U.T))
magnitude = sqrt(current_dx ** 2 + current_dy ** 2)
if magnitude == 0:
dx[y, x, c] = 0
dy[y, x, c] = 0
continue
dx[y, x, c] = current_dx * 1.0 / magnitude
dy[y, x, c] = current_dy * 1.0 / magnitude
return np.sqrt(dx**2 + dy**2)
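# Worked example (illustrative): with kernelsize = 3, harmonic(0..2) is
# 0, 1, 1.5, so D = (2 / (3 * 2)) * [-1.5, 0.0, 1.5] = [-0.5, 0.0, 0.5],
# i.e. a central-difference kernel; np.dot(np.dot(U, K), D.T) then sums
# each column of the patch before applying those difference weights.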
def get_affine_from_params(affine_params, orig_x=0, orig_y=0, factor=1):
"""Creates 3x3 affine matrix from the supplied 4 parameters.
Please note the constraint of isometric scaling. The factor compensates for
downscaling."""
rot_z, dx, dy, sx = affine_params
sy = sx
affine = np.array([[sx * cos(rot_z), -sy * sin(rot_z),
factor * (-orig_x) * sx * cos(rot_z) -
factor * (-orig_y) * sy * sin(rot_z) +
factor * dx],
[sx * sin(rot_z), sy * cos(rot_z),
factor * (-orig_x) * sx * sin(rot_z) +
factor * (-orig_y) * sy * cos(rot_z) +
factor * dy],
[0, 0, 1]])
return affine
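# Worked example (illustrative): get_affine_from_params([0, 0, 0, 1]) with
# the default origin and factor returns the 3x3 identity matrix, since
# rot_z = dx = dy = 0 and sx = sy = 1 leave no rotation, translation or
# scaling.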
def transform(img, affine, refimg=None):
"""Performs affine transformation on the coordinates of an image given the
transformation matrix. Output coordinates are based on the coordinates of a
reference image and are in the form array([Y, X]).
If no reference image is specified, the transformation is self-referential.
The rotation is relative to the point (orig_x, orig_y). If either is not
specified, they are set to the respective component of the centre of
gravity."""
if refimg is not None:
h, w, ch = extract_shape(refimg)
else:
h, w, ch = extract_shape(img)
yy, xx = np.meshgrid(range(h), range(w), indexing='ij')
res = np.dot(np.linalg.inv(affine), np.vstack((xx.ravel(), yy.ravel(),
np.ones(xx.size))))
return np.roll(res[:2, :].T, 1, axis=1)
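# Note (illustrative): because the inverse affine is applied to the
# reference grid, a pure translation with dx = 5 yields sampling
# coordinates shifted by -5 in x, which moves the resampled input image
# 5 px to the right in the output frame.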
def transform_img(img, affine, refimg=None):
"""Performs actual image transformation based on the location of the origin
and the components of an affine matrix."""
h, w, ch = extract_shape(img)
dtype = img.dtype
ipol = RegularGridInterpolator((range(h), range(w)), img,
bounds_error=False,
fill_value=np.array([0]*ch))
if refimg is not None:
h, w, ch = extract_shape(refimg)
transformed_img = ipol(transform(img, affine, refimg=refimg))\
.reshape((h,w,ch)).astype(dtype)
if ch == 1:
return transformed_img[:, :, 0]
else:
return transformed_img
def set_interpolators(img, refimg, gradients=False):
"""Creates interpolators for both the input and the reference images. Also
creates interpolators for the multi-channel gradient maps (optional).
The interpolators bridge the gap between the potentially different sizes of
the input and the reference image. Setting up the interpolators outside the
cost function improves the performance of registration."""
h, w, ch = extract_shape(refimg)
ih, iw, ich = extract_shape(img)
# Define interpolators for both the input and the reference image
refimg_ipol = RegularGridInterpolator((range(h), range(w)), refimg,
bounds_error=False,
fill_value=np.array([0] * ch))
img_ipol = RegularGridInterpolator((range(ih), range(iw)), img,
bounds_error=False,
fill_value=np.array([0] * ich))
# Define interpolators for the colour gradient maps
if gradients:
img_grad_ipol = RegularGridInterpolator((range(ih), range(iw)),
colour_grad(img, 10),
bounds_error=False,
fill_value=np.array(
[0] * ich))
refimg_grad_ipol = RegularGridInterpolator((range(h), range(w)),
colour_grad(refimg, 10),
bounds_error=False,
fill_value=np.array(
[0] * ch))
return img_ipol, refimg_ipol, img_grad_ipol, refimg_grad_ipol
else:
return img_ipol, refimg_ipol
def costfun(affine_params, img_ipol, refimg_ipol, img_grad_ipol=None,
refimg_grad_ipol=None, orig_x=0, orig_y=0, maskval=None,
verbose=False):
"""Calculates the alignment penalty score (cost) between the input image and
the reference image given the transformation parameters.
The cost function measures the normalised mutual information."""
if verbose:
print affine_params
affine = get_affine_from_params(affine_params, orig_x, orig_y, 1)
yx_xfm = transform(img=img_ipol.values, affine=affine,
refimg=refimg_ipol.values)
yy, xx = np.meshgrid(refimg_ipol.grid[0], refimg_ipol.grid[1],
indexing='ij')
yx_ref = np.vstack((yy.ravel(), xx.ravel())).T
# Measure the area of the transformed input image. This will be a second
# normalisation factor for the normalised mutual information metric, so as
# to avoid the otherwise inevitable inflation of the image.
if maskval is not None:
roi_img_yx = np.where((img_ipol(yx_xfm) != maskval).any(axis=1))[0]
total = roi_img_yx.size
else:
total = 1
h, w, ch = extract_shape(refimg_ipol.values)
cost = -mutual_information(rgb2grey(img_ipol(yx_xfm).reshape((h, w, ch))),
rgb2grey(refimg_ipol(yx_ref).reshape((h, w, ch))))/total
#plt.imshow(rgb2grey(img_ipol(yx_xfm).reshape((h, w, ch))), cmap='gray')
#plt.show()
if verbose:
print cost
return cost
def costfun_lsq(affine_params, img_ipol, refimg_ipol, img_grad_ipol=None,
refimg_grad_ipol=None, orig_x=0, orig_y=0, maskval=None,
verbose=False):
"""Calculates the alignment penalty score (cost) between the input image and
the reference image given the transformation parameters.
The total cost is the sum of squared differences plus a scalar multiple of a
regularisation term, which is currently a sum of squared differences between
multi-channel gradients."""
alpha = 1 # the scalar
affine = get_affine_from_params(affine_params, orig_x, orig_y, 1)
yx_xfm = transform(img=img_ipol.values, affine=affine,
refimg=refimg_ipol.values)
yy, xx = np.meshgrid(refimg_ipol.grid[0], refimg_ipol.grid[1],
indexing='ij')
yx_ref = np.vstack((yy.ravel(), xx.ravel())).T
h, w, ch = extract_shape(refimg_ipol.values)
if maskval is not None:
#roi_img_yx = np.where((img_ipol(yx_xfm) != maskval).any(axis=1))[0]
roi_ref_yx = np.where((refimg_ipol(yx_ref) != maskval).any(axis=1))[0]
#im = img_ipol(yx_xfm).astype(np.uint8)
#im[roi_img_yx, :] = np.array([255,255,255])
#print affine_params
#plt.imshow(im.reshape(refimg_ipol.values.shape))
#plt.show()
lsqterm = np.sum((refimg_ipol(yx_ref)[roi_ref_yx] -
img_ipol(yx_xfm)[roi_ref_yx]) ** 2)
regterm = np.sum((refimg_grad_ipol(yx_ref)[roi_ref_yx] -
img_grad_ipol(yx_xfm)[roi_ref_yx]) ** 2)
else:
lsqterm = np.sum((refimg_ipol(yx_ref).reshape((h, w, ch)) -
img_ipol(yx_xfm).reshape((h, w, ch))) ** 2)
regterm = np.sum((refimg_grad_ipol(yx_ref).reshape((h, w, ch)) -
img_grad_ipol(yx_xfm).reshape((h, w, ch))) ** 2)
cost = lsqterm #+ alpha * regterm
if verbose:
print cost
return cost
def perform_registration(img, refimg, affine_param_bounds, init_steps=5,
dscale=1, initvals=None, maskval=None, verbose=True):
"""Perform registration given the input and the reference image plus the
bounds on the affine parameters and the number of initialisation steps for
each affine parameter."""
# Set up interpolators
ipols = set_interpolators(downsample(img, dscale),
downsample(refimg, dscale), gradients=True)
# Calculate the centre of gravity for the input image
h, w, ch = extract_shape(ipols[0].values)
if ch > 1:
orig_y, orig_x = \
np.mean(np.vstack(np.where(np.sum(ipols[0].values, axis=2)!=0)).T,
axis=0)
else:
orig_y, orig_x = np.mean(np.vstack(np.where(ipols[0].values != 0)).T,
axis=0)
# Adjust the affine parameter bounds to the current scale
affine_param_bounds = np.array(affine_param_bounds)
affine_param_bounds[1:3,:] = affine_param_bounds[1:3,:] / float(dscale)
affine_param_bounds = tuple([tuple(row) for row in affine_param_bounds])
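# Worked example (illustrative): with dscale = 16, translation bounds of
# [-w/2, w/2] full-resolution pixels become [-w/32, w/32] in the
# downsampled frame, while the rotation and scale bounds are left
# untouched.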
# Initialise registration
if initvals is None:
gridpts = tuple([np.linspace(affine_param_bounds[i][0],
affine_param_bounds[i][1],
init_steps, endpoint=True)
for i in range(len(affine_param_bounds))])
grid = np.meshgrid(*gridpts)
grid = np.vstack([grid[i].ravel() for i in range(len(grid))]).T
initial_costs = []
print 'Calculating initial guess...'
print 'Scaled parameter boundaries:', affine_param_bounds
for x0 in grid:
initial_costs.append(costfun(x0, img_ipol=ipols[0],
refimg_ipol=ipols[1],
img_grad_ipol=ipols[2],
refimg_grad_ipol=ipols[3],
orig_x=orig_x, orig_y=orig_y,
maskval=maskval, verbose=False))
x0 = grid[np.argmin(np.vstack(initial_costs))]
print 'Best guess for initialisation: ', x0
else:
x0 = initvals
x0[1] = x0[1] * 1.0 / dscale
x0[2] = x0[2] * 1.0 / dscale
print 'Scaled initial affine parameters:', x0
# Perform registration by minimising the cost function
print 'Optimizing...'
print 'Scaled param bounds:', affine_param_bounds
"""
opt = minimize(costfun, x0=x0, bounds=affine_param_bounds,
args=(ipols[0], ipols[1], ipols[2], ipols[3], orig_x, orig_y,
maskval, verbose), method='TNC', options={'eps': 0.01,
'scale': [1,10,10,1]},
jac=False)
"""
opt = differential_evolution(costfun, bounds=affine_param_bounds,
args=(ipols[0], ipols[1], ipols[2], ipols[3], orig_x, orig_y,
maskval, verbose), strategy='best2exp')
"""
opt = basinhopping(costfun, x0=x0, bounds=affine_param_bounds,
args=(ipols[0], ipols[1], ipols[2], ipols[3], orig_x, orig_y,
maskval, verbose))
opt = brute(costfun, ranges=affine_param_bounds,
args=(ipols[0], ipols[1], ipols[2], ipols[3], orig_x, orig_y,
maskval, verbose), Ns=20)
"""
# Generate output: transformed image and transformation matrix
print opt.x
omat = get_affine_from_params(opt.x, orig_x=orig_x, orig_y=orig_y,
factor=dscale)
out = transform_img(img, omat, refimg)
return out, omat
# ~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ## MAIN CODE ## #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def main():
err = 0
# Load input image
imfile = subarg(sys.argv[0])[0]
fpath, fname = os.path.split(imfile)
try:
if argexist(CLFLAGS['tif']):
img = tiff.imread(imfile)
else:
img = np.array(Image.open(imfile))
if argexist(CLFLAGS['rgb']) & (extract_shape(img)[2] > 3):
img = img[:,:,:3]
except:
print ('Input image could not be opened from {}.'.format(imfile))
exit()
# Read reference
if argexist(CLFLAGS['ref']):
if argexist(CLFLAGS['ref'], True):
refimfile = subarg(CLFLAGS['ref'])[0]
try:
if argexist(CLFLAGS['tif']):
refimg = tiff.imread(refimfile)
else:
refimg = np.array(Image.open(refimfile))
if argexist(CLFLAGS['rgb']) & (extract_shape(refimg)[2] > 3):
refimg = refimg[:, :, :3]
except:
print ('Reference image could not be opened from {}.'
.format(refimfile))
exit()
else:
print ('Reference image was not specified.')
exit()
# Check image compatibility
if extract_shape(img)[2] != extract_shape(refimg)[2]:
print ('Channel number mismatch between input and reference.')
exit()
# Read downsample factor
try:
factor = float(subarg(CLFLAGS['ds'], 1)[0])
except:
print ('Invalid factor for downsampling.')
exit()
# Read transformation matrix from file if specified
mat = None # this will remain None if registration is needed
if argexist(CLFLAGS['apply']):
matfile = subarg(CLFLAGS['apply'])[0]
try:
mat = np.loadtxt(matfile, dtype=np.float64)
except:
print ('Transformation matrix could not be loaded from {}'
.format(matfile))
exit()
if mat.shape != (3,3):
print ('Transformation matrix had invalid shape.')
exit()
# Read initialisation values
initvals = None
if argexist(CLFLAGS['initvals']):
if argexist(CLFLAGS['initvals'], True):
initvals = subarg(CLFLAGS['initvals'])
if len(initvals) != N_AFF_PARAMS:
print ('Invalid affine parameters for initialisation.')
exit()
try:
# Strip brackets
if not str(initvals[0][0]).isdigit():
initvals[0] = initvals[0][1:]
if not str(initvals[-1][-1]).isdigit():
initvals[-1] = initvals[-1][:-1]
initvals = np.array([float(val) for val in initvals])
except:
print ('Invalid affine parameters for initialisation.')
exit()
# The user-provided initial conditions must never be out of the
# default bounds.
if not argexist(CLFLAGS['bounds'], True):
bnds = [[-pi, pi], [initvals[1]*0.5, initvals[1]*1.5],
[initvals[2]*0.5, initvals[2]*1.5],
[initvals[3]*0.9, initvals[3]*1.1]]
else:
print ('Initial affine parameters are not specified.')
exit()
# Read bounds
if argexist(CLFLAGS['bounds']):
if argexist(CLFLAGS['bounds'], True):
bnds = subarg(CLFLAGS['bounds'])
if len(bnds) != 2 * N_AFF_PARAMS:
print ('Invalid bounds for initial parameter search.')
exit()
try:
bnds[0] = bnds[0][1:]
bnds[-1] = bnds[-1][:-1]
bnds = [[float(bnds[2*i]), float(bnds[2*i+1])]
for i in range(N_AFF_PARAMS)]
except:
print ('Invalid bounds for initial parameter search.')
exit()
else:
print ('Invalid bounds for initial parameter search.')
exit()
# Don't override the bounds that have been adapted to the provided initvals
elif not argexist(CLFLAGS['initvals'], True):
h, w, ch = extract_shape(img)
bnds = [[-pi, pi], [-w/2, w/2], [-h/2, h/2], [0.8, 1.2]]
else:
pass
# Read grid search step count
steps = 5
if argexist(CLFLAGS['steps']):
if argexist(CLFLAGS['steps'], True):
steps = subarg(CLFLAGS['steps'])[0]
try:
steps = int(steps)
except:
print ('Invalid number of steps for initial grid search.')
exit()
else:
print ('Invalid number of steps for initial grid search.')
exit()
# Read mask value
maskval = None
if argexist(CLFLAGS['maskval']):
if argexist(CLFLAGS['maskval'], True):
maskval = subarg(CLFLAGS['maskval'])
h, w, ch = extract_shape(img)
if len(maskval) == ch:
# Strip brackets
if not str(maskval[0][0]).isdigit():
maskval[0] = maskval[0][1:]
if not str(maskval[-1][-1]).isdigit():
maskval[-1] = maskval[-1][:-1]
try:
maskval = np.array([float(val) for val in maskval])
except:
print ('Invalid mask value.')
exit()
else:
print ('Mask value dimension must match the number of'
' channels.')
exit()
else:
print ('Invalid mask value.')
exit()
# Read output name
outfile = None
if argexist(CLFLAGS['out']):
outfile = subarg(CLFLAGS['out'], os.path.join(fpath, fname[:-4] +
'_aligned' +
fname[-4:]))[0]
omatfile = None
if argexist(CLFLAGS['omat']):
omatfile = subarg(CLFLAGS['omat'], os.path.join(fpath, fname[:-4] +
'_omat.mat'))[0]
# Read verbose switch
if argexist(CLFLAGS['verbose']):
verbose = True
else:
verbose = False
# Do the job
if mat is None:
print ('REGISTRATION MODE active.')
outimg, omat = perform_registration(img, refimg,
affine_param_bounds=bnds,
init_steps=steps, dscale=factor,
initvals=initvals, maskval=maskval,
verbose=verbose)
if argexist(CLFLAGS['show']):
print ('Showing the alignment...')
plt.imshow((outimg.astype(np.float64)-refimg.astype(np.float64)))
plt.show()
else:
print ('TRANSFORMATION MODE active.')
try:
outimg = transform_img(img, mat, refimg)
if argexist(CLFLAGS['show']):
print ('Showing the transformed image...')
plt.imshow(outimg.astype(img.dtype))
plt.show()
except:
print ('ERROR: The transformation was not successful.')
err = err + 1
# Save the output
if outfile is not None:
try:
if argexist(CLFLAGS['tif']):
tiff.imsave(outfile, outimg.astype(img.dtype))
else:
Image.fromarray(outimg).save(outfile)
print ('SAVED: {}'.format(outfile))
except:
print ('ERROR: {} could not be saved.'.format(outfile))
err = err + 1
if omatfile is not None:
try:
np.savetxt(omatfile, omat)
print ('SAVED: {}'.format(omatfile))
except:
print ('ERROR: {} could not be saved.'.format(omatfile))
err = err + 1
# Conclude run
if err > 0:
print ('{} error(s) occurred.'.format(err))
else:
print ('All tasks were successfully completed.')
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# Program execution starts here. #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
if __name__ == "__main__":
if len(sys.argv) > 1:
main()
else:
print (
"""
The register-img.py utility performs rigid-body registration between two 2D
images. Given a prespecified affine matrix, a linear transformation is
carried out on the input image.
When the program is run to register images, a transformed image and/or the
transformation matrix is returned. The size and shape of the output image
are equal to the size and shape of the reference image.
When the program is run to perform a prespecified linear transformation, the
resultant image is returned.
Usage:
./register-img.py <input> --ref <reference> --out [output] --omat
./register-img.py <input> --applyxfm <affine.mat> --out [output]
Options:
--rgb Forces to use maximum 3 channels. (Use for RGBA.)
--ds <factor> Downsample images for better performance. (px/mm)
(Default: off. Recommended downsampling: to 1mm/px)
--initvals Manual initialisation of affine parameters. Use []!
[rz,dx,dy,sxy] (rz: rotation, dx, dy: translation, sxy: scale)
(Default: automatic best-guess initialisation.)
--bounds Lower and upper bounds for the affine parameters.
[l1,u1...l4,u4] (Default: [-pi,pi,-w/2,w/2,-h/2,h/2,0.8,1.2])
--maskval [v1...] Masked input pixels are excluded from the cost.
--steps <n_steps> Number of steps per parameter during the gridsearch.
--tif Forces to use tifffile.py for the input and output.
--show Show the output.
-v Verbose: report the evolution of the cost function.
"""
)
| mit |
avistous/QSTK | Bin/converter.py | 2 | 2914 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Jan 1, 2011
@author:Drew Bratcher
@contact: dbratcher@gatech.edu
@summary: Contains tutorial for backtester and report.
'''
#
# fundsToPNG.py
#
# Short script which produces a graph of funds
# over time from a pickle file.
#
# Drew Bratcher
#
from pylab import *
from qstkutil import DataAccess as da
from qstkutil import tsutil as tsu
from quicksim import quickSim
from copy import deepcopy
import math
from pandas import *
import matplotlib.pyplot as plt
import cPickle
def fundsToPNG(funds,output_file):
plt.clf()
if(type(funds)==type(list())):
for i in range(0,len(funds)):
plt.plot(funds[i].index,funds[i].values)
else:
plt.plot(funds.index,funds.values)
plt.ylabel('Fund Value')
plt.xlabel('Date')
plt.gcf().autofmt_xdate(rotation=45)
plt.draw()
savefig(output_file, format='png')
def fundsAnalysisToPNG(funds,output_file):
plt.clf()
if(type(funds)!=type(list())):
print 'fundsmatrix only contains one timeseries, not able to analyze.'
#convert to daily returns
count=list()
dates=list()
sum=list()
for i in range(0,len(funds)):
ret=tsu.daily(funds[i].values)
for j in range(0, len(ret)):
if (funds[i].index[j] in dates):
sum[dates.index(funds[i].index[j])]+=ret[j]
count[dates.index(funds[i].index[j])]+=1
else:
dates.append(funds[i].index[j])
count.append(1)
sum.append(ret[j])
#compute average
tot_ret=deepcopy(sum)
for i in range(0,len(sum)):
tot_ret[i]=sum[i]/count[i]
#compute std
std=zeros(len(sum))
for i in range(0,len(funds)):
temp=tsu.daily(funds[i].values)
for j in range(0,len(temp)):
std[dates.index(funds[i].index[j])]=0
std[dates.index(funds[i].index[j])]+=math.pow(temp[j]-tot_ret[dates.index(funds[i].index[j])],2)
for i in range(1, len(std)):
# std[i]=math.sqrt(std[i]/count[i])+std[i-1]
std[i]=math.sqrt(std[i]/count[i])
#compute total returns
lower=deepcopy(tot_ret)
upper=deepcopy(tot_ret)
tot_ret[0]=funds[0].values[0]
lower[0]=funds[0].values[0]
upper[0]=lower[0]
# for i in range(1,len(tot_ret)):
# tot_ret[i]=tot_ret[i-1]+(tot_ret[i])*tot_ret[i-1]
# lower[i]=tot_ret[i-1]-(std[i])*tot_ret[i-1]
# upper[i]=tot_ret[i-1]+(std[i])*tot_ret[i-1]
for i in range(1,len(tot_ret)):
lower[i]=(tot_ret[i]-std[i]+1)*lower[i-1]
upper[i]=(tot_ret[i]+std[i]+1)*upper[i-1]
tot_ret[i]=(tot_ret[i]+1)*tot_ret[i-1]
plt.clf()
plt.plot(dates,tot_ret)
plt.plot(dates,lower)
plt.plot(dates,upper)
plt.legend(('Tot_Ret','Lower','Upper'),loc='upper left')
plt.ylabel('Fund Total Return')
plt.ylim(ymin=0,ymax=2*tot_ret[0])
plt.draw()
savefig(output_file, format='png')
| bsd-3-clause |
aminoj/Interactive-Orbitals-Simulation | export 3d pic /3p.py | 1 | 8570 | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
rstride = 15
cstride = 15
MaxBound = 3
MinBound = -3
u = np.linspace(0, 2*np.pi, 100)
v = np.linspace(0, np.pi, 100)
#OUTSIDE -ALL DIRECTIONS
#-------------------------------------------------------------------------------
#Left-Outer Outer
x2 = np.outer(np.cos(u), np.sin(v))
z2 = np.outer(np.sin(u), np.sin(v))
y2 = -0.4*np.cos(u)-2.05
ax.plot_surface(x2, y2, z2, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Left-Outer Inner
x3 = np.outer(np.cos(u), np.sin(v))
z3 = np.outer(np.sin(u), np.sin(v))
y3 = abs((x3)**2 + (z3)**2 + 5)**(0.5)-4.1
ax.plot_surface(x3, y3, z3, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Right-Outer Outer
x4 = np.outer(np.cos(u), np.sin(v))
z4 = np.outer(np.sin(u), np.sin(v))
y4 = 0.4*np.cos(u)+2.05
ax.plot_surface(x4, y4, z4, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Right-Outer Inner
x5 = np.outer(np.cos(u), np.sin(v))
z5 = np.outer(np.sin(u), np.sin(v))
y5 = abs((x5)**2 + (z5)**2 - 5)**(0.5)-0.35
ax.plot_surface(x5, y5, z5, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#DIFFERENT DIRECTION
#-------------------------------------------------------------------------------
#Left-Outer Outer
y2 = np.outer(np.cos(u), np.sin(v))
z2 = np.outer(np.sin(u), np.sin(v))
x2 = -0.4*np.cos(u)-2.07
ax.plot_surface(x2, y2, z2, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Left-Outer Inner
y3 = np.outer(np.cos(u), np.sin(v))
z3 = np.outer(np.sin(u), np.sin(v))
x3 = abs((y3)**2 + (z3)**2 + 5)**(0.5)-4.1
ax.plot_surface(x3, y3, z3, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Right-Outer Outer
y4 = np.outer(np.cos(u), np.sin(v))
z4 = np.outer(np.sin(u), np.sin(v))
x4 = 0.4*np.cos(u)+2.07
ax.plot_surface(x4, y4, z4, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Right-Outer Inner
y5 = np.outer(np.cos(u), np.sin(v))
z5 = np.outer(np.sin(u), np.sin(v))
x5 = abs((y5)**2 + (z5)**2 - 5)**(0.5)-0.35
ax.plot_surface(x5, y5, z5, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#DIFFERENT DIRECTION
#-------------------------------------------------------------------------------
#Left-Outer Outer
y2 = np.outer(np.cos(u), np.sin(v))
x2 = np.outer(np.sin(u), np.sin(v))
z2 = -0.4*np.cos(u)-2.07
ax.plot_surface(x2, y2, z2, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Left-Outer Inner
y3 = np.outer(np.cos(u), np.sin(v))
x3 = np.outer(np.sin(u), np.sin(v))
z3 = abs((x3)**2 + (y3)**2 + 5)**(0.5)-4.1
ax.plot_surface(x3, y3, z3, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Right-Outer Outer
x4 = np.outer(np.cos(u), np.sin(v))
y4 = np.outer(np.sin(u), np.sin(v))
z4 = 0.4*np.cos(u)+2.07
ax.plot_surface(x4, y4, z4, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Right-Outer Inner
x5 = np.outer(np.cos(u), np.sin(v))
y5 = np.outer(np.sin(u), np.sin(v))
z5 = abs((x5)**2 + (y5)**2 - 5)**(0.5)-0.35
ax.plot_surface(x5, y5, z5, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#INSIDE - ALL DIRECTIONS
#-------------------------------------------------------------------------------
#Right-Inner Outer
x6 = 0.5*np.outer(np.cos(u), np.sin(v))
z6 = 0.5*np.outer(np.sin(u), np.sin(v))
y6 = 0.2*np.cos(u) +1
ax.plot_surface(x6, y6, z6, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Left-Inner Outer
x7 = 0.5*np.outer(np.cos(u), np.sin(v))
z7 = 0.5*np.outer(np.sin(u), np.sin(v))
y7 = 0.2*np.cos(u) +1
ax.plot_surface(x7, -y7, z7, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Right-Inner Inner
x8 = 0.5*np.outer(np.cos(u), np.sin(v))
z8 = 0.5*np.outer(np.sin(u), np.sin(v))
y8 = abs((x8)**2 + (z8)**2 - 5)**(0.5)-1.4
ax.plot_surface(x8, y8, z8, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Left-Inner Inner
x8 = 0.5*np.outer(np.cos(u), np.sin(v))
z8 = 0.5*np.outer(np.sin(u), np.sin(v))
y8 = abs((x8)**2 + (z8)**2 - 5)**(0.5)-1.4
ax.plot_surface(x8, -y8, z8, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Right-Inner Outer
y6 = 0.5*np.outer(np.cos(u), np.sin(v))
z6 = 0.5*np.outer(np.sin(u), np.sin(v))
x6 = 0.2*np.cos(u) +1
ax.plot_surface(x6, y6, z6, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Left-Inner Outer
y7 = 0.5*np.outer(np.cos(u), np.sin(v))
z7 = 0.5*np.outer(np.sin(u), np.sin(v))
x7 = 0.2*np.cos(u) +1
ax.plot_surface(-x7, y7, z7, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Right-Inner Inner
y8 = 0.5*np.outer(np.cos(u), np.sin(v))
z8 = 0.5*np.outer(np.sin(u), np.sin(v))
x8 = abs((y8)**2 + (z8)**2 - 5)**(0.5)-1.4
ax.plot_surface(x8, y8, z8, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Left-Inner Inner
y9 = 0.5*np.outer(np.cos(u), np.sin(v))
z9 = 0.5*np.outer(np.sin(u), np.sin(v))
x9 = abs((y9)**2 + (z9)**2 - 5)**(0.5)-1.4
ax.plot_surface(-x9, y9, z9, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Right-Inner Outer
x6 = 0.5*np.outer(np.cos(u), np.sin(v))
y6 = 0.5*np.outer(np.sin(u), np.sin(v))
z6 = 0.2*np.cos(u) +1
ax.plot_surface(x6, y6, z6, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Left-Inner Outer
x7 = 0.5*np.outer(np.cos(u), np.sin(v))
y7 = 0.5*np.outer(np.sin(u), np.sin(v))
z7 = 0.2*np.cos(u) +1
ax.plot_surface(x7, y7, -z7, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Right-Inner Inner
x8 = 0.5*np.outer(np.cos(u), np.sin(v))
y8 = 0.5*np.outer(np.sin(u), np.sin(v))
z8 = abs((x8)**2 + (y8)**2 - 5)**(0.5)-1.4
ax.plot_surface(x8, y8, z8, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
#-------------------------------------------------------------------------------
#Left-Inner Inner
x8 = 0.5*np.outer(np.cos(u), np.sin(v))
y8 = 0.5*np.outer(np.sin(u), np.sin(v))
z8 = abs((x8)**2 + (y8)**2 - 5)**(0.5)-1.4
ax.plot_surface(x8, y8, -z8, rstride = rstride, cstride = cstride, color=(0,1,0.44), linewidth=0)
plt.show()
ax.set_xlim3d(MaxBound, MinBound)
ax.set_ylim3d(MaxBound, MinBound)
ax.set_zlim3d(MaxBound, MinBound)
e = 0
b = 0
for ii in xrange(0,120,1):
ax.view_init(elev=e, azim=b*4)
if(ii < 10) :
plt.savefig("3p/movie00%s.png" %ii)
elif(ii < 100):
plt.savefig("3p/movie0%s.png" %ii)
else :
plt.savefig("3p/movie%s.png" %ii)
if(ii == 20 or ii == 40 or ii == 60 or ii == 80 or ii == 100):
e = e + 12
b = 0
b = b + 1
| apache-2.0 |
acapet/GHER-POSTPROC | Examples/EasyExample.py | 1 | 2826 | # We only import libraries needed for plotting
# Other libraries are imported in the class definition file, G3D_class.py,
# which contains all process and variables function definition.
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime as dt
import G3D_class
# We instantiate an object of the class G3D, just by giving the path to the netcdf file to work with
# Up to now I'm working with 4D netcdf files containing several variables.
# Outputs from different files can be merged easily, as can be seen in other examples
G = G3D_class.G3D('../data/CART1CLIP/1980.nc')
# All loaded variables are attributes of the G3D instance.
# For instance the variable "bat" is defined directly when the object is instantiated.
# Others are loaded only when needed.
# Variables are python masked arrays, so they have a mask attribute, which is an array of booleans.
# Here we want to define a mask based on bathymetry
maskDS= (G.bat<50 ) & ~(G.bat.mask) # Mask should be True where masked
maskSH= (G.bat>=50) & ~(G.bat.mask) # Mask should be True where masked
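# Quick sanity check (illustrative): since these are plain boolean numpy
# arrays, maskDS.sum() counts the unmasked shallow (<50 m) points excluded
# from the open-sea average below, and maskSH.sum() the deeper points
# excluded from the shelf average.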
# All processing functions are called as function of the G3D instance.
# Variable name is given as an argument. Some functions allows more argument.
# This would give the basin averaged time series of salinity
T1 = G.avgspatial('SAL')
# The avgspatial function enables an optional mask argument
# Note also , that we can use a variable name that is not defined in the netcdf file.
# In this case the toolbox will automatically look for the function "instance_SSS"
sssDS=G.avgspatial('SSS',maskDS)
sssSH=G.avgspatial('SSS',maskSH)
# The following is general python plotting.
# The "dates" attribute is also loaded automatically.
####################
# 1st figure :
####################
locator = mdates.AutoDateLocator()
formator = mdates.AutoDateFormatter(locator)
fig=plt.figure(figsize=(15, 8))
ax=plt.subplot(1, 1, 1)
ax.xaxis_date()
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formator)
plt.plot(G.dates,sssSH, label = 'Average surface salinity on the shelf')
plt.plot(G.dates,sssDS, label = 'Average surface salinity in the open sea')
plt.title('Salinity')
plt.ylabel('Salinity - [p.s.u.]')
fig.savefig(G.figoutputdir+'Simple.png')
| gpl-3.0 |
hrjn/scikit-learn | examples/svm/plot_svm_kernels.py | 96 | 2019 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10, edgecolors='k')
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired,
edgecolors='k')
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
davidsandberg/facenet | contributed/batch_represent.py | 1 | 5519 | #!/usr/bin/env python
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""
Allows you to generate embeddings from a directory of images in the format described below:
Instructions:
Image data directory should look like the following figure:
person-1
├── image-1.jpg
├── image-2.png
...
└── image-p.png
...
person-m
├── image-1.png
├── image-2.jpg
...
└── image-q.png
Trained Model:
- Both the trained model metagraph and the model parameters need to exist
in the same directory, and the metagraph should have the extension '.meta'.
####
USAGE:
$ python batch_represent.py -d <YOUR IMAGE DATA DIRECTORY> -o <DIRECTORY TO STORE OUTPUT ARRAYS> --trained_model_dir <DIRECTORY CONTAINING PRETRAINED MODEL>
###
"""
"""
Attributions:
The code is heavily inspired by David Sandberg's ../src/validate_on_lfw.py
The concept is inspired by Brandon Amos' github.com/cmusatyalab/openface/blob/master/batch-represent/batch-represent.lua
"""
#----------------------------------------------------
# MIT License
#
# Copyright (c) 2017 Rakshak Talwar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#----------------------------------------------------
import os
import sys
import argparse
import importlib
import time
sys.path.insert(1, "../src")
import facenet
import numpy as np
from sklearn.datasets import load_files
import tensorflow as tf
from six.moves import xrange
def main(args):
with tf.Graph().as_default():
with tf.Session() as sess:
# create output directory if it doesn't exist
output_dir = os.path.expanduser(args.output_dir)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# load the model
print("Loading trained model...\n")
meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(args.trained_model_dir))
facenet.load_model(args.trained_model_dir, meta_file, ckpt_file)
# grab all image paths and labels
print("Finding image paths and targets...\n")
data = load_files(args.data_dir, load_content=False, shuffle=False)
labels_array = data['target']
paths = data['filenames']
# Get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
image_size = images_placeholder.get_shape()[1]
embedding_size = embeddings.get_shape()[1]
# Run forward pass to calculate embeddings
print('Generating embeddings from images...\n')
start_time = time.time()
batch_size = args.batch_size
nrof_images = len(paths)
nrof_batches = int(np.ceil(1.0*nrof_images / batch_size))
emb_array = np.zeros((nrof_images, embedding_size))
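# Worked example (illustrative): with 230 images and batch_size = 50,
# nrof_batches = ceil(230 / 50) = 5 and the final pass covers indices
# 200-229 thanks to the min() bound on end_index below.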
for i in xrange(nrof_batches):
start_index = i*batch_size
end_index = min((i+1)*batch_size, nrof_images)
paths_batch = paths[start_index:end_index]
images = facenet.load_data(paths_batch, do_random_crop=False, do_random_flip=False, image_size=image_size, do_prewhiten=True)
feed_dict = { images_placeholder:images, phase_train_placeholder:False}
emb_array[start_index:end_index,:] = sess.run(embeddings, feed_dict=feed_dict)
time_avg_forward_pass = (time.time() - start_time) / float(nrof_images)
print("Forward pass took avg of %.3f[seconds/image] for %d images\n" % (time_avg_forward_pass, nrof_images))
print("Finally saving embeddings and gallery to: %s" % (output_dir))
# save the gallery and embeddings (signatures) as numpy arrays to disk
np.save(os.path.join(output_dir, "gallery.npy"), labels_array)
np.save(os.path.join(output_dir, "signatures.npy"), emb_array)
def parse_arguments(argv):
parser = argparse.ArgumentParser(description="Batch-represent face embeddings from a given data directory")
parser.add_argument('-d', '--data_dir', type=str,
help='directory of images with structure as seen at the top of this file.')
parser.add_argument('-o', '--output_dir', type=str,
					help='directory where the gallery (labels) and signatures (embeddings) numpy arrays will be saved.')
parser.add_argument('--trained_model_dir', type=str,
help='Load a trained model before training starts.')
parser.add_argument('--batch_size', type=int, help='Number of images to process in a batch.', default=50)
return parser.parse_args(argv)
if __name__ == "__main__":
main(parse_arguments(sys.argv[1:]))
| mit |
mne-tools/mne-tools.github.io | stable/_downloads/ca415e118a91c7bc4499db9f40b0a222/topo_customized.py | 15 | 1940 | """
========================================
Plot custom topographies for MEG sensors
========================================
This example exposes the :func:`~mne.viz.iter_topography` function that makes
it very easy to generate custom sensor topography plots.
Here we will plot the power spectrum of each channel on a topographic
layout.
"""
# Author: Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.viz import iter_topography
from mne import io
from mne.time_frequency import psd_welch
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, fir_design='firwin')
picks = mne.pick_types(raw.info, meg=True, exclude=[])
tmin, tmax = 0, 120 # use the first 120s of data
fmin, fmax = 2, 20 # look at frequencies between 2 and 20Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
psds, freqs = psd_welch(raw, picks=picks, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax)
psds = 20 * np.log10(psds) # scale to dB
def my_callback(ax, ch_idx):
"""
This block of code is executed once you click on one of the channel axes
in the plot. To work with the viz internals, this function should only take
two parameters, the axis and the channel or data index.
"""
ax.plot(freqs, psds[ch_idx], color='red')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Power (dB)')
for ax, idx in iter_topography(raw.info,
fig_facecolor='white',
axis_facecolor='white',
axis_spinecolor='white',
on_pick=my_callback):
ax.plot(psds[idx], color='red')
plt.gcf().suptitle('Power spectral densities')
plt.show()
| bsd-3-clause |
zoyahav/incubator-airflow | airflow/hooks/hive_hooks.py | 22 | 27917 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from builtins import zip
from past.builtins import basestring
import collections
import unicodecsv as csv
import itertools
import logging
import re
import subprocess
import time
from tempfile import NamedTemporaryFile
import hive_metastore
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.helpers import as_flattened_list
from airflow.utils.file import TemporaryDirectory
from airflow import configuration
import airflow.security.utils as utils
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
    It also supports the ``beeline``,
a lighter CLI that runs JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
Parameters passed here can be overridden by run_cli's hive_conf param
The extra connection parameter ``auth`` gets passed as in the ``jdbc``
connection string as is.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: string
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: string
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: string
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default",
run_as=None,
mapred_queue=None,
mapred_queue_priority=None,
mapred_job_name=None):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.auth = conn.extra_dejson.get('auth', 'noSasl')
self.conn = conn
self.run_as = run_as
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
"Invalid Mapred Queue Priority. Valid values are: "
"{}".format(', '.join(HIVE_QUEUE_PRIORITIES)))
self.mapred_queue = mapred_queue
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{conn.host}:{conn.port}/{conn.schema}"
if configuration.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/_HOST@EXAMPLE.COM")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = "" # noqa
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url += ";principal={template};{proxy_user}"
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = jdbc_url.format(**locals())
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
def _prepare_hiveconf(self, d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
            zip(
["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()]
)
)
def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(**locals())
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
hive_conf_params = self._prepare_hiveconf(hive_conf)
if self.mapred_queue:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.queuename={}'
.format(self.mapred_queue)])
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.priority={}'
.format(self.mapred_queue_priority)])
if self.mapred_job_name:
hive_conf_params.extend(
['-hiveconf',
'mapred.job.name={}'
.format(self.mapred_job_name)])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
logging.info(" ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir)
self.sp = sp
stdout = ''
while True:
line = sp.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
logging.info(line.decode('UTF-8').strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql):
"""
Test an hql statement using the hive cli and EXPLAIN
"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ',
'add jar ',
'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
logging.info("Testing HQL [{0} (...)]".format(query_preview))
if query_set == insert:
query = other + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
logging.info(message)
                    error_loc = re.search(r'(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
l = int(error_loc.group(1))
begin = max(l-2, 0)
end = min(l+3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
logging.info("Context :\n {0}".format(context))
else:
logging.info("SUCCESS")
def load_df(
self,
df,
table,
create=True,
recreate=False,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param field_dict: mapping from column name to hive data type
:type field_dict: dict
:param encoding: string encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
return dict((col, DTYPE_KIND_HIVE_TYPE[dtype.kind]) for col, dtype in df.dtypes.iteritems())
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
if field_dict is None and (create or recreate):
field_dict = _infer_field_types_from_df(df)
df.to_csv(f, sep=delimiter, **pandas_kwargs)
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs)
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the tables gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param delimiter: field delimiter in the file
:type delimiter: str
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n"
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile;"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} "
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals});"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.terminate()
time.sleep(60)
self.sp.kill()
class HiveMetastoreHook(BaseHook):
""" Wrapper to interact with the Hive Metastore"""
def __init__(self, metastore_conn_id='metastore_default'):
self.metastore_conn = self.get_connection(metastore_conn_id)
self.metastore = self.get_metastore_client()
def __getstate__(self):
        # This is for pickling to work despite the thrift hive client not
        # being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_service import ThriftHive
ms = self.metastore_conn
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
socket = TSocket.TSocket(ms.host, ms.port)
if configuration.get('core', 'security') == 'kerberos' and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return ThriftHive.Client(protocol)
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
        :param schema: Name of hive schema (database) @table belongs to
        :type schema: string
        :param table: Name of hive table @partition belongs to
        :type table: string
        :param partition: Expression that matches the partitions to check for
            (eg `a = 'b' AND c = 'd'`)
        :type partition: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
self.metastore._oprot.trans.open()
partitions = self.metastore.get_partitions_by_filter(
schema, table, partition, 1)
self.metastore._oprot.trans.close()
if partitions:
return True
else:
return False
def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
        :param schema: Name of hive schema (database) @table belongs to
        :type schema: string
        :param table: Name of hive table @partition belongs to
        :type table: string
        :param partition_name: Name of the partition to check for (eg `a=b/c=d`)
        :type partition_name: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
self.metastore._oprot.trans.open()
try:
self.metastore.get_partition_by_name(
schema, table, partition_name)
return True
except hive_metastore.ttypes.NoSuchObjectException:
return False
finally:
self.metastore._oprot.trans.close()
def get_table(self, table_name, db='default'):
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
self.metastore._oprot.trans.open()
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
table = self.metastore.get_table(dbname=db, tbl_name=table_name)
self.metastore._oprot.trans.close()
return table
def get_tables(self, db, pattern='*'):
"""
Get a metastore table object
"""
self.metastore._oprot.trans.open()
tables = self.metastore.get_tables(db_name=db, pattern=pattern)
objs = self.metastore.get_table_objects_by_name(db, tables)
self.metastore._oprot.trans.close()
return objs
def get_databases(self, pattern='*'):
"""
Get a metastore table object
"""
self.metastore._oprot.trans.open()
dbs = self.metastore.get_databases(pattern)
self.metastore._oprot.trans.close()
return dbs
def get_partitions(
self, schema, table_name, filter=None):
"""
        Returns a list of all partitions in a table. Works only
        for tables with fewer than 32767 partitions (java short max val).
        For subpartitioned tables, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
self.metastore._oprot.trans.open()
table = self.metastore.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = self.metastore.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=32767)
else:
parts = self.metastore.get_partitions(
db_name=schema, tbl_name=table_name, max_parts=32767)
self.metastore._oprot.trans.close()
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
def max_partition(self, schema, table_name, field=None, filter=None):
"""
Returns the maximum value for all partitions in a table. Works only
        for tables that have a single partition key. For subpartitioned
        tables, we recommend using signal tables.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow', table_name=t)
'2015-01-01'
"""
parts = self.get_partitions(schema, table_name, filter)
if not parts:
return None
elif len(parts[0]) == 1:
field = list(parts[0].keys())[0]
elif not field:
raise AirflowException(
"Please specify the field you want the max "
"value for")
return max([p[field] for p in parts])
def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
t = self.get_table(table_name, db)
return True
except Exception as e:
return False
class HiveServer2Hook(BaseHook):
"""
Wrapper around the impyla library
    Note that the default authMechanism is PLAIN; to override it you
    can specify it in the ``extra`` of your connection in the UI, e.g.
    ``{"authMechanism": "GSSAPI"}``.
"""
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self, schema=None):
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'PLAIN')
kerberos_service_name = None
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
            # impyla uses GSSAPI instead of KERBEROS as an auth_mechanism identifier
if auth_mechanism == 'KERBEROS':
logging.warning("Detected deprecated 'KERBEROS' for authMechanism for %s. Please use 'GSSAPI' instead",
self.hiveserver2_conn_id)
auth_mechanism = 'GSSAPI'
from impala.dbapi import connect
return connect(
host=db.host,
port=db.port,
auth_mechanism=auth_mechanism,
kerberos_service_name=kerberos_service_name,
user=db.login,
database=schema or db.schema or 'default')
def get_results(self, hql, schema='default', arraysize=1000):
from impala.error import ProgrammingError
with self.get_conn(schema) as conn:
if isinstance(hql, basestring):
hql = [hql]
results = {
'data': [],
'header': [],
}
cur = conn.cursor()
for statement in hql:
cur.execute(statement)
records = []
try:
# impala Lib raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
records = cur.fetchall()
except ProgrammingError:
logging.debug("get_results returned no records")
if records:
results = {
'data': records,
'header': cur.description,
}
return results
def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000):
schema = schema or 'default'
with self.get_conn(schema) as conn:
with conn.cursor() as cur:
logging.info("Running query: " + hql)
cur.execute(hql)
schema = cur.description
with open(csv_filepath, 'wb') as f:
writer = csv.writer(f,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
if output_header:
writer.writerow([c[0] for c in cur.description])
i = 0
while True:
rows = [row for row in cur.fetchmany(fetch_size) if row]
if not rows:
break
writer.writerows(rows)
i += len(rows)
logging.info("Written {0} rows so far.".format(i))
logging.info("Done. Loaded a total of {0} rows.".format(i))
def get_records(self, hql, schema='default'):
"""
Get a set of records from a Hive query.
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema)['data']
def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df
| apache-2.0 |
jat255/hyperspy | hyperspy/tests/drawing/test_plot_signal1d.py | 1 | 11359 | # Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import os
from shutil import copyfile
import matplotlib.pyplot as plt
import numpy as np
import pytest
import scipy.misc
import hyperspy.api as hs
from hyperspy.misc.test_utils import update_close_figure
from hyperspy.signals import Signal1D
from hyperspy.drawing.signal1d import Signal1DLine
from hyperspy.tests.drawing.test_plot_signal import _TestPlot
scalebar_color = 'blue'
default_tol = 2.0
baseline_dir = 'plot_signal1d'
style_pytest_mpl = 'default'
style = ['default', 'overlap', 'cascade', 'mosaic', 'heatmap']
@pytest.fixture
def mpl_generate_path_cmdopt(request):
return request.config.getoption("--mpl-generate-path")
def _generate_filename_list(style):
path = os.path.dirname(__file__)
filename_list = ['test_plot_spectra_%s' % s for s in style] + \
['test_plot_spectra_rev_%s' % s for s in style]
filename_list2 = []
for filename in filename_list:
for i in range(0, 4):
filename_list2.append(os.path.join(path, baseline_dir,
'%s%i.png' % (filename, i)))
return filename_list2
@pytest.fixture
def setup_teardown(request, scope="class"):
mpl_generate_path_cmdopt = request.config.getoption("--mpl-generate-path")
# SETUP
# duplicate baseline images to match the test_name when the
# parametrized 'test_plot_spectra' are run. For a same 'style', the
# expected images are the same.
if mpl_generate_path_cmdopt is None:
for filename in _generate_filename_list(style):
copyfile("%s.png" % filename[:-5], filename)
yield
# TEARDOWN
# Create the baseline images: copy one baseline image for each test
# and remove the other ones.
if mpl_generate_path_cmdopt:
for filename in _generate_filename_list(style):
copyfile(filename, "%s.png" % filename[:-5])
# Delete the images that have been created in 'setup_class'
for filename in _generate_filename_list(style):
os.remove(filename)
@pytest.mark.usefixtures("setup_teardown")
class TestPlotSpectra():
s = hs.signals.Signal1D(scipy.misc.ascent()[100:160:10])
# Add a test signal with decreasing axis
s_reverse = s.deepcopy()
s_reverse.axes_manager[1].offset = 512
s_reverse.axes_manager[1].scale = -1
def _generate_parameters(style):
parameters = []
for s in style:
for fig in [True, None]:
for ax in [True, None]:
parameters.append([s, fig, ax])
return parameters
def _generate_ids(style, duplicate=4):
ids = []
for s in style:
ids.extend([s] * duplicate)
return ids
@pytest.mark.parametrize(("style", "fig", "ax"),
_generate_parameters(style),
ids=_generate_ids(style))
@pytest.mark.mpl_image_compare(baseline_dir=baseline_dir,
tolerance=default_tol, style=style_pytest_mpl)
def test_plot_spectra(self, style, fig, ax):
if fig:
fig = plt.figure()
if ax:
fig = plt.figure()
ax = fig.add_subplot(111)
ax = hs.plot.plot_spectra(self.s, style=style, legend='auto',
fig=fig, ax=ax)
if style == 'mosaic':
ax = ax[0]
return ax.figure
@pytest.mark.parametrize(("style", "fig", "ax"),
_generate_parameters(style),
ids=_generate_ids(style))
@pytest.mark.mpl_image_compare(baseline_dir=baseline_dir,
tolerance=default_tol, style=style_pytest_mpl)
def test_plot_spectra_rev(self, style, fig, ax):
if fig:
fig = plt.figure()
if ax:
fig = plt.figure()
ax = fig.add_subplot(111)
ax = hs.plot.plot_spectra(self.s_reverse, style=style, legend='auto',
fig=fig, ax=ax)
if style == 'mosaic':
ax = ax[0]
return ax.figure
@pytest.mark.parametrize("figure", ['1nav', '1sig', '2nav', '2sig'])
@pytest.mark.mpl_image_compare(baseline_dir=baseline_dir,
tolerance=default_tol, style=style_pytest_mpl)
def test_plot_spectra_sync(self, figure):
s1 = hs.signals.Signal1D(scipy.misc.face()).as_signal1D(0).inav[:, :3]
s2 = s1.deepcopy() * -1
hs.plot.plot_signals([s1, s2])
        if figure == '1nav':
            return s1._plot.navigator_plot.figure
        if figure == '1sig':
            return s1._plot.signal_plot.figure
        if figure == '2nav':
            return s2._plot.navigator_plot.figure
        if figure == '2sig':
            return s2._plot.signal_plot.figure
def test_plot_spectra_legend_pick(self):
x = np.linspace(0., 2., 512)
n = np.arange(1, 5)
x_pow_n = x[None, :]**n[:, None]
s = hs.signals.Signal1D(x_pow_n)
my_legend = [r'x^' + str(io) for io in n]
f = plt.figure()
ax = hs.plot.plot_spectra(s, legend=my_legend, fig=f)
leg = ax.get_legend()
leg_artists = leg.get_lines()
click = plt.matplotlib.backend_bases.MouseEvent(
'button_press_event', f.canvas, 0, 0, 'left')
for artist, li in zip(leg_artists, ax.lines[::-1]):
plt.matplotlib.backends.backend_agg.FigureCanvasBase.pick_event(
f.canvas, click, artist)
assert not li.get_visible()
plt.matplotlib.backends.backend_agg.FigureCanvasBase.pick_event(
f.canvas, click, artist)
@pytest.mark.mpl_image_compare(baseline_dir=baseline_dir,
tolerance=default_tol, style=style_pytest_mpl)
def test_plot_spectra_auto_update(self):
s = hs.signals.Signal1D(np.arange(100))
s2 = s / 2
ax = hs.plot.plot_spectra([s, s2])
s.data = -s.data
s.events.data_changed.trigger(s)
s2.data = -s2.data * 4 + 50
s2.events.data_changed.trigger(s2)
return ax.get_figure()
@update_close_figure
def test_plot_nav0_close():
test_plot = _TestPlot(ndim=0, sdim=1)
test_plot.signal.plot()
return test_plot.signal
@update_close_figure
def test_plot_nav1_close():
test_plot = _TestPlot(ndim=1, sdim=1)
test_plot.signal.plot()
return test_plot.signal
@update_close_figure
def test_plot_nav2_close():
test_plot = _TestPlot(ndim=2, sdim=1)
test_plot.signal.plot()
return test_plot.signal
def _test_plot_two_cursors(ndim):
test_plot = _TestPlot(ndim=ndim, sdim=1) # sdim=2 not supported
s = test_plot.signal
s.metadata.General.title = 'Nav %i, Sig 1, two cursor' % ndim
s.axes_manager[0].index = 4
s.plot()
s._plot.add_right_pointer()
s._plot.navigator_plot.figure.canvas.draw()
s._plot.signal_plot.figure.canvas.draw()
s._plot.right_pointer.axes_manager[0].index = 2
if ndim == 2:
s.axes_manager[1].index = 2
s._plot.right_pointer.axes_manager[1].index = 3
return s
@pytest.mark.parametrize('autoscale', ['', 'x', 'xv', 'v'])
@pytest.mark.parametrize('norm', ['log', 'auto'])
def test_plot_two_cursos_parameters(autoscale, norm):
kwargs = {'autoscale':autoscale, 'norm':norm}
test_plot = _TestPlot(ndim=2, sdim=1) # sdim=2 not supported
s = test_plot.signal
s.plot(**kwargs)
s._plot.add_right_pointer(**kwargs)
for line in s._plot.signal_plot.ax_lines:
assert line.autoscale == autoscale
def _generate_parameter():
parameters = []
for ndim in [1, 2]:
for plot_type in ['nav', 'sig']:
parameters.append([ndim, plot_type])
return parameters
@pytest.mark.mpl_image_compare(baseline_dir=baseline_dir,
tolerance=default_tol, style=style_pytest_mpl)
def test_plot_log_scale():
s = Signal1D(np.exp(-np.arange(100) / 5.0))
s.plot(norm='log')
return s._plot.signal_plot.figure
@pytest.mark.parametrize(("ndim", "plot_type"),
_generate_parameter())
@pytest.mark.mpl_image_compare(baseline_dir=baseline_dir,
tolerance=default_tol, style=style_pytest_mpl)
def test_plot_two_cursors(ndim, plot_type):
s = _test_plot_two_cursors(ndim=ndim)
if plot_type == "sig":
f = s._plot.signal_plot.figure
else:
        f = s._plot.navigator_plot.figure
f.canvas.draw()
f.canvas.flush_events()
return f
@update_close_figure
def test_plot_nav2_sig1_two_cursors_close():
return _test_plot_two_cursors(ndim=2)
def test_plot_with_non_finite_value():
s = hs.signals.Signal1D(np.array([np.nan, 2.0]))
s.plot()
s.axes_manager.events.indices_changed.trigger(s.axes_manager)
s = hs.signals.Signal1D(np.array([np.nan, np.nan]))
s.plot()
s.axes_manager.events.indices_changed.trigger(s.axes_manager)
s = hs.signals.Signal1D(np.array([-np.inf, 2.0]))
s.plot()
s.axes_manager.events.indices_changed.trigger(s.axes_manager)
s = hs.signals.Signal1D(np.array([np.inf, 2.0]))
s.plot()
s.axes_manager.events.indices_changed.trigger(s.axes_manager)
def test_plot_add_line_events():
s = hs.signals.Signal1D(np.arange(100))
s.plot()
assert len(s.axes_manager.events.indices_changed.connected) == 1
figure = s._plot.signal_plot
def line_function(axes_manager=None):
return 100 - np.arange(100)
line = Signal1DLine()
line.data_function = line_function
line.set_line_properties(color='blue', type='line', scaley=False)
figure.add_line(line, connect_navigation=True)
line.plot()
assert len(line.events.closed.connected) == 1
assert len(s.axes_manager.events.indices_changed.connected) == 2
line.close()
figure.close_right_axis()
assert len(line.events.closed.connected) == 0
assert len(s.axes_manager.events.indices_changed.connected) == 1
figure.close()
assert len(s.axes_manager.events.indices_changed.connected) == 0
@pytest.mark.parametrize("autoscale", ['', 'x', 'xv', 'v'])
@pytest.mark.mpl_image_compare(baseline_dir=baseline_dir,
tolerance=default_tol, style=style_pytest_mpl)
def test_plot_autoscale(autoscale):
s = hs.datasets.artificial_data.get_core_loss_eels_line_scan_signal(
add_powerlaw=True, add_noise=False)
s.plot(autoscale=autoscale)
ax = s._plot.signal_plot.ax
ax.set_xlim(500.0, 700.0)
ax.set_ylim(-10.0, 20.0)
s.axes_manager.events.indices_changed.trigger(s.axes_manager)
return s._plot.signal_plot.figure
| gpl-3.0 |
zihua/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First, clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters at the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
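# As the notes above suggest, the percolation-like behaviour of the structured
# variants is easiest to provoke with a much sparser graph; a variation to try
# (sketch, not run here):
#   knn_graph = kneighbors_graph(X, 5, include_self=False)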
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
wooey/Wooey | wooey/tests/scripts/heatmap.py | 6 | 1803 | #!/usr/bin/env python
__author__ = 'chris'
import argparse
import os
import sys
import pandas as pd
import seaborn as sns
import numpy as np
parser = argparse.ArgumentParser(description="Create a heatmap from a delimited file.")
parser.add_argument('--tsv', help='The delimited file to plot.', type=argparse.FileType('r'), required=True)
parser.add_argument('--delimiter', help='The delimiter for fields. Default: tab', type=str, default='\t')
parser.add_argument('--row', help='The column containing row to create a heatmap from. Default to first row.', type=str)
parser.add_argument('--cols', help='The columns to choose values from (separate by a comma for multiple). Default: All non-rows', type=str)
parser.add_argument('--log-normalize', help='Whether to log normalize data.', action='store_true')
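# Example invocation (sketch; the file name and column names are hypothetical):
#
#   python heatmap.py --tsv expression.tsv --row gene_id \
#       --cols sample_a,sample_b --log-normalize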
def main():
args = parser.parse_args()
data = pd.read_table(args.tsv, index_col=args.row if args.row else 0, sep=args.delimiter, encoding='utf-8')
if args.cols:
try:
data = data.loc[:,args.cols.split(',')]
except KeyError:
data = data.iloc[:,[int(i)-1 for i in args.cols.split(',')]]
if len(data.columns) > 50:
raise BaseException('Too many columns')
data = np.log2(data) if args.log_normalize else data
data[data==-1*np.inf] = data[data!=-1*np.inf].min().min()
    width = 5 + (0 if len(data.columns) < 50 else (len(data.columns) - 50) / 100.0)
row_cutoff = 1000
    height = 15 + (0 if len(data) < row_cutoff else (len(data) - row_cutoff) / 75.0)
seaborn_map = sns.clustermap(data, figsize=(width, height))
seaborn_map.savefig('{}_heatmap.png'.format(os.path.split(args.tsv.name)[1]))
seaborn_map.data2d.to_csv('{}_heatmap.tsv'.format(os.path.split(args.tsv.name)[1]), sep='\t')
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
msultan/msmbuilder | msmbuilder/hmm/discrete_approx.py | 12 | 6593 | """Discrete approximations to continuous distributions"""
# Author: Robert McGibbon <rmcgibbo@gmail.com>
# Contributors:
# Copyright (c) 2014, Stanford University
# All rights reserved.
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function, division, absolute_import
import numpy as np
import scipy.misc
import scipy.linalg
import scipy.optimize
from mdtraj.utils import ensure_type
__all__ = ['discrete_approx_mvn', 'NotSatisfiableError']
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class NotSatisfiableError(Exception):
pass
def discrete_approx_mvn(X, means, covars, match_variances=True):
"""Find a discrete approximation to a multivariate normal distribution.
The method employs find the discrete distribution with support only at the
supplied points X with minimal K-L divergence to a target multivariate
normal distribution under the constraints that the mean and variance
of the discrete distribution match the normal distribution exactly.
Parameters
----------
X : np.ndarray, shape=(n_points, n_features)
The allowable points
means : np.ndarray, shape=(n_features)
The mean vector of the MVN
covars : np.ndarray, shape=(n_features, n_features) or shape=(n_features,)
If covars is 2D, it's interpreted as the covariance matrix for
the model. If 1D, we assume a diagonal covariance matrix with the
specified diagonal entries.
    match_variances : bool, optional
When True, both the means and the variances of the discrete distribution
        are constrained. Under some circumstances, this is not satisfiable
        (e.g. if there aren't enough samples to match both moments exactly).
Returns
-------
weights : np.ndarray, shape=(n_samples,)
The weight for each of the points in X in the resulting
discrete probability distribution
Notes
-----
The discrete distribution is one that has mass only at the specified
points. It can therefore be parameterized by a set of weights on each
point. If :math:`\{X_i\}` is the set of allowable points, and
:math:`\{w_i\}` are the weights, then our discrete distribution has
the form
.. math::
        p(y; w) = \sum_i w_i \delta(y - X_i).
We chose the :math:`w_i` by minimizing the K-L divergence from the our
discrete distribution to the desired multivariate normal subject to a
constraint that the first moments of the discrete distribution match
the mean of the multivariate normal exactly, and that the variances
also match. Let :math:`q(x)` be the target distribution. The optimal
weights are then
.. math::
min_{\{w_i\}} \sum_i p(X_i; w) \log \frac{p(X_i; w)}{q(X_i)}
subject to
.. math::
        \sum_i X_i \, p(X_i; w) = \int_\Omega x \, q(x) \, dx = \mu,
        \sum_i (X_i - \mu)^2 \, p(X_i; w) = \int_\Omega (x - \mu)^2 \, q(x) \, dx.
References
----------
.. [1] Tanaka, Ken'ichiro, and Alexis Akira Toda. "Discrete approximations
of continuous distributions by maximum entropy." Economics Letters 118.3
(2013): 445-450.
"""
X = ensure_type(np.asarray(X), dtype=np.float32, ndim=2, name='X', warn_on_cast=False)
means = ensure_type(np.asarray(means), np.float64, ndim=1, name='means', warn_on_cast=False)
covars = np.asarray(covars)
# Get the un-normalized probability of each point X_i in the MVN
# `prob` are the q(X_i) in the mathematics
# `moments` are the \bar{T} that we want to match.
if covars.ndim == 1:
# diagonal covariance case
if not len(covars) == len(means):
raise ValueError('Shape Error: covars and means musth have the same length')
prob = np.exp(-0.5 * np.sum(1. / np.sqrt(covars) * (X - means) ** 2, axis=1))
moments = np.concatenate((means, covars)) if match_variances else means
elif covars.ndim == 2:
if not (covars.shape[0] == len(means) and covars.shape[1] == len(means)):
raise ValueError('Shape Error: covars must be square, with size = len(means)')
# full 2d covariance matrix
cv_chol = scipy.linalg.cholesky(covars, lower=True)
cv_sol = scipy.linalg.solve_triangular(cv_chol, (X - means).T, lower=True).T
prob = np.exp(-0.5 * (np.sum(cv_sol ** 2, axis=1)))
moments = np.concatenate((means, np.diag(covars))) if match_variances else means
else:
raise ValueError('covars must be 1D or 2D')
# this is T(x_i) for each X_i
moment_contributions = np.hstack((X, (X - means) ** 2)) if match_variances else X
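    # The constrained KL minimization described in the docstring is solved via
    # its dual: for multipliers l (one per matched moment) we minimize
    #     L(l) = log sum_i q(X_i) exp(l . T(X_i)) - l . moments
    # whose gradient is E_p[T(X)] - moments, so it vanishes exactly when the
    # tilted discrete distribution matches the target moments (cf. Tanaka and
    # Toda, 2013, cited in the docstring).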
def objective_and_grad(l):
dot = np.dot(moment_contributions, l)
lse = scipy.misc.logsumexp(dot, b=prob)
# value of the objective function
obj_value = lse - np.dot(l, moments)
# gradient of objective function
dot_max = dot.max(axis=0)
exp_term = np.sum(moment_contributions * (prob * np.exp(dot - dot_max)).reshape(-1, 1), axis=0)
log_numerator = np.log(exp_term) + dot_max
grad_value = np.exp(log_numerator - lse) - moments
return obj_value, grad_value
result = scipy.optimize.minimize(
objective_and_grad, jac=True, x0=np.ones_like(moments), method='BFGS')
if not result['success']:
raise NotSatisfiableError()
dot = np.dot(moment_contributions, result['x'])
log_denominator = scipy.misc.logsumexp(dot, b=prob)
weights = prob * np.exp(dot - log_denominator)
if not np.all(np.isfinite(weights)):
raise NotSatisfiableError()
weights = weights / np.sum(weights)
return weights
if __name__ == '__main__':
np.random.seed(10)
import matplotlib.pyplot as pp
length = 100
X = np.random.uniform(low=-5, high=5, size=(length, 1))
weights = discrete_approx_mvn(X, [0], [2])
pp.title('dot(weights, X) = %.5f, dot(weights, X**2)=%f' %
(np.dot(weights, X), np.dot(weights, X ** 2)))
for i in range(length):
pp.plot([X[i, 0], X[i, 0]], [0, weights[i]])
pp.figure()
X = np.random.uniform(low=-2, high=2, size=(length, 1))
weights = discrete_approx_mvn(X, [0], [1])
pp.title('dot(weights, X) = %.5f, dot(weights, X**2)=%f' %
(np.dot(weights, X), np.dot(weights, X ** 2)))
for i in range(length):
pp.plot([X[i, 0], X[i, 0]], [0, weights[i]])
pp.show()
| lgpl-2.1 |
AndreasMadsen/bachelor-code | test/test.py | 1 | 2947 |
from nose.tools import *
import warnings
import os.path as path
import sys
import os
thisdir = path.dirname(path.realpath(__file__))
sys.path.append(path.join(thisdir, '..'))
import progressbar
import matplotlib as mpl
if (os.environ.get('DISPLAY') is None): mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import theano
import neural
np.random.seed(42)
warnings.filterwarnings(
action='error',
category=UserWarning
)
warnings.filterwarnings(
action='ignore',
message='numpy.ndarray size changed, may indicate binary incompatibility'
)
is_HPC = (os.environ.get('DTU_HPC') is not None)
is_optimize = (os.environ.get('OPTIMIZE') is not None)
run_name = (os.environ.get('OUTNAME')
if os.environ.get('OUTNAME') is not None
else str(os.getpid()))
if (not is_HPC):
theano.config.compute_test_value = 'warn'
if (not is_optimize and not is_HPC):
theano.config.optimizer = 'None'
theano.config.linker = 'py'
theano.config.exception_verbosity = 'high'
if (theano.config.optimizer != 'None'):
print('Theano optimizer enabled')
def classifier(model, generator, y_shape, performance,
trainer=neural.learn.batch, train_size=500, test_size=100,
asserts=True, plot=False, epochs=100, **kwargs):
if (plot): print('testing classifier')
# Setup dataset and train model
train_dataset = generator(train_size)
test_dataset = generator(test_size)
train_error = np.zeros(epochs + 1)
test_error = np.zeros(epochs + 1)
# Train and show progress
if (not plot): print()
pbar = progressbar.ProgressBar(
widgets=[
'Training: ', progressbar.Bar(),
progressbar.Percentage(), ' | ', progressbar.ETA()
],
maxval=epochs
).start()
def on_epoch(model, epoch_i):
if (plot or epoch_i == 0 or epoch_i == epochs):
train_error[epoch_i] = model.test(*train_dataset.astuple())
test_error[epoch_i] = model.test(*test_dataset.astuple())
pbar.update(epoch_i)
trainer(model, train_dataset, on_epoch=on_epoch, epochs=epochs, **kwargs)
pbar.finish()
    # Calculate predictions and the misclassification rate
y = model.predict(test_dataset.data)
misses = np.mean(np.argmax(y, axis=1) != test_dataset.target)
# Plot
if (plot):
print('miss classifications:', misses)
plt.figure()
plt.plot(np.arange(0, epochs + 1), train_error, label='train', alpha=0.5)
plt.plot(np.arange(0, epochs + 1), test_error, label='test', alpha=0.5)
plt.legend()
plt.ylabel('loss')
if (is_HPC): plt.savefig('loss.png')
else: plt.show()
# Assert
if (asserts):
assert(train_error[0] > train_error[-1] > 0)
assert(test_error[0] > test_error[-1] > 0)
assert_equal(y.shape, y_shape)
assert((1 - misses) > performance)
__all__ = ['classifier']
| mit |
rajat1994/scikit-learn | examples/decomposition/plot_incremental_pca.py | 244 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (up to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
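# For data that genuinely does not fit in memory, the same estimator can be fed
# chunk by chunk through partial_fit instead of fit_transform; a minimal sketch
# (the chunking below is illustrative and not part of this example):
#
#   ipca_oob = IncrementalPCA(n_components=n_components)
#   for chunk in np.array_split(X, 10):
#       ipca_oob.partial_fit(chunk)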
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
c=c, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best")
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/examples/event_handling/pick_event_demo.py | 7 | 6316 | #!/usr/bin/env python
"""
You can enable picking by setting the "picker" property of an artist
(for example, a matplotlib Line2D, Text, Patch, Polygon, AxesImage,
etc...)
There are a variety of meanings of the picker property
None - picking is disabled for this artist (default)
boolean - if True then picking will be enabled and the
artist will fire a pick event if the mouse event is over
the artist
float - if picker is a number it is interpreted as an
epsilon tolerance in points and the artist will fire
off an event if it's data is within epsilon of the mouse
event. For some artists like lines and patch collections,
the artist may provide additional data to the pick event
that is generated, for example, the indices of the data within
epsilon of the pick event
function - if picker is callable, it is a user supplied
function which determines whether the artist is hit by the
mouse event.
hit, props = picker(artist, mouseevent)
to determine the hit test. If the mouse event is over the
artist, return hit=True and props is a dictionary of properties
you want added to the PickEvent attributes
After you have enabled an artist for picking by setting the "picker"
property, you need to connect to the figure canvas pick_event to get
pick callbacks on mouse press events. For example,
def pick_handler(event):
mouseevent = event.mouseevent
artist = event.artist
# now do something with this...
The pick event (matplotlib.backend_bases.PickEvent) which is passed to
your callback is always fired with two attributes:
mouseevent - the mouse event that generated the pick event. The
mouse event in turn has attributes like x and y (the coordinates in
display space, such as pixels from left, bottom) and xdata, ydata (the
coords in data space). Additionally, you can get information about
which buttons were pressed, which keys were pressed, which Axes
the mouse is over, etc. See matplotlib.backend_bases.MouseEvent
for details.
artist - the matplotlib.artist that generated the pick event.
Additionally, certain artists like Line2D and PatchCollection may
attach additional meta data like the indices into the data that meet
the picker criteria (for example, all the points in the line that are within
the specified epsilon tolerance)
The examples below illustrate each of these methods.
"""
from __future__ import print_function
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle
from matplotlib.text import Text
from matplotlib.image import AxesImage
import numpy as np
from numpy.random import rand
if 1: # simple picking, lines, rectangles and text
fig, (ax1, ax2) = plt.subplots(2,1)
ax1.set_title('click on points, rectangles or text', picker=True)
ax1.set_ylabel('ylabel', picker=True, bbox=dict(facecolor='red'))
line, = ax1.plot(rand(100), 'o', picker=5) # 5 points tolerance
# pick the rectangle
bars = ax2.bar(range(10), rand(10), picker=True)
for label in ax2.get_xticklabels(): # make the xtick labels pickable
label.set_picker(True)
def onpick1(event):
if isinstance(event.artist, Line2D):
thisline = event.artist
xdata = thisline.get_xdata()
ydata = thisline.get_ydata()
ind = event.ind
print('onpick1 line:', zip(np.take(xdata, ind), np.take(ydata, ind)))
elif isinstance(event.artist, Rectangle):
patch = event.artist
print('onpick1 patch:', patch.get_path())
elif isinstance(event.artist, Text):
text = event.artist
print('onpick1 text:', text.get_text())
fig.canvas.mpl_connect('pick_event', onpick1)
if 1: # picking with a custom hit test function
# you can define custom pickers by setting picker to a callable
# function. The function has the signature
#
# hit, props = func(artist, mouseevent)
#
# to determine the hit test. if the mouse event is over the artist,
# return hit=True and props is a dictionary of
# properties you want added to the PickEvent attributes
def line_picker(line, mouseevent):
"""
find the points within a certain distance from the mouseclick in
data coords and attach some extra attributes, pickx and picky
which are the data points that were picked
"""
if mouseevent.xdata is None: return False, dict()
xdata = line.get_xdata()
ydata = line.get_ydata()
maxd = 0.05
d = np.sqrt((xdata-mouseevent.xdata)**2. + (ydata-mouseevent.ydata)**2.)
ind = np.nonzero(np.less_equal(d, maxd))
if len(ind):
pickx = np.take(xdata, ind)
picky = np.take(ydata, ind)
props = dict(ind=ind, pickx=pickx, picky=picky)
return True, props
else:
return False, dict()
def onpick2(event):
print('onpick2 line:', event.pickx, event.picky)
fig, ax = plt.subplots()
ax.set_title('custom picker for line data')
line, = ax.plot(rand(100), rand(100), 'o', picker=line_picker)
fig.canvas.mpl_connect('pick_event', onpick2)
if 1: # picking on a scatter plot (matplotlib.collections.RegularPolyCollection)
x, y, c, s = rand(4, 100)
def onpick3(event):
ind = event.ind
print('onpick3 scatter:', ind, np.take(x, ind), np.take(y, ind))
fig, ax = plt.subplots()
col = ax.scatter(x, y, 100*s, c, picker=True)
#fig.savefig('pscoll.eps')
fig.canvas.mpl_connect('pick_event', onpick3)
if 1: # picking images (matplotlib.image.AxesImage)
fig, ax = plt.subplots()
im1 = ax.imshow(rand(10,5), extent=(1,2,1,2), picker=True)
im2 = ax.imshow(rand(5,10), extent=(3,4,1,2), picker=True)
im3 = ax.imshow(rand(20,25), extent=(1,2,3,4), picker=True)
im4 = ax.imshow(rand(30,12), extent=(3,4,3,4), picker=True)
ax.axis([0,5,0,5])
def onpick4(event):
artist = event.artist
if isinstance(artist, AxesImage):
im = artist
A = im.get_array()
print('onpick4 image', A.shape)
fig.canvas.mpl_connect('pick_event', onpick4)
plt.show()
| mit |
vikramvgarg/grins | contrib/scripts/plot_transport.py | 6 | 2004 | import matplotlib
from matplotlib import rc
rc('text',usetex=True)
#matplotlib.use("PDF")
import matplotlib.pyplot as plot
from numpy import loadtxt
import sys
tick_label_fontsize=14
axis_label_fontsize=14
matplotlib.rc('xtick', labelsize=tick_label_fontsize )
matplotlib.rc(('xtick.major','xtick.minor'), pad=10)
matplotlib.rc('ytick', labelsize=tick_label_fontsize)
matplotlib.rc('text.latex', preamble=[r"\usepackage{amsmath}"])
matplotlib.rc('font', **{'family':'serif', 'serif':['Computer Modern Roman'],
'monospace':['Computer Modern Typewriter']})
if( len(sys.argv) < 2 ):
print "Error: Must supply transport data file name!"
sys.exit(1)
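# Expected layout of the transport data file, as implied by the parsing below
# (the header wording itself is not used):
#   line 1: header/comment line (read and discarded)
#   line 2: whitespace-separated species names
#   line 3: comment line (lines 1-3 skipped by loadtxt via skiprows=3)
#   line 4+: columns  T  mu  k  D_species1 ... D_speciesN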
datafile = sys.argv[1]
file = open(datafile, "r")
file.readline()
species = file.readline().split()
file.close()
n_species = len(species)
data = loadtxt( datafile, comments="#", skiprows=3)
T = data[:,0]
mu = data[:,1]
k = data[:,2]
D = data[:,3:3+n_species+1]
line_formats = [ "b-", "r-", "g-", "c-", "k-", \
"b--", "r--", "g--", "c--", "k--", \
"b:", "r:", "g:", "c:", "k:", \
"b-.", "r-.", "g-.", "c-.", "k-.", \
"b.", "r.", "g.", "c.", "k." ]
fig = plot.figure()
axes = fig.add_subplot(111)
axes.yaxis.major.formatter.set_powerlimits((0,0))
axes.set_xlabel(r"$T$ [K]")
axes.set_ylabel(r"$mu$")
axes.plot(T,mu,"b-")
axes.grid(True)
plot.savefig("mu.png")
fig2 = plot.figure()
axes2 = fig2.add_subplot(111)
axes2.yaxis.major.formatter.set_powerlimits((0,0))
axes2.set_xlabel(r"$T$ [K]")
axes2.set_ylabel(r"$k$")
axes2.plot(T,k,"b-")
axes2.grid(True)
plot.savefig("k.png")
fig3 = plot.figure()
axes3 = fig3.add_subplot(111)
axes3.yaxis.major.formatter.set_powerlimits((0,0))
axes3.set_xlabel(r"$T$ [K]")
axes3.set_ylabel(r"$D$")
for i,s in enumerate(species):
axes3.plot(T, D[:,i], line_formats[i], label=r"D "+s)
axes3.legend(loc="upper left")
axes3.grid(True)
plot.savefig("D.png")
plot.show()
| lgpl-2.1 |
yunque/sms-tools | lectures/04-STFT/plots-code/blackman-even-odd.py | 24 | 1481 | import matplotlib.pyplot as plt
import numpy as np
from scipy.fftpack import fft, fftshift
from scipy import signal
M = 32
N = 128
hN = N/2
hM = M/2
fftbuffer = np.zeros(N)
w = signal.blackman(M)
plt.figure(1, figsize=(9.5, 6))
plt.subplot(3,2,1)
plt.plot(np.arange(-hM, hM), w, 'b', lw=1.5)
plt.axis([-hM, hM-1, 0, 1.05])
plt.title('w1, M=32')
fftbuffer = np.zeros(N)
fftbuffer[:hM] = w[hM:]
fftbuffer[N-hM:] = w[:hM]
X = fft(fftbuffer)
mX = 20*np.log10(abs(fftshift(X)))
plt.subplot(3,2,3)
plt.plot(np.arange(-hN, hN), mX-max(mX), 'r', lw=1.5)
plt.axis([-hN/2,hN/2,-80,1])
plt.title('mW1')
pX = np.angle(fftshift(X))
plt.subplot(3,2,5)
plt.plot(np.arange(-hN, hN), pX, 'c', lw=1.5)
plt.axis([-hN,hN-1,-np.pi,np.pi])
plt.title('pW1')
M = 31
N = 128
hN = N/2
hM = (M+1)/2
fftbuffer = np.zeros(N)
w = signal.blackman(M)
plt.subplot(3,2,2)
plt.plot(np.arange(-hM, hM-1), w, 'b', lw=1.5)
plt.axis([-hM, hM, 0, 1.05])
plt.title('w2, M=31')
fftbuffer = np.zeros(N)
fftbuffer[:hM] = w[hM-1:]
fftbuffer[N-hM+1:] = w[:hM-1]
X = fft(fftbuffer)
mX = 20*np.log10(abs(fftshift(X)))
plt.subplot(3,2,4)
plt.plot(np.arange(-hN, hN), mX-max(mX), 'r', lw=1.5)
plt.axis([-hN/2,hN/2-1,-80,1])
plt.title('mW2')
pX = np.angle(fftshift(X))
plt.subplot(3,2,6)
plt.plot(np.arange(-hN, hN), pX, 'c', lw=1.5)
plt.axis([-hN,hN-1,-np.pi,np.pi])
plt.title('pW2')
plt.tight_layout()
plt.savefig('blackman-even-odd.png')
plt.show() | agpl-3.0 |
ericjpj/ns-3-dev | src/core/examples/sample-rng-plot.py | 188 | 1246 | # -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
rng = ns.core.NormalVariable(100.0, 225.0)
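# As the comment above notes, the second argument is the variance: 225
# corresponds to sigma = 15, which matches the sigma=15 label drawn on the
# histogram below.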
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
| gpl-2.0 |
Bismarrck/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/__init__.py | 42 | 2656 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to allow different io formats (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_data
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_labels
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import HAS_DASK
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import queue_parsed_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_record_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.numpy_io import numpy_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_data
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_labels
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_matrix
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import pandas_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.generator_io import generator_input_fn
| apache-2.0 |
mirjalil/ml-visual-recognition | codes/utilities.py | 1 | 3143 | import numpy as np
import pandas
import sys
ndim = 900
### Calculate Standardized Mean Difference Between Classes
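# Sketch of what the function below assumes (inferred from the code, not from
# separate documentation): `cstat` maps each class label to per-feature
# running sums -- cstat[label][0] = sum(x) and cstat[label][1] = sum(x**2) --
# and cstat['all'] additionally carries the total sample count in
# cstat['all'][2]. The returned quantity is the feature-wise standardized
# mean difference
#     rdiff = (mean_neg - mean_pos) / sqrt(total_var)
# for the classes listed in `yneg` versus `ypos`.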
def calStandMeanDiff(y, cstat, yneg, ypos):
sx = np.zeros(shape=ndim, dtype=float)
ssx = np.zeros(shape=ndim, dtype=float)
n1 = np.sum(np.in1d(y, yneg))
n2 = np.sum(np.in1d(y, ypos))
sys.stderr.write("Number of samples in NegClass: %d and PosClass: %d \n"%(n1, n2))
for yi in yneg:
sx += cstat[yi][0]
ssx += cstat[yi][1]
r1_mean = sx / float(n1)
r1_var = (ssx - 2*sx*r1_mean + r1_mean**2) / float(n1)
sx = np.zeros(shape=ndim, dtype=float)
ssx = np.zeros(shape=ndim, dtype=float)
for yi in ypos:
sx += cstat[yi][0]
ssx += cstat[yi][1]
r2_mean = sx / float(n2)
r2_var = (ssx - 2*sx*r2_mean + r2_mean**2) / float(n2)
tot_mean = cstat['all'][0] / float(cstat['all'][2])
tot_var = (cstat['all'][1] - 2*cstat['all'][0]*tot_mean + tot_mean**2) / float(cstat['all'][2])
rdiff = (r1_mean - r2_mean) / np.sqrt(tot_var)
return (rdiff)
##########################################
def readRandomSample(data_fname, y, size, goodfeat=None, acc_miny=None, acc_maxy=None):
""" Read a random sample
"""
if goodfeat is None:
goodfeat = np.arange(ndim)
Xsub = np.empty(shape=(size,goodfeat.shape[0]), dtype=float)
ysub = np.zeros(shape=size, dtype=int)
if acc_miny is None:
acc_miny = np.min(y)
if acc_maxy is None:
acc_maxy = np.max(y)
#yuniq, ycount = np.unique(y, return_counts=True)
#tot_acceptable = np.sum(ycount[np.where((yuniq >= acc_miny) & (yuniq <= acc_maxy))[0]])
acceptable_indx = np.where((y>=acc_miny) & (y<=acc_maxy))[0]
assert(acceptable_indx.shape[0] > size)
choice_indx = np.sort(np.random.choice(acceptable_indx, size, replace=False))
print(choice_indx.shape)
#sys.stderr.write("Total Accetables: --> %d"%(tot_acceptable))
#proba = 1.0 - size/float(tot_acceptable)
with open(data_fname, 'r') as fp:
n = 0
nf = 0
for line in fp:
# if (y[n] >= acc_miny and y[n]<=acc_maxy):
# if np.random.uniform(low=0, high=1) > proba and nf < size:
if nf < size:
if n == choice_indx[nf]:
line = line.strip().split()
ix = -1
for i,v in enumerate(line):
if np.any(goodfeat == i):
ix += 1
Xsub[nf,ix] = int(v)
ysub[nf] = y[n]
nf += 1
n += 1
return(Xsub, ysub)
### Performance Evaluation
def evalPerformance(ytrue, ypred):
assert(ytrue.shape == ypred.shape)
tp = np.sum(ypred[np.where(ytrue == 1)[0]] == 1)
fp = np.sum(ypred[np.where(ytrue == -1)[0]] == 1)
tn = np.sum(ypred[np.where(ytrue == -1)[0]] == -1)
fn = ytrue.shape[0]-(tp+fp+tn)
#sys.stderr.write (" (%d %d %d %d)"%(tp, fp, tn, fn))
prec = tp / float(tp + fp)
recall = tp / float(tp + fn)
f1score = 2*tp / float(2*tp + fp + fn)
return (prec, recall, f1score)
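# Worked example (made-up labels): ytrue = [1, 1, -1, -1] and
# ypred = [1, -1, 1, -1] give tp=1, fp=1, tn=1, fn=1, hence
# precision = recall = f1score = 0.5.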
| apache-2.0 |
NaturalEcon/RDb | RDb/models/managers.py | 1 | 8223 | from django.db.models.query import QuerySet
import numpy as np
import pandas as pd
from django.db import models
from model_utils.managers import PassThroughManager
"""
managers.py: Django managers module.
Created on Wed Jan 8 22:43:33 2014
@author: acumen, chrisdev
"""
table_prefix = 'RDb_ne'
# Thanks to 'chrisdev' for the Django-Pandas code.
class DataFrameQuerySet(QuerySet):
def to_pivot_table(self, *fields, **kwargs):
"""
        A convenience method for creating a spreadsheet-style pivot table
        as a DataFrame from the queryset
Parameters
----------
fields: The model fields to utilise in creating the frame.
to span a relationship, just use the field name of related
fields across models, separated by double underscores,
values : column to aggregate, optional
rows : list of column names or arrays to group on
Keys to group on the x-axis of the pivot table
cols : list of column names or arrays to group on
Keys to group on the y-axis of the pivot table
aggfunc : function, default numpy.mean, or list of functions
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
fill_value : scalar, default None
Value to replace missing values with
margins : boolean, default False
Add all row / columns (e.g. for subtotal / grand totals)
dropna : boolean, default True
Do not include columns whose entries are all NaN
"""
df = self.to_dataframe(*fields)
values = kwargs.pop('values')
rows = kwargs.pop('rows')
cols = kwargs.pop('cols')
aggfunc = kwargs.pop('aggfunc', np.mean)
fill_value = kwargs.pop('fill_value', None)
margins = kwargs.pop('margins', False)
dropna = kwargs.pop('dropna', False)
return pd.pivot_table(df, values=values,
fill_value=fill_value,
rows=rows, cols=cols,
aggfunc=aggfunc,
margins=margins,
dropna=dropna)
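    # Hedged usage sketch (MyModel, 'region', 'year' and 'sales' are
    # hypothetical names, not part of this module). Assuming a model whose
    # manager returns this queryset class:
    #   MyModel.objects.get_query_set().to_pivot_table(
    #       'region', 'year', 'sales',
    #       values='sales', rows=['region'], cols=['year'], aggfunc=np.sum)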
def to_timeseries(self, *fields, **kwargs):
"""
A convenience method for creating a time series i.e. the
        DataFrame index is an instance of a DateTime or PeriodIndex
Parameters
----------
fields: The model fields to utilise in creating the frame.
to span a relationship, just use the field name of related
fields across models, separated by double underscores,
index: Specify the field to use for the index. If the index
field is not in the field list it will be appended. This
is mandatory.
storage: Specify if the queryset uses the `wide` or `long` format
for data.
        pivot_columns: Required once you specify the `long` format
            storage. This could either be a list or a string identifying
            the field name or combination of fields. If pivot_columns
            is a single column, then the unique values in this column become
            new columns in the DataFrame.
            If pivot_columns is a list, the values in these columns are
            concatenated (using '.' as a separator) and these combined
            values are used for the new timeseries columns.
        values: Also required if you utilize the `long` storage. The
            values column name is used for populating new frame values
freq: The offset string or object representing a target conversion
rs_kwargs: Arguments based on pandas.DataFrame.resample
"""
index = kwargs.pop('index', None)
if not index:
raise AssertionError('You must supply an index field')
storage = kwargs.get('storage', 'wide')
if storage not in ['wide', 'long']:
raise AssertionError('storage must be wide or long')
if storage == 'wide':
df = self.to_dataframe(*fields, index=index)
else:
df = self.to_dataframe(*fields)
values = kwargs.get('values', None)
if values is None:
raise AssertionError('You must specify a values field')
pivot_columns = kwargs.get('pivot_columns', None)
if pivot_columns is None:
raise AssertionError('You must specify pivot_columns')
if isinstance(pivot_columns, list):
df['combined_keys'] = ''
for c in pivot_columns:
df['combined_keys'] += df[c].str.upper() + '.'
df['combined_keys'] += values.lower()
df = df.pivot(index=index,
columns='combined_keys',
values=values)
else:
df = df.pivot(index=index,
columns=pivot_columns,
values=values)
rule = kwargs.get('freq', None)
if rule:
rs_kwargs = kwargs.get('rs_kwargs', None)
if rs_kwargs:
df = df.resample(rule, **rs_kwargs)
else:
df = df.resample(rule)
return df
def to_dataframe(self, *fields, **kwargs):
"""
Returns a DataFrame from the queryset
Parameters
-----------
fields: The model fields to utilise in creating the frame.
to span a relationship, just use the field name of related
fields across models, separated by double underscores,
:param index: Specify the field to use for the index. If the index
field is not in the field list it will be appended
        :param fill_na: Fill in missing observations using either a string
            specifying a pandas fill method
            {'backfill', 'bfill', 'pad', 'ffill'} or a scalar value
:param coerce_float: Attempt to convert the numeric non-string fields
like object, decimal etc. to float if possible
"""
index = kwargs.pop('index', None)
fill_na = kwargs.pop('fill_na', None)
coerce_float = kwargs.pop('coerce_float', False)
if not fields:
fields = tuple(self.model._meta.get_all_field_names())
if index is not None:
# add it to the fields if not already there
if index not in fields:
fields = fields + (index,)
qs = self.values_list(*fields)
recs = None
try:
recs = np.core.records.fromrecords(qs, names=qs.field_names)
except IndexError:
pass
df = pd.DataFrame.from_records(recs, coerce_float=coerce_float)
if index is not None:
df = df.set_index(index)
if fill_na is not None:
if fill_na not in ['backfill', 'bfill', 'pad', 'ffill']:
df = df.fillna(value=fill_na)
else:
df = df.fillna(method=fill_na)
return df
class DataFrameManager(PassThroughManager):
def get_query_set(self):
return DataFrameQuerySet(self.model)
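# Hedged usage sketch (MyModel and its fields are hypothetical, not defined in
# this module): attaching the manager above to a model exposes the DataFrame
# helpers on its querysets, e.g.
#   class MyModel(models.Model):
#       price = models.FloatField()
#       objects = DataFrameManager()
#   df = MyModel.objects.get_query_set().to_dataframe('id', 'price', index='id')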
class ABOUTManager(models.Manager):
"""ABOUTManager: A manager for models based on the ABOUT abstract class."""
def resources(self):
return super(ABOUTManager, self).get_queryset().exclude(resource__exact=None)
def collections(self):
return super(ABOUTManager, self).get_queryset().exclude(collection__exact=None)
def processes(self):
return super(ABOUTManager, self).get_queryset().exclude(process__exact=None)
def actors(self):
return super(ABOUTManager, self).get_queryset().exclude(actor__exact=None)
class ResourceSurveyManager(models.Manager):
def get_query_set(self):
        return super(ResourceSurveyManager, self).get_queryset().filter(resource__neid=self.neid)
def by_value_type(self,value_type):
return self.surveys.filter(value_type__exact=value_type)
| gpl-3.0 |
fyffyt/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# test that the feature score of the best features
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
zafargilani/stcs | lib/clustering/gmm.py | 1 | 2963 | #!/usr/bin/env/ python -W ignore
# -*- coding: utf-8 -*-
"""
Gaussian Mixture Model for clustering types of Twitter accts by
using python scikit-learn (sklearn) GaussianMixture class.
(gmm)
refr: https://brilliant.org/wiki/gaussian-mixture-model/
libr: http://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html
code: https://github.com/scikit-learn/scikit-learn/blob/master/examples/mixture
(preprocessing)
libr: http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.normalize.html
libr: http://scikit-learn.org/stable/modules/preprocessing.html#normalization
(dependencies)
numpy sklearn
(execute)
python gmm.py K /input/to/userengagements.csv /output/to/
"""
import sys
import csv
import numpy as np
from sklearn import preprocessing
from sklearn import mixture
###############################################
'''
Apply Gaussian Mixture Models to the tweet feature-set
for clustering entities
'''
def process_gmm():
K = int(sys.argv[1])
# load all cols except screen_names (0)
#X = np.loadtxt(sys.argv[2], delimiter=',', skiprows=1,
X = np.loadtxt(sys.argv[2], delimiter=',', skiprows=0,
#usecols=range(1,23)) # range(start,stop) - stop not inclusive, 1-16 or 1-23, 0 is screen_name
usecols=range(1,16)) # range(start,stop) - stop not inclusive, 1-16 or 1-23, 0 is screen_name
#X = np.genfromtxt(sys.argv[2], delimiter=',', skip_header=1,
# #usecols=range(1,23)) # range(start,stop) - stop not inclusive, 1-16 or 1-23, 0 is screen_name
# usecols=range(1,16)) # range(start,stop) - stop not inclusive, 1-16 or 1-23, 0 is screen_name
## Tranpose (to normalise per col), Normalise, Tranpose (back to correct matrix arrangement)
#X_tran = X.transpose()
#X_norm = preprocessing.normalize(X_tran, norm='l1') # L1 for least absolute deviations
#X = X_norm.transpose()
#print("K: {}, data shape: [{}][{}]".format(K, len(X), len(X[0])))
# Fit a Gaussian mixture with EM using K components
gmm = mixture.GaussianMixture(n_components=K, covariance_type='full',
tol=1e-4, max_iter=500, n_init=3, init_params='kmeans',
warm_start=True, verbose=1).fit(X)
## generate random samples from the fitted Gaussian distribution
#sample = gmm.sample(1000)
# load screen_names
with open(sys.argv[2]) as csvfile:
read_csv = csv.reader(csvfile, delimiter=',')
screen_names = []
for row in read_csv:
screen_names.append(row[0])
# label clusters
labels = gmm.predict(X)
clusters = {}
i = 0
for label in labels:
if label in clusters:
clusters[label].append(screen_names[i])
else:
clusters[label] = [screen_names[i]]
i += 1
# outputs
for cluster in clusters:
f = open(sys.argv[3]+"/gmm."+sys.argv[2].split("/")[-1:].pop()+".K"+sys.argv[1]+".cluster"+str(cluster)+".out", "w+")
for c in clusters[cluster]:
f.write("{}\n".format(c))
f.close()
###############################################
if __name__ == "__main__":
try:
process_gmm()
except:
raise
| gpl-3.0 |
zhewang/lcvis | curve_fit.py | 2 | 2081 | import argparse
import csv
import json
import numpy as np
# import matplotlib.pyplot as plt
from supersmoother import SuperSmoother
from scipy import interpolate
def fillNaN(x, y):
length = len(x)
yy = []
for i in range(length):
if 0*y[i] != 0*y[i]:
for j in range(length):
if y[j % length] == y[j % length]:
yy.append(y[j])
break
else:
yy.append(y[i])
return x, yy
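# Worked example of fillNaN (values are illustrative): finite samples are kept
# and each NaN magnitude is replaced by the first finite value found scanning
# forward (wrapping around), e.g.
#   fillNaN([0.0, 0.5, 1.0], [float('nan'), 2.0, 3.0])
#   -> ([0.0, 0.5, 1.0], [2.0, 2.0, 3.0])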
def fitcurve(lc_data, period):
Mag = np.array([i["mag"] for i in lc_data], dtype=np.float32)
MJD = np.array([i["time"] for i in lc_data], dtype=np.float32)
Error = np.array([i["error"] for i in lc_data], dtype=np.float32)
t = MJD - MJD.min()
phi = np.array([i/period - int(i/period) for i in t])
xdata = phi
ydata = Mag
model = SuperSmoother()
model.fit(xdata, ydata)
x = np.linspace(0, 1, num = 50).tolist()
y = model.predict(x).tolist()
data = [{"phase": [], "mag": []}]
x, y = fillNaN(x, y)
for i in range(len(y)):
if y[i] == y[i]:
data[0]["phase"].append(x[i])
data[0]["mag"].append(y[i])
return data
def feature_derive(fileName, period, saveFileName):
lc_file = open(fileName, 'r')
lc_data = json.load(lc_file)
data = fitcurve(lc_data, period)
f_out = open(saveFileName, 'w')
f_out.write(json.dumps(data, sort_keys=True, indent=4))
f_out.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('path', help='path to dataset folder')
args = parser.parse_args()
with open('{}/object_list.csv'.format(args.path), newline='') as csvfile:
objects = csv.reader(csvfile)
next(objects, None)
for row in objects:
obj_id = int(row[0])
period = float(row[1])
print("Fitting {}".format(obj_id))
if period > 0:
feature_derive(args.path+'/'+str(obj_id)+'.dat.json', period,
args.path+'/'+str(obj_id)+'.fit.json')
| gpl-2.0 |
Hi-king/scikitcl | hmm.py | 1 | 3145 | #! /usr/bin/python
# -*- coding: utf-8 -*-
##==========##
## argument ##
##==========##
import argparse
import sys
class ArgumentParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
class MyFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter): pass
parser = ArgumentParser(
formatter_class=MyFormatter,
description='''
=========================================================
GMM-HMM Classifier
=========================================================
''',epilog = '''
output:
testset_svmdata_hmm_predict.dat
=========================================================
ogaki@iis.u-tokyo.ac.jp
2013/1/10
=========================================================
''')
parser.add_argument('input_svmdata', help='.dat')
parser.add_argument('testset_svmdata', help='.dat')
parser.add_argument('--n_components', type=int, default=6,
help='number of components')
parser.add_argument('--n_mixture', type=int, default=10,
help='mixture of GMM')
##========##
## Import ##
##========##
import sys
import os
import re
import scipy
import csv
import sklearn.hmm
herepath = os.path.dirname(sys.argv[0])
sys.path.append(herepath+"/..")
import egovision
def main(args):
##==========##
## Constant ##
##==========##
SUFFIX = "_hmm"
flag_proba = True
##======##
## init ##
##======##
inputfilename = args.input_svmdata
testsetfilename = args.testset_svmdata
n_components = args.n_components
n_mixture = args.n_mixture
base,ext = os.path.splitext(testsetfilename)
outputfilename = base+SUFFIX+"_predict_labeled.csv"
outputpredictname = base+SUFFIX+"_predict.dat"
probabilities_filename = base+SUFFIX+"_probabilities.csv"
##======##
## main ##
##======##
if inputfilename.find(".csv")>0:
inputdata = scipy.array(egovision.readcsvtolist_float(inputfilename))
else:
inputdata,inputlabels = egovision.read_libsvmdata(inputfilename)
if testsetfilename.find(".csv")>0:
testdata= scipy.array(egovision.readcsvtolist_float(testsetfilename))
else:
testdata, testlabels = egovision.read_libsvmdata(testsetfilename)
classifier = sklearn.hmm.GMMHMM(n_components,n_mixture, covariance_type="diag", n_iter=1000)
# tmpdata = []
# for i in xrange(0, len(inputdata)-3199, 3200):
# print i
# tmpdata.append(inputdata[i:i+3200, :])
# inputdata = scipy.array(tmpdata)
print inputdata.shape
#print inputdata.shape
classifier.fit([inputdata])
predictlabels = map(int, classifier.predict(testdata))
if flag_proba: probabilities = classifier.predict_proba(testdata)
csv.writer(open(outputpredictname, "w+")).writerows( map(lambda x: [x], predictlabels) )
if flag_proba: csv.writer(open(probabilities_filename, "w+")).writerows( probabilities )
exit(0)
if __name__ == '__main__':
args = parser.parse_args()
print args
main(args)
| bsd-2-clause |
MohammedWasim/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 83 | 17276 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LogisticRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = dense_results = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
| bsd-3-clause |
urinieto/hier_eval | segment_tree.py | 2 | 51887 | # CREATED:2013-08-13 12:02:42 by Brian McFee <brm2132@columbia.edu>
'''
Evaluation criteria for structural segmentation fall into two categories:
boundary annotation and structural annotation. Boundary annotation is the task
of predicting the times at which structural changes occur, such as when a verse
transitions to a refrain. Metrics for boundary annotation compare estimated
segment boundaries to reference boundaries. Structural annotation is the task
of assigning labels to detected segments. The estimated labels may be
arbitrary strings - such as A, B, C - and they need not describe functional
concepts. Metrics for structural annotation are similar to those used for
clustering data.
Conventions
-----------
Both boundary and structural annotation metrics require two dimensional arrays
with two columns, one for boundary start times and one for boundary end times.
Structural annotation further require lists of reference and estimated segment
labels which must have a length which is equal to the number of rows in the
corresponding list of boundary edges. In both tasks, we assume that
annotations express a partitioning of the track into intervals. The function
:func:`mir_eval.util.adjust_intervals` can be used to pad or crop the segment
boundaries to span the duration of the entire track.
Metrics
-------
* :func:`mir_eval.segment.detection`: An estimated boundary is considered
correct if it falls within a window around a reference boundary
* :func:`mir_eval.segment.deviation`: Computes the median absolute time
difference from a reference boundary to its nearest estimated boundary, and
vice versa
* :func:`mir_eval.segment.pairwise`: For classifying pairs of sampled time
instants as belonging to the same structural component
* :func:`mir_eval.segment.rand_index`: Clusters reference and estimated
annotations and compares them by the Rand Index
* :func:`mir_eval.segment.ari`: Computes the Rand index, adjusted for chance
* :func:`mir_eval.segment.nce`: Interprets sampled reference and estimated
labels as samples of random variables :math:`Y_R, Y_E` from which the
conditional entropy of :math:`Y_R` given :math:`Y_E` (Under-Segmentation) and
:math:`Y_E` given :math:`Y_R` (Over-Segmentation) are estimated
* :func:`mir_eval.segment.mutual_information`: Computes the standard,
normalized, and adjusted mutual information of sampled reference and
estimated segments
'''
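# Hedged illustration of the interval/label convention described above (the
# numbers are made up): a track split into three segments would be passed as
#   intervals = np.array([[0.0, 10.0], [10.0, 25.0], [25.0, 40.0]])
#   labels    = ['A', 'B', 'A']
# i.e. one [start, end] row per segment and one label per row, together
# spanning the whole track.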
import numpy as np
import scipy.stats
import scipy.sparse
import scipy.misc
import scipy.special
import collections
import warnings
from mir_eval import util
# For hierarchical eval
import tree
def validate_boundary(reference_intervals, estimated_intervals, trim):
'''Checks that the input annotations to a segment boundary estimation
metric (i.e. one that only takes in segment intervals) look like valid
segment times, and throws helpful errors if not.
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
- trim : bool
will the start and end events be trimmed?
:raises:
- ValueError
Thrown when the provided annotations are not valid.
'''
if trim:
# If we're trimming, then we need at least 2 intervals
min_size = 2
else:
# If we're not trimming, then we only need one interval
min_size = 1
if len(reference_intervals) < min_size:
warnings.warn("Reference intervals are empty.")
if len(estimated_intervals) < min_size:
warnings.warn("Estimated intervals are empty.")
for intervals in [reference_intervals, estimated_intervals]:
util.validate_intervals(intervals)
def validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels):
'''Checks that the input annotations to a structure estimation metric (i.e.
one that takes in both segment boundaries and their labels) look like valid
segment times and labels, and throws helpful errors if not.
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
:raises:
- ValueError
Thrown when the provided annotations are not valid.
'''
for (intervals, labels) in [(reference_intervals, reference_labels),
(estimated_intervals, estimated_labels)]:
util.validate_intervals(intervals)
if intervals.shape[0] != len(labels):
raise ValueError('Number of intervals does not match number '
'of labels')
# Check only when intervals are non-empty
if intervals.size > 0:
# Make sure intervals start at 0
if not np.allclose(intervals[0, 0], 0.0):
raise ValueError('Segment intervals do not start at 0')
if reference_intervals.size == 0:
warnings.warn("Reference intervals are empty.")
if estimated_intervals.size == 0:
warnings.warn("Estimated intervals are empty.")
# Check only when intervals are non-empty
if reference_intervals.size > 0 and estimated_intervals.size > 0:
if not np.allclose(reference_intervals[-1, 1],
estimated_intervals[-1, 1]):
raise ValueError('End times do not match')
def detection(reference_intervals, estimated_intervals,
window=0.5, beta=1.0, trim=False):
'''Boundary detection hit-rate.
    A hit is counted whenever a reference boundary is within ``window`` of an
    estimated boundary. Note that each boundary is matched at most once: this
is achieved by computing the size of a maximal matching between reference
and estimated boundary points, subject to the window constraint.
:usage:
>>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
>>> # With 0.5s windowing
>>> P05, R05, F05 = mir_eval.boundary.detection(ref_intervals,
est_intervals,
window=0.5)
>>> # With 3s windowing
>>> P3, R3, F3 = mir_eval.boundary.detection(ref_intervals,
est_intervals,
window=3)
>>> # Ignoring hits for the beginning and end of track
>>> P, R, F = mir_eval.boundary.detection(ref_intervals,
est_intervals,
window=0.5,
trim=True)
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
- window : float > 0
size of the window of 'correctness' around ground-truth beats
(in seconds)
- beta : float > 0
weighting constant for F-measure.
- trim : boolean
if ``True``, the first and last boundary times are ignored.
Typically, these denote start (0) and end-markers.
:returns:
- precision : float
precision of estimated predictions
- recall : float
            recall of reference boundaries
- f_measure : float
F-measure (weighted harmonic mean of ``precision`` and ``recall``)
:raises:
- ValueError
Thrown when the provided annotations are not valid.
:references:
.. [#] D. Turnbull, G. Lanckriet, E. Pampalk, and M. Goto. A supervised
approach for detecting boundaries in music using difference
features and boosting. In Proceedings of the 8th International
Society for Music Information Retrieval Conference (ISMIR), pages
51-54, 2007.
'''
validate_boundary(reference_intervals, estimated_intervals, trim)
# Convert intervals to boundaries
reference_boundaries = util.intervals_to_boundaries(reference_intervals)
estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
# Suppress the first and last intervals
if trim:
reference_boundaries = reference_boundaries[1:-1]
estimated_boundaries = estimated_boundaries[1:-1]
# If we have no boundaries, we get no score.
if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
return 0.0, 0.0, 0.0
matching = util.match_events(reference_boundaries,
estimated_boundaries,
window)
precision = float(len(matching)) / len(estimated_boundaries)
recall = float(len(matching)) / len(reference_boundaries)
f_measure = util.f_measure(precision, recall, beta=beta)
return precision, recall, f_measure
def deviation(reference_intervals, estimated_intervals, trim=False):
'''Compute the median deviations between reference
and estimated boundary times.
:usage:
>>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
>>> r_to_e, e_to_r = mir_eval.boundary.deviation(ref_intervals,
est_intervals)
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
- trim : boolean
if ``True``, the first and last intervals are ignored.
Typically, these denote start (0.0) and end-of-track markers.
:returns:
- reference_to_estimated : float
median time from each reference boundary to the
closest estimated boundary
- estimated_to_reference : float
median time from each estimated boundary to the
closest reference boundary
:raises:
- ValueError
Thrown when the provided annotations are not valid.
:references:
.. [#] D. Turnbull, G. Lanckriet, E. Pampalk, and M. Goto. A supervised
approach for detecting boundaries in music using difference
features and boosting. In Proceedings of the 8th International
Society for Music Information Retrieval Conference (ISMIR), pages
51-54, 2007.
'''
validate_boundary(reference_intervals, estimated_intervals, trim)
# Convert intervals to boundaries
reference_boundaries = util.intervals_to_boundaries(reference_intervals)
estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
# Suppress the first and last intervals
if trim:
reference_boundaries = reference_boundaries[1:-1]
estimated_boundaries = estimated_boundaries[1:-1]
# If we have no boundaries, we get no score.
if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
return np.nan, np.nan
dist = np.abs(np.subtract.outer(reference_boundaries,
estimated_boundaries))
estimated_to_reference = np.median(dist.min(axis=0))
reference_to_estimated = np.median(dist.min(axis=1))
return reference_to_estimated, estimated_to_reference
def pairwise(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1, beta=1.0):
'''Frame-clustering segmentation evaluation by pair-wise agreement.
:usage:
>>> (ref_intervals,
ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
ref_labels,
t_min=0)
>>> (est_intervals,
est_labels) = mir_eval.util.adjust_intervals(est_intervals,
est_labels,
t_min=0,
t_max=ref_intervals.max())
>>> precision, recall, f = mir_eval.structure.pairwise(ref_intervals,
ref_labels,
est_intervals,
est_labels)
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- frame_size : float > 0
length (in seconds) of frames for clustering
- beta : float > 0
beta value for F-measure
:returns:
- precision : float > 0
Precision of detecting whether frames belong in the same cluster
- recall : float > 0
Recall of detecting whether frames belong in the same cluster
- f : float > 0
F-measure of detecting whether frames belong in the same cluster
:raises:
- ValueError
Thrown when the provided annotations are not valid.
:references:
.. [#] M. Levy and M. Sandler. Structural segmentation of musical audio
by constrained clustering. IEEE Transactions on Audio, Speech, and
Language Processing, 16(2):318-326, 2008.
'''
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Build the reference label agreement matrix
agree_ref = np.equal.outer(y_ref, y_ref)
# Count the unique pairs
n_agree_ref = (agree_ref.sum() - len(y_ref)) / 2.0
# Repeat for estimate
agree_est = np.equal.outer(y_est, y_est)
n_agree_est = (agree_est.sum() - len(y_est)) / 2.0
# Find where they agree
matches = np.logical_and(agree_ref, agree_est)
n_matches = (matches.sum() - len(y_ref)) / 2.0
precision = n_matches / n_agree_est
recall = n_matches / n_agree_ref
f_measure = util.f_measure(precision, recall, beta=beta)
return precision, recall, f_measure
def rand_index(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1, beta=1.0):
'''(Non-adjusted) Rand index.
:usage:
>>> (ref_intervals,
ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
ref_labels,
t_min=0)
>>> (est_intervals,
est_labels) = mir_eval.util.adjust_intervals(est_intervals,
est_labels,
t_min=0,
t_max=ref_intervals.max())
>>> rand_index = mir_eval.structure.rand_index(ref_intervals,
ref_labels,
est_intervals,
est_labels)
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- frame_size : float > 0
length (in seconds) of frames for clustering
- beta : float > 0
beta value for F-measure
:returns:
- rand_index : float > 0
Rand index
:raises:
- ValueError
Thrown when the provided annotations are not valid.
:references:
.. [#] W. M. Rand. Objective criteria for the evaluation of clustering
methods. Journal of the American Statistical association,
66(336):846-850, 1971.
'''
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Build the reference label agreement matrix
agree_ref = np.equal.outer(y_ref, y_ref)
# Repeat for estimate
agree_est = np.equal.outer(y_est, y_est)
# Find where they agree
matches_pos = np.logical_and(agree_ref, agree_est)
# Find where they disagree
matches_neg = np.logical_and(~agree_ref, ~agree_est)
n_pairs = len(y_ref) * (len(y_ref) - 1) / 2.0
n_matches_pos = (matches_pos.sum() - len(y_ref)) / 2.0
n_matches_neg = matches_neg.sum() / 2.0
rand = (n_matches_pos + n_matches_neg) / n_pairs
return rand
def _contingency_matrix(reference_indices, estimated_indices):
'''
Computes the contingency matrix of a true labeling vs an estimated one.
:parameters:
- reference_indices : np.ndarray
Array of reference indices
- estimated_indices : np.ndarray
Array of estimated indices
:returns:
- contingency_matrix : np.ndarray
Contingency matrix, shape=(#reference indices, #estimated indices)
.. note:: Based on sklearn.metrics.cluster.contingency_matrix
'''
ref_classes, ref_class_idx = np.unique(reference_indices,
return_inverse=True)
est_classes, est_class_idx = np.unique(estimated_indices,
return_inverse=True)
n_ref_classes = ref_classes.shape[0]
n_est_classes = est_classes.shape[0]
# Using coo_matrix is faster than histogram2d
return scipy.sparse.coo_matrix((np.ones(ref_class_idx.shape[0]),
(ref_class_idx, est_class_idx)),
shape=(n_ref_classes, n_est_classes),
dtype=np.int).toarray()
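# --- Illustrative sketch (editor addition, not part of the original module) ---
# For reference indices [0, 0, 1, 1] and estimated indices [0, 1, 1, 1] the
# contingency matrix counts co-occurrences of (reference class, estimated
# class): row 0 is [1, 1] and row 1 is [0, 2]. Duplicate (i, j) entries in the
# coo_matrix are summed when it is converted to a dense array.
def _example_contingency_matrix():
    '''Minimal, self-contained version of the coo_matrix construction above.'''
    import numpy as np
    import scipy.sparse
    reference_indices = np.array([0, 0, 1, 1])
    estimated_indices = np.array([0, 1, 1, 1])
    ref_classes, ref_idx = np.unique(reference_indices, return_inverse=True)
    est_classes, est_idx = np.unique(estimated_indices, return_inverse=True)
    return scipy.sparse.coo_matrix(
        (np.ones(ref_idx.shape[0]), (ref_idx, est_idx)),
        shape=(ref_classes.shape[0], est_classes.shape[0]),
        dtype=int).toarray()  # -> [[1, 1], [0, 2]]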
def _adjusted_rand_index(reference_indices, estimated_indices):
'''
    Compute the Rand index, adjusted for chance.
:parameters:
- reference_indices : np.ndarray
Array of reference indices
- estimated_indices : np.ndarray
Array of estimated indices
:returns:
- ari : float
Adjusted Rand index
.. note:: Based on sklearn.metrics.cluster.adjusted_rand_score
'''
n_samples = len(reference_indices)
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
    # Special limit cases: no clustering since the data is not split;
    # or trivial clustering where each sample is assigned a unique cluster.
    # These are perfect matches, hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1
or ref_classes.shape[0] == est_classes.shape[0] == 0
or (ref_classes.shape[0] == est_classes.shape[0] ==
len(reference_indices))):
return 1.0
contingency = _contingency_matrix(reference_indices, estimated_indices)
# Compute the ARI using the contingency data
sum_comb_c = sum(scipy.misc.comb(n_c, 2, exact=1) for n_c in
contingency.sum(axis=1))
sum_comb_k = sum(scipy.misc.comb(n_k, 2, exact=1) for n_k in
contingency.sum(axis=0))
sum_comb = sum((scipy.misc.comb(n_ij, 2, exact=1) for n_ij in
contingency.flatten()))
prod_comb = (sum_comb_c * sum_comb_k)/float(scipy.misc.comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c)/2.
return ((sum_comb - prod_comb)/(mean_comb - prod_comb))
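# --- Illustrative sketch (editor addition, not part of the original module) ---
# A quick sanity check for the adjustment above: the ARI is invariant to a
# renaming of the cluster labels and equals 1.0 for identical partitions.
def _example_adjusted_rand_index_relabeling():
    '''ARI of a partition against a relabeled copy of itself should be 1.0.'''
    import numpy as np
    a = np.array([0, 0, 1, 1, 2, 2])
    b = np.array([1, 1, 2, 2, 0, 0])  # same partition, labels permuted
    return _adjusted_rand_index(a, b)  # -> 1.0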
def ari(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1):
'''Adjusted Rand Index (ARI) for frame clustering segmentation evaluation.
:usage:
>>> (ref_intervals,
ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
ref_labels,
t_min=0)
>>> (est_intervals,
est_labels) = mir_eval.util.adjust_intervals(est_intervals,
est_labels,
t_min=0,
t_max=ref_intervals.max())
>>> ari_score = mir_eval.structure.ari(ref_intervals, ref_labels,
est_intervals, est_labels)
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- frame_size : float > 0
length (in seconds) of frames for clustering
:returns:
- ari_score : float > 0
Adjusted Rand index between segmentations.
:raises:
- ValueError
Thrown when the provided annotations are not valid.
:references:
.. [#] W. M. Rand. Objective criteria for the evaluation of clustering
methods. Journal of the American Statistical association,
66(336):846-850, 1971.
.. note::
It is assumed that ``intervals[-1]`` == length of song
.. note::
Segment intervals will be rounded down to the nearest multiple
of frame_size.
'''
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
        return 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
return _adjusted_rand_index(y_ref, y_est)
def _mutual_info_score(reference_indices, estimated_indices, contingency=None):
'''
Compute the mutual information between two sequence labelings.
:parameters:
- reference_indices : np.ndarray
Array of reference indices
- estimated_indices : np.ndarray
Array of estimated indices
- contingency : np.ndarray
Pre-computed contingency matrix. If None, one will be computed.
:returns:
- mi : float
Mutual information
.. note:: Based on sklearn.metrics.cluster.mutual_info_score
'''
if contingency is None:
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
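# --- Illustrative sketch (editor addition, not part of the original module) ---
# A useful property for sanity checks: the mutual information of a labeling
# with itself equals its entropy (natural logarithm here), e.g. ln(2) ~= 0.693
# for a balanced two-class labeling.
def _example_mutual_info_self():
    '''MI(labels, labels) should equal the entropy of the labeling.'''
    import numpy as np
    labels = np.array([0, 0, 1, 1])
    return _mutual_info_score(labels, labels)  # -> ln(2) ~= 0.693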
def _entropy(labels):
'''
Calculates the entropy for a labeling.
:parameters:
- labels : list-like
List of labels.
:returns:
- entropy : float
Entropy of the labeling.
.. note:: Based on sklearn.metrics.cluster.entropy
'''
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum)))
def _adjusted_mutual_info_score(reference_indices, estimated_indices):
'''
Compute the mutual information between two sequence labelings, adjusted for
chance.
:parameters:
- reference_indices : np.ndarray
Array of reference indices
- estimated_indices : np.ndarray
Array of estimated indices
:returns:
        - ami : float <= 1.0
            Adjusted mutual information
.. note:: Based on sklearn.metrics.cluster.adjusted_mutual_info_score
and sklearn.metrics.cluster.expected_mutual_info_score
'''
n_samples = len(reference_indices)
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1
or ref_classes.shape[0] == est_classes.shape[0] == 0):
return 1.0
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
# Calculate the MI for the two clusterings
mi = _mutual_info_score(reference_indices, estimated_indices,
contingency=contingency)
# The following code is based on
# sklearn.metrics.cluster.expected_mutual_information
R, C = contingency.shape
N = float(n_samples)
a = np.sum(contingency, axis=1).astype(np.int32)
b = np.sum(contingency, axis=0).astype(np.int32)
# There are three major terms to the EMI equation, which are multiplied to
# and then summed over varying nij values.
# While nijs[0] will never be used, having it simplifies the indexing.
nijs = np.arange(0, max(np.max(a), np.max(b)) + 1, dtype='float')
    # Stops divide-by-zero warnings. As it's not used, this is not an issue.
nijs[0] = 1
# term1 is nij / N
term1 = nijs / N
# term2 is log((N*nij) / (a * b)) == log(N * nij) - log(a * b)
# term2 uses the outer product
log_ab_outer = np.log(np.outer(a, b))
# term2 uses N * nij
log_Nnij = np.log(N * nijs)
    # term3 is large, and involves many factorials. Calculate these in log
    # space to avoid overflows.
gln_a = scipy.special.gammaln(a + 1)
gln_b = scipy.special.gammaln(b + 1)
gln_Na = scipy.special.gammaln(N - a + 1)
gln_Nb = scipy.special.gammaln(N - b + 1)
gln_N = scipy.special.gammaln(N + 1)
gln_nij = scipy.special.gammaln(nijs + 1)
# start and end values for nij terms for each summation.
start = np.array([[v - N + w for w in b] for v in a], dtype='int')
start = np.maximum(start, 1)
end = np.minimum(np.resize(a, (C, R)).T, np.resize(b, (R, C))) + 1
# emi itself is a summation over the various values.
emi = 0
for i in range(R):
for j in range(C):
for nij in range(start[i, j], end[i, j]):
term2 = log_Nnij[nij] - log_ab_outer[i, j]
# Numerators are positive, denominators are negative.
gln = (gln_a[i] + gln_b[j] + gln_Na[i] + gln_Nb[j]
- gln_N - gln_nij[nij]
- scipy.special.gammaln(a[i] - nij + 1)
- scipy.special.gammaln(b[j] - nij + 1)
- scipy.special.gammaln(N - a[i] - b[j] + nij + 1))
term3 = np.exp(gln)
emi += (term1[nij] * term2 * term3)
# Calculate entropy for each labeling
h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
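# --- Illustrative sketch (editor addition, not part of the original module) ---
# For identical labelings the MI equals the entropy, so the adjustment above
# reduces to (h - emi) / (h - emi) and the AMI is 1.0 (up to floating point).
def _example_adjusted_mutual_info_self():
    '''AMI of a labeling with itself should be ~1.0.'''
    import numpy as np
    labels = np.array([0, 0, 1, 1, 2, 2])
    return _adjusted_mutual_info_score(labels, labels)  # -> ~1.0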
def _normalized_mutual_info_score(reference_indices, estimated_indices):
'''
    Compute the mutual information between two sequence labelings, normalized
    by the geometric mean of the marginal entropies.
:parameters:
- reference_indices : np.ndarray
Array of reference indices
- estimated_indices : np.ndarray
Array of estimated indices
:returns:
- nmi : float <= 1.0
Normalized mutual information
.. note:: Based on sklearn.metrics.cluster.normalized_mutual_info_score
'''
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1
or ref_classes.shape[0] == est_classes.shape[0] == 0):
return 1.0
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = _mutual_info_score(reference_indices, estimated_indices,
contingency=contingency)
    # Calculate entropy for each labeling to normalize the MI
h_true, h_pred = _entropy(reference_indices), _entropy(estimated_indices)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def mutual_information(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1):
'''Frame-clustering segmentation: mutual information metrics.
:usage:
>>> (ref_intervals,
ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
ref_labels,
t_min=0)
>>> (est_intervals,
est_labels) = mir_eval.util.adjust_intervals(est_intervals,
est_labels,
t_min=0,
t_max=ref_intervals.max())
>>> mi, ami, nmi = mir_eval.structure.mutual_information(ref_intervals,
ref_labels,
est_intervals,
est_labels)
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- frame_size : float > 0
length (in seconds) of frames for clustering
:returns:
- MI : float > 0
Mutual information between segmentations
- AMI : float
Adjusted mutual information between segmentations.
- NMI : float > 0
            Normalized mutual information between segmentations
:raises:
- ValueError
Thrown when the provided annotations are not valid.
.. note::
It is assumed that `intervals[-1] == length of song`
.. note::
Segment intervals will be rounded down to the nearest multiple
of frame_size.
'''
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Mutual information
mutual_info = _mutual_info_score(y_ref, y_est)
# Adjusted mutual information
adj_mutual_info = _adjusted_mutual_info_score(y_ref, y_est)
# Normalized mutual information
norm_mutual_info = _normalized_mutual_info_score(y_ref, y_est)
return mutual_info, adj_mutual_info, norm_mutual_info
def nce(reference_intervals, reference_labels, estimated_intervals,
estimated_labels, frame_size=0.1, beta=1.0):
'''Frame-clustering segmentation: normalized conditional entropy
    Computes the conditional entropy of the cluster assignments, normalized
    by the maximum entropy.
:usage:
>>> (ref_intervals,
ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
ref_labels,
t_min=0)
>>> (est_intervals,
est_labels) = mir_eval.util.adjust_intervals(est_intervals,
est_labels,
t_min=0,
t_max=ref_intervals.max())
>>> S_over, S_under, S_F = mir_eval.structure.nce(ref_intervals,
ref_labels,
est_intervals,
est_labels)
:parameters:
- reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- frame_size : float > 0
length (in seconds) of frames for clustering
- beta : float > 0
beta for F-measure
:returns:
- S_over
Over-clustering score:
``1 - H(y_est | y_ref) / log(|y_est|)``
If `|y_est|==1`, then `S_over` will be 0.
- S_under
Under-clustering score:
``1 - H(y_ref | y_est) / log(|y_ref|)``
If `|y_ref|==1`, then `S_under` will be 0.
- S_F
F-measure for (S_over, S_under)
:raises:
- ValueError
Thrown when the provided annotations are not valid.
:references:
.. [#] Hanna M. Lukashevich. "Towards Quantitative Measures of
Evaluating Song Segmentation," in Proceedings of the 9th
International Society for Music Information Retrieval Conference,
            2008, pp. 375-380.
'''
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Make the contingency table: shape = (n_ref, n_est)
contingency = _contingency_matrix(y_ref, y_est).astype(float)
# Normalize by the number of frames
contingency = contingency / len(y_ref)
# Compute the marginals
p_est = contingency.sum(axis=0)
p_ref = contingency.sum(axis=1)
# H(true | prediction) = sum_j P[estimated = j] *
# sum_i P[true = i | estimated = j] log P[true = i | estimated = j]
# entropy sums over axis=0, which is true labels
true_given_est = p_est.dot(scipy.stats.entropy(contingency, base=2))
pred_given_ref = p_ref.dot(scipy.stats.entropy(contingency.T, base=2))
score_under = 0.0
if contingency.shape[0] > 1:
score_under = 1. - true_given_est / np.log2(contingency.shape[0])
score_over = 0.0
if contingency.shape[1] > 1:
score_over = 1. - pred_given_ref / np.log2(contingency.shape[1])
f_measure = util.f_measure(score_over, score_under, beta=beta)
return score_over, score_under, f_measure
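# --- Illustrative sketch (editor addition, not part of the original module) ---
# The true_given_est term above can be reproduced by hand on a small
# normalized contingency table (rows: reference classes, columns: estimated
# classes). For [[0.5, 0.0], [0.25, 0.25]], P[est] = [0.75, 0.25]; the first
# column has conditional entropy H(2/3, 1/3) ~= 0.918 bits and the second has
# 0 bits, so H(ref | est) = 0.75 * 0.918 ~= 0.689 bits.
def _example_conditional_entropy():
    '''Reproduce the true_given_est term of nce() on a toy contingency table.'''
    import numpy as np
    import scipy.stats
    contingency = np.array([[0.5, 0.0], [0.25, 0.25]])  # already normalized
    p_est = contingency.sum(axis=0)
    return p_est.dot(scipy.stats.entropy(contingency, base=2))  # -> ~0.689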
def evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs):
'''
Compute all metrics for the given reference and estimated annotations.
:usage:
>>> (ref_intervals,
ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> scores = mir_eval.segment.evaluate(ref_intervals, ref_labels,
est_intervals, est_labels)
:parameters:
- ref_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- ref_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- est_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- est_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
- kwargs
Additional keyword arguments which will be passed to the
appropriate metric or preprocessing functions.
:returns:
- scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
:raises:
- ValueError
Thrown when the provided annotations are not valid.
'''
# Adjust timespan of estimations relative to ground truth
ref_intervals, ref_labels = \
util.adjust_intervals(ref_intervals, labels=ref_labels, t_min=0.0)
est_intervals, est_labels = \
util.adjust_intervals(est_intervals, labels=est_labels, t_min=0.0,
t_max=ref_intervals[-1, -1])
# Now compute all the metrics
scores = collections.OrderedDict()
# Boundary detection
# Force these values for window
kwargs['window'] = .5
scores['Precision@0.5'], scores['Recall@0.5'], scores['F-measure@0.5'] = \
util.filter_kwargs(detection, ref_intervals, est_intervals, **kwargs)
kwargs['window'] = 3.0
scores['Precision@3.0'], scores['Recall@3.0'], scores['F-measure@3.0'] = \
util.filter_kwargs(detection, ref_intervals, est_intervals, **kwargs)
# Boundary deviation
scores['Ref-to-est deviation'], scores['Est-to-ref deviation'] = \
util.filter_kwargs(deviation, ref_intervals, est_intervals, **kwargs)
# Pairwise clustering
(scores['Pairwise Precision'],
scores['Pairwise Recall'],
scores['Pairwise F-measure']) = util.filter_kwargs(pairwise,
ref_intervals,
ref_labels,
est_intervals,
est_labels, **kwargs)
# Rand index
scores['Rand Index'] = util.filter_kwargs(rand_index, ref_intervals,
ref_labels, est_intervals,
est_labels, **kwargs)
# Adjusted rand index
scores['Adjusted Rand Index'] = util.filter_kwargs(ari, ref_intervals,
ref_labels,
est_intervals,
est_labels, **kwargs)
# Mutual information metrics
(scores['Mutual Information'],
scores['Adjusted Mutual Information'],
scores['Normalized Mutual Information']) = \
util.filter_kwargs(mutual_information, ref_intervals, ref_labels,
est_intervals, est_labels, **kwargs)
# Conditional entropy metrics
scores['NCE Over'], scores['NCE Under'], scores['NCE F-measure'] = \
util.filter_kwargs(nce, ref_intervals, ref_labels, est_intervals,
est_labels, **kwargs)
return scores
def hmeasure(ref_tree, est_tree, transitive=True, window=10, res=0.1,
beta=1.0):
'''
Computes the h-measure for the hierarchical segment annotations.
:parameters:
- ref_tree : tree.SegmentTree
reference hierarchical tree.
- est_tree : tree.SegmentTree
estimated hierarchical tree.
- transitive : bool
whether to compute the h-measures using transitivity or not.
- window : int
size of the window (in frames) to compute the h-measures.
- res : float > 0
frame rate in seconds.
- beta : float > 0
beta parameter for the F-measure.
:returns:
- h_over
Hierarchical oversegmentation score.
- h_under
Hierarchical undersegmentation score.
- h_measure
F-measure for (h_over, h_under)
'''
def round_time(t, res=0.1):
v = int(t / float(res)) * res
return v
def lca_matrix(tree, res):
'''
Input: a segment tree
Output: an n-by-n integer matrix indicating the height of the least
common ancestor of each pair of frames (i, j).
'''
# Figure out how many frames we need
n = int((round_time(tree.root.segment.end, res=res) -
round_time(tree.root.segment.start, res=res)) / res)
# Build a mapping of level->height
height_map = {}
# Initialize the LCA matrix
H = np.zeros( (n, n), dtype=np.uint8)
# Breadth-first traversal of the tree
queue = [tree.root]
while queue:
node = queue.pop(0)
# Get the node's level
if node.parent is not None:
height_map[node] = 1 + height_map[node.parent]
else:
height_map[node] = 0
s = int(round_time(node.segment.start, res=res) / res)
t = int(round_time(node.segment.end, res=res) / res)
H[s:t, s:t] = height_map[node]
queue.extend(node.children)
return H
def tree_gauc(H_ref, H_est, transitive, window, res):
        # The LCA matrices are built by the caller;
        # make sure they cover the same number of frames
assert H_ref.shape == H_est.shape
# How many frames?
n = H_ref.shape[0]
# By default, the window covers the entire track
if window is None:
window = n
# Initialize the score
score = 0.0
# Iterate over query frames
n_f = 0
for q in range(n):
# Find all pairs i,j such that H_ref[q, i] > H_ref[q, j]
R = H_ref[q, max(0, q-window):min(n, q+window)]
# And the same for the estimation
E = H_est[q, max(0, q-window):min(n, q+window)]
if transitive:
# Transitive: count comparisons across any level
S_ref = np.greater.outer(R, R)
else:
# Non-transitive: count comparisons only across immediate levels
S_ref = np.equal.outer(R, R+1)
S_est = np.greater.outer(E, E)
# Don't count (q,q) as a result
idx = min(q, window)
S_ref[idx, :] = False
S_ref[:, idx] = False
# Compute normalization constant
Z = float(S_ref.sum())
# Add up agreement for frames
if Z > 0:
score += np.sum(np.logical_and(S_ref, S_est)) / Z
n_f += 1.0
if n_f:
return score / n_f
# Convention: 0/0 = 0
return score
H_ref = lca_matrix(ref_tree, res)
H_est = lca_matrix(est_tree, res)
h_under = tree_gauc(H_ref, H_est, transitive, window, res)
h_over = tree_gauc(H_est, H_ref, transitive, window, res)
f_measure = util.f_measure(h_over, h_under, beta=beta)
return h_over, h_under, f_measure
| gpl-3.0 |
Barmaley-exe/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on
the `iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
data points are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the Logistic Regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
0todd0000/probFEApy | modelB/modelB.py | 1 | 4767 |
'''
This script implements the "Model B" analyses from:
Pataky TC, Koseki M, Cox PG (2016) Probabilistic biomechanical
finite element simulations: whole-model classical hypothesis testing
based on upcrossing geometry. PeerJ Computer Science. (in press)
!!!NOTE!!!
In order to run this script you must modify the "path2febio" variable below.
There are three sub models:
modelB0.feb
modelB1.feb
modelB2.feb
Model B0 contains an indenter with a perfectly flat surface.
Models B1 and B2 add random noise to the indenter surface's z coordinates,
with the randomness amplitude biased toward one side (B1) or the other (B2).
The main procedures implemented in this script include:
1. FEBio model file manipulation (material stiffness distribution)
2. Simulation (using the FEBio solver version 2.5)
3. FEBio results parsing
Software dependencies:
(other versions of the following packages should also work)
Non-Python software:
FEBio 2.4 (febio.org)
Python software:
Python 2.7 (python.org)
NumPy 1.10 (scipy.org)
Matplotlib 1.5 (matplotlib.org)
This script runs in 2.1 minutes on:
Mac OS X 10.11, 2.7 GHz 12-core Intel Xeon E5, 32 GB 1866 MHz DDR3 ECC
It was also tested on Windows 7 32-bit
Version 0.1 (2016.09.02)
'''
import os
import numpy as np
from matplotlib import pyplot
from xml.etree.ElementTree import ElementTree
#---------------------------------------------------------------#
### USER PARAMETERS ###
### Specify the path to the FEBio binary executable:
path2febio = '/Applications/febio/v2.5.0/bin/FEBio2'
### Default executable locations are:
### Mac OS X: /Applications/FEBio2.X.X/bin/FEBio2
### Windows: C:\\Program Files\\febio-2.X.X\\bin\\FEBio2.exe
#---------------------------------------------------------------#
def parse_logfile(fnameLOG, nElements=2048):
'''
Reads the strain and stress tensor fields from the final data record in an FEBio log file.
Arguments:
        fnameLOG -- full path to the log file
Returns:
        A -- an (nElements x 12) array containing the strain and stress tensor fields
'''
with open(fnameLOG, 'r') as fid:
lines = fid.readlines()
ind = []
for i,s in enumerate(lines):
if s.startswith('Data Record'):
ind.append(i)
i = ind[-1] + 5
A = [s.strip().split(' ')[1:] for s in lines[i:i+nElements]]
return np.asarray(A, dtype=float)
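# --- Illustrative sketch (editor addition, not part of the original script) ---
# parse_logfile assumes that the element data starts five lines after the last
# line beginning with 'Data Record', one space-separated line per element with
# the element id first. The fake log below is built to match that inferred
# layout; it is not real FEBio output.
def _example_parse_logfile():
    '''Write a tiny two-element fake log and parse it with parse_logfile.'''
    import tempfile
    lines = ['Data Record #1\n'] + ['header line\n'] * 4 + [
        '1 0.1 0.2 0.3 0.0 0.0 0.0 10.0 20.0 30.0 0.0 0.0 0.0\n',
        '2 0.2 0.2 0.2 0.0 0.0 0.0 20.0 20.0 20.0 0.0 0.0 0.0\n']
    with tempfile.NamedTemporaryFile('w', suffix='.log', delete=False) as fid:
        fid.writelines(lines)
        fake_log = fid.name
    return parse_logfile(fake_log, nElements=2)  # -> (2, 12) float array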
def simulate(fname0, k, fname1, silent=False):
'''
Simulate the model given a material value "k".
Arguments:
fname0 -- template FEB file
        k -- scalar value written into the model's Mooney-Rivlin <k> material parameter
fname1 -- temporary FEB file to be written and simulated
Returns:
stress -- von Mises stress field of the indented surface (top elements only)
'''
write_model(fname0, k, fname1)
### simulate:
    command = '"%s" -i %s' % (path2febio, fname1)
if silent:
command += ' -silent'
os.system( command )
### parse output:
    fnameLOG = os.path.splitext(fname1)[0] + '.log'
A = parse_logfile(fnameLOG)
stress = tensor2effective(A[:,6:])
### reshape into an image:
nex,ney = 32,32 #numbers of elements in the x and y directions
n = nex*ney #total number of elements in the top layer
return stress[-n:].reshape([nex,ney])
def tensor2effective(Y):
'''
Compute effective strain field from a strain tensor field.
(Or compute von Mises stress field from a stress tensor field)
Arguments:
        Y -- an (nElements, 6) numpy array containing the tensor field
Returns:
        y -- effective strain field (or von Mises stress field): an (nElements,) numpy array
'''
x0,x1,x2, a,b,c = Y.T
s = (x0-x1)**2 + (x0-x2)**2 + (x1-x2)**2 + 6*(a*a + b*b + c*c)
return (0.5*s)**0.5
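# --- Illustrative sketch (editor addition, not part of the original script) ---
# Quick check of the formula above: for a uniaxial state (s, 0, 0) with no
# shear terms the effective (von Mises) value reduces to s itself.
def _example_von_mises_uniaxial(s=100.0):
    '''tensor2effective on a single uniaxial tensor should return [s].'''
    import numpy as np
    Y = np.array([[s, 0.0, 0.0, 0.0, 0.0, 0.0]])
    return tensor2effective(Y)  # -> array([ 100.])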
def write_model(fnameFEB0, k, fnameFEB1):
'''
Write a new FEB file with a new Mooney-Rivlin parameter "k"
Arguments:
fnameFEB0 : template FEB file
k : scalar
fnameFEB1 : FEB file to be written (will be overwritten if it exists)
'''
tree = ElementTree()
tree.parse(fnameFEB0)
    root = tree.getroot()
node = root.find('Material/material/k')
node.text = str(k)
tree.write(fnameFEB1, encoding='ISO-8859-1')
#(0) Run model:
dir0 = os.path.split(__file__)[0]
model = 1 #0, 1 or 2 (0=flat contact surface, 1&2=jagged contact surfaces)
fnameFEB0 = os.path.join( dir0 , 'modelB%d.feb' %model)
fnameFEB1 = os.path.join( dir0 , 'temp.feb')
k = 800
S = simulate(fnameFEB0, k, fnameFEB1, silent=False) #silent=True will silence FEBio output
#(1) Plot the distribution:
pyplot.close('all')
fig = pyplot.figure(figsize=(6,4))
pyplot.get_current_fig_manager().window.move(0, 0)
ax = pyplot.axes()
ax.imshow(S, interpolation='nearest')
cb = pyplot.colorbar(mappable=ax.images[0])
cb.set_label('von Mises stress (Pa)')
pyplot.show()
| mit |
magnastrazh/NEUCOGAR | nest/serotonin/research/C/nest-2.10.0/examples/nest/Potjans_2014/spike_analysis.py | 13 | 5601 | # -*- coding: utf-8 -*-
#
# spike_analysis.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Merges spike files, produces raster plots, calculates and plots firing rates
import numpy as np
import glob
import matplotlib.pyplot as plt
import os
datapath = '../data'
# get simulation time and numbers of neurons recorded from sim_params.sli
f = open(os.path.join(datapath, 'sim_params.sli'), 'r')
for line in f:
if 't_sim' in line:
T = float(line.split()[1])
if '/record_fraction_neurons_spikes' in line:
record_frac = line.split()[1]
f.close()
f = open(os.path.join(datapath, 'sim_params.sli'), 'r')
for line in f:
if record_frac == 'true':
if 'frac_rec_spikes' in line:
frac_rec = float(line.split()[1])
else:
if 'n_rec_spikes' in line:
n_rec = int(line.split()[1])
f.close()
T_start = 200. # starting point of analysis (to avoid transients)
# load GIDs
gidfile = open(os.path.join(datapath , 'population_GIDs.dat'), 'r')
gids = []
for l in gidfile:
a = l.split()
gids.append([int(a[0]),int(a[1])])
print 'Global IDs:'
print gids
print
# number of populations
num_pops = len(gids)
print 'Number of populations:'
print num_pops
print
# first GID in each population
raw_first_gids = [gids[i][0] for i in np.arange(len(gids))]
# population sizes
pop_sizes = [gids[i][1]-gids[i][0]+1 for i in np.arange(len(gids))]
# numbers of neurons for which spikes were recorded
if record_frac == 'true':
rec_sizes = [int(pop_sizes[i]*frac_rec) for i in xrange(len(pop_sizes))]
else:
rec_sizes = [n_rec]*len(pop_sizes)
# first GID of each population once device GIDs are dropped
first_gids=[int(1 + np.sum(pop_sizes[:i])) for i in np.arange(len(pop_sizes))]
# last GID of each population once device GIDs are dropped
last_gids = [int(np.sum(pop_sizes[:i+1])) for i in np.arange(len(pop_sizes))]
# convert lists to a nicer format, i.e. [[2/3e, 2/3i], []....]
Pop_sizes =[pop_sizes[i:i+2] for i in xrange(0,len(pop_sizes),2)]
print 'Population sizes:'
print Pop_sizes
print
Raw_first_gids =[raw_first_gids[i:i+2] for i in xrange(0,len(raw_first_gids),2)]
First_gids = [first_gids[i:i+2] for i in xrange(0,len(first_gids),2)]
Last_gids = [last_gids[i:i+2] for i in xrange(0,len(last_gids),2)]
# total number of neurons in the simulation
num_neurons = last_gids[len(last_gids)-1]
print 'Total number of neurons:'
print num_neurons
print
# load spikes from gdf files, correct GIDs and merge them in population files,
# and store spike trains
# will contain neuron id resolved spike trains
neuron_spikes = [[] for i in np.arange(num_neurons+1)]
# container for population-resolved spike data
spike_data= [[[],[]],[[],[]],[[],[]],[[],[]],[[],[]],[[],[]],[[],[]],[[],[]]]
counter = 0
for layer in ['0','1','2','3']:
for population in ['0','1']:
output = os.path.join(datapath, 'population_spikes-{}-{}.gdf'.format(layer, population))
file_pattern = os.path.join(datapath, 'spikes_{}_{}*'.format(layer, population))
files = glob.glob(file_pattern)
print 'Merge '+str(len(files))+' spike files from L'+layer+'P'+population
if files:
merged_file = open(output,'w')
for f in files:
data = open(f,'r')
for l in data :
a = l.split()
a[0] = int(a[0])
a[1] = float(a[1])
raw_first_gid = Raw_first_gids[int(layer)][int(population)]
first_gid = First_gids[int(layer)][int(population)]
a[0] = a[0] - raw_first_gid + first_gid
if(a[1] > T_start): # discard data in the start-up phase
spike_data[counter][0].append(num_neurons-a[0])
spike_data[counter][1].append(a[1]-T_start)
neuron_spikes[a[0]].append(a[1]-T_start)
converted_line = str(a[0]) + '\t' + str(a[1]) +'\n'
merged_file.write(converted_line)
data.close()
merged_file.close()
counter +=1
clrs=['0','0.5','0','0.5','0','0.5','0','0.5']
plt.ion()
# raster plot
plt.figure(1)
counter = 1
for j in np.arange(num_pops):
for i in np.arange(first_gids[j],first_gids[j]+rec_sizes[j]):
plt.plot(neuron_spikes[i],np.ones_like(neuron_spikes[i])+sum(rec_sizes)-counter,'k o',ms=1, mfc=clrs[j],mec=clrs[j])
counter+=1
plt.xlim(0,T-T_start)
plt.ylim(0,sum(rec_sizes))
plt.xlabel(r'time (ms)')
plt.ylabel(r'neuron id')
plt.savefig(os.path.join(datapath, 'rasterplot.png'))
# firing rates
rates = []
temp = 0
for i in np.arange(num_pops):
for j in np.arange(first_gids[i], last_gids[i]):
temp+= len(neuron_spikes[j])
rates.append(temp/(rec_sizes[i]*(T-T_start))*1e3)
temp = 0
print
print 'Firing rates:'
print rates
plt.figure(2)
ticks= np.arange(num_pops)
plt.bar(ticks, rates, width=0.9, color='k')
xticklabels = ['L2/3e','L2/3i','L4e','L4i','L5e','L5i','L6e','L6i']
plt.setp(plt.gca(), xticks=ticks+0.5, xticklabels=xticklabels)
plt.xlabel(r'subpopulation')
plt.ylabel(r'firing rate (spikes/s)')
plt.savefig(os.path.join(datapath, 'firing_rates.png'))
plt.show()
| gpl-2.0 |
andnovar/ggplot | ggplot/tests/test_scale_facet_wrap.py | 12 | 4935 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from . import cleanup, get_assert_same_ggplot
assert_same_ggplot = get_assert_same_ggplot(__file__)
from nose.tools import assert_true, assert_raises
from ggplot import *
import matplotlib.pyplot as plt
@cleanup
def test_not_turn_off_first_axis():
    # as per GH47, the first axis was switched off when we had a square number of axes
import pandas as pd
# 4 plots
dat4 = pd.DataFrame({'x': range(40), 'y': range(40), 'w': (list('abcd')* 10)})
gg = ggplot(aes(x = 'x', y = 'y'), data=dat4) + geom_line() + facet_wrap('w')
assert_same_ggplot(gg, "first_ax_not_off")
@cleanup
def test_scale_facet_wrap_visual():
p = ggplot(aes(x="price"), data=diamonds) + geom_histogram()
assert_same_ggplot(p + facet_wrap("cut", scales="free"), "free")
assert_same_ggplot(p + facet_wrap("cut", scales="free_x"), "free_x")
assert_same_ggplot(p + facet_wrap("cut", scales="free_y"), "free_y")
assert_same_ggplot(p + facet_wrap("cut", scales=None), "none")
def test_scale_facet_wrap_exception():
with assert_raises(Exception):
# need at least one variable
facet_wrap()
def test_add_scale_returns_new_ggplot_object():
# an older implementation set values on the original ggplot object and only made a deepcopy on the last step.
# Actually all geoms/... should have such a test...
p = ggplot(aes(x="price"), data=diamonds) + geom_histogram()
c, r = p.n_columns, p.n_rows
p2 = p + facet_wrap("cut", scales="free")
cn, rn = p.n_columns, p.n_rows
c2, r2 = p2.n_columns, p2.n_rows
assert_true(c==cn and r==rn, "Original object changed!")
assert_true(c!=c2 or r!=r2, "New object not changed!")
@cleanup
def test_scale_facet_wrap_internals():
def convertText(t):
"""Return a float for the text value of a matplotlib Text object."""
try:
return float(t.get_text())
except:
# don't mask the error, just let the assert raise the test failure
return 0
def empty(t):
"""Return True if the Text object is an empty string."""
return len(t.get_text().strip()) == 0
p = ggplot(aes(x="price"), data=diamonds) + geom_histogram()
# Only p2 has the new measures for column!
p2 = p + facet_wrap("cut", scales="free")
print(p2)
columns = p2.n_columns
fig = plt.gcf()
# When the scales are free, every plot should have x and y labels. Don't
# test the tick values because each plot is free to set its own.
for ax in fig.axes:
assert_true(len(ax.get_xticklabels()) > 0)
assert_true(len(ax.get_yticklabels()) > 0)
print(p + facet_wrap("cut", scales="free_x"))
fig = plt.gcf()
yticks = fig.axes[0].get_yticks()
for pos, ax in enumerate(fig.axes):
# When only the x-axis is free, all plots should have the same y scale
assert_true(all(ax.get_yticks() == yticks))
if pos % columns == 0:
# Only plots in the first column should have y labels
assert_true(all(list(map(convertText, ax.get_yticklabels())) == yticks))
else:
# Plots in all other columns should have no labels
assert_true(all(map(empty, ax.get_yticklabels())))
# Every plot should have labels on its x-axis
assert_true(len(ax.get_xticklabels()) > 0)
print(p + facet_wrap("cut", scales="free_y"))
fig = plt.gcf()
xticks = fig.axes[0].get_xticks()
subplots = len(fig.axes)
for pos, ax in enumerate(fig.axes):
assert_true(all(ax.get_xticks() == xticks))
if subplots - pos > columns:
# Only the bottom plot of each column gets x labels. So only the
# last N plots (where N = number of columns) get labels.
assert_true(all(map(empty, ax.get_xticklabels())))
else:
assert_true(all(list(map(convertText, ax.get_xticklabels())) == xticks))
# All plots should have y labels
assert_true(len(ax.get_yticklabels()) > 0)
print(p + facet_wrap("cut", scales=None))
fig = plt.gcf()
xticks = fig.axes[0].get_xticks()
yticks = fig.axes[0].get_yticks()
for pos, ax in enumerate(fig.axes):
# Every plot should have the same x and y scales
assert_true(all(ax.get_xticks() == xticks))
assert_true(all(ax.get_yticks() == yticks))
# Repeat the tests for labels from both free_x and free_y
if subplots - pos > columns:
assert_true(all(map(empty, ax.get_xticklabels())))
else:
assert_true(all(list(map(convertText, ax.get_xticklabels())) == xticks))
if pos % columns == 0:
assert_true(all(list(map(convertText, ax.get_yticklabels())) == yticks))
else:
assert_true(all(map(empty, ax.get_yticklabels())))
| bsd-2-clause |
zzcclp/spark | python/run-tests.py | 15 | 13614 | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from argparse import ArgumentParser
import os
import re
import shutil
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
import uuid
import queue as Queue
from multiprocessing import Manager
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../dev/"))
from sparktestsupport import SPARK_HOME # noqa (suppress pep8 warnings)
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
from sparktestsupport.modules import all_modules, pyspark_sql # noqa
python_modules = dict((m.name, m) for m in all_modules if m.python_test_goals if m.name != 'root')
def print_red(text):
print('\033[31m' + text + '\033[0m')
SKIPPED_TESTS = None
LOG_FILE = os.path.join(SPARK_HOME, "python/unit-tests.log")
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
# Find out where the assembly jars are located.
# TODO: revisit for Scala 2.13
for scala in ["2.12"]:
build_dir = os.path.join(SPARK_HOME, "assembly", "target", "scala-" + scala)
if os.path.isdir(build_dir):
SPARK_DIST_CLASSPATH = os.path.join(build_dir, "jars", "*")
break
else:
raise RuntimeError("Cannot find assembly build directory, please build Spark first.")
def run_individual_python_test(target_dir, test_name, pyspark_python):
env = dict(os.environ)
env.update({
'SPARK_DIST_CLASSPATH': SPARK_DIST_CLASSPATH,
'SPARK_TESTING': '1',
'SPARK_PREPEND_CLASSES': '1',
'PYSPARK_PYTHON': which(pyspark_python),
'PYSPARK_DRIVER_PYTHON': which(pyspark_python),
# Preserve legacy nested timezone behavior for pyarrow>=2, remove after SPARK-32285
'PYARROW_IGNORE_TIMEZONE': '1',
})
# Create a unique temp directory under 'target/' for each run. The TMPDIR variable is
# recognized by the tempfile module to override the default system temp directory.
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
while os.path.isdir(tmp_dir):
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
os.mkdir(tmp_dir)
env["TMPDIR"] = tmp_dir
metastore_dir = os.path.join(tmp_dir, str(uuid.uuid4()))
while os.path.isdir(metastore_dir):
metastore_dir = os.path.join(metastore_dir, str(uuid.uuid4()))
os.mkdir(metastore_dir)
# Also override the JVM's temp directory by setting driver and executor options.
java_options = "-Djava.io.tmpdir={0} -Dio.netty.tryReflectionSetAccessible=true".format(tmp_dir)
spark_args = [
"--conf", "spark.driver.extraJavaOptions='{0}'".format(java_options),
"--conf", "spark.executor.extraJavaOptions='{0}'".format(java_options),
"--conf", "spark.sql.warehouse.dir='{0}'".format(metastore_dir),
"pyspark-shell"
]
env["PYSPARK_SUBMIT_ARGS"] = " ".join(spark_args)
LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
start_time = time.time()
try:
per_test_output = tempfile.TemporaryFile()
retcode = subprocess.Popen(
[os.path.join(SPARK_HOME, "bin/pyspark")] + test_name.split(),
stderr=per_test_output, stdout=per_test_output, env=env).wait()
shutil.rmtree(tmp_dir, ignore_errors=True)
except:
LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(1)
duration = time.time() - start_time
# Exit on the first failure.
if retcode != 0:
try:
with FAILURE_REPORTING_LOCK:
with open(LOG_FILE, 'ab') as log_file:
per_test_output.seek(0)
log_file.writelines(per_test_output)
per_test_output.seek(0)
for line in per_test_output:
decoded_line = line.decode("utf-8", "replace")
if not re.match('[0-9]+', decoded_line):
print(decoded_line, end='')
per_test_output.close()
except:
LOGGER.exception("Got an exception while trying to print failed test output")
finally:
print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
else:
skipped_counts = 0
try:
per_test_output.seek(0)
# Here expects skipped test output from unittest when verbosity level is
# 2 (or --verbose option is enabled).
decoded_lines = map(lambda line: line.decode("utf-8", "replace"), iter(per_test_output))
skipped_tests = list(filter(
lambda line: re.search(r'test_.* \(pyspark\..*\) ... (skip|SKIP)', line),
decoded_lines))
skipped_counts = len(skipped_tests)
if skipped_counts > 0:
key = (pyspark_python, test_name)
assert SKIPPED_TESTS is not None
SKIPPED_TESTS[key] = skipped_tests
per_test_output.close()
except:
import traceback
print_red("\nGot an exception while trying to store "
"skipped test output:\n%s" % traceback.format_exc())
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
if skipped_counts != 0:
LOGGER.info(
"Finished test(%s): %s (%is) ... %s tests were skipped", pyspark_python, test_name,
duration, skipped_counts)
else:
LOGGER.info(
"Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
python_execs = [x for x in ["python3.6", "pypy3"] if which(x)]
if "python3.6" not in python_execs:
p = which("python3")
if not p:
LOGGER.error("No python3 executable found. Exiting!")
os._exit(1)
else:
python_execs.insert(0, p)
return python_execs
def parse_opts():
parser = ArgumentParser(
prog="run-tests"
)
parser.add_argument(
"--python-executables", type=str, default=','.join(get_default_python_executables()),
help="A comma-separated list of Python executables to test against (default: %(default)s)"
)
parser.add_argument(
"--modules", type=str,
default=",".join(sorted(python_modules.keys())),
help="A comma-separated list of Python modules to test (default: %(default)s)"
)
parser.add_argument(
"-p", "--parallelism", type=int, default=4,
help="The number of suites to test in parallel (default %(default)d)"
)
parser.add_argument(
"--verbose", action="store_true",
help="Enable additional debug logging"
)
group = parser.add_argument_group("Developer Options")
group.add_argument(
"--testnames", type=str,
default=None,
help=(
"A comma-separated list of specific modules, classes and functions of doctest "
"or unittest to test. "
"For example, 'pyspark.sql.foo' to run the module as unittests or doctests, "
"'pyspark.sql.tests FooTests' to run the specific class of unittests, "
"'pyspark.sql.tests FooTests.test_foo' to run the specific unittest in the class. "
"'--modules' option is ignored if they are given.")
)
args, unknown = parser.parse_known_args()
if unknown:
parser.error("Unsupported arguments: %s" % ' '.join(unknown))
if args.parallelism < 1:
parser.error("Parallelism cannot be less than 1")
return args
def _check_coverage(python_exec):
# Make sure if coverage is installed.
try:
subprocess_check_output(
[python_exec, "-c", "import coverage"],
stderr=open(os.devnull, 'w'))
except:
print_red("Coverage is not installed in Python executable '%s' "
"but 'COVERAGE_PROCESS_START' environment variable is set, "
"exiting." % python_exec)
sys.exit(-1)
def main():
opts = parse_opts()
if opts.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
should_test_modules = opts.testnames is None
logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
if os.path.exists(LOG_FILE):
os.remove(LOG_FILE)
python_execs = opts.python_executables.split(',')
LOGGER.info("Will test against the following Python executables: %s", python_execs)
if should_test_modules:
modules_to_test = []
for module_name in opts.modules.split(','):
if module_name in python_modules:
modules_to_test.append(python_modules[module_name])
else:
print("Error: unrecognized module '%s'. Supported modules: %s" %
(module_name, ", ".join(python_modules)))
sys.exit(-1)
LOGGER.info("Will test the following Python modules: %s", [x.name for x in modules_to_test])
else:
testnames_to_test = opts.testnames.split(',')
LOGGER.info("Will test the following Python tests: %s", testnames_to_test)
task_queue = Queue.PriorityQueue()
for python_exec in python_execs:
# Check if the python executable has coverage installed when 'COVERAGE_PROCESS_START'
# environmental variable is set.
if "COVERAGE_PROCESS_START" in os.environ:
_check_coverage(python_exec)
python_implementation = subprocess_check_output(
[python_exec, "-c", "import platform; print(platform.python_implementation())"],
universal_newlines=True).strip()
LOGGER.info("%s python_implementation is %s", python_exec, python_implementation)
LOGGER.info("%s version is: %s", python_exec, subprocess_check_output(
[python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
if should_test_modules:
for module in modules_to_test:
if python_implementation not in module.excluded_python_implementations:
for test_goal in module.python_test_goals:
heavy_tests = ['pyspark.streaming.tests', 'pyspark.mllib.tests',
'pyspark.tests', 'pyspark.sql.tests', 'pyspark.ml.tests',
'pyspark.pandas.tests']
if any(map(lambda prefix: test_goal.startswith(prefix), heavy_tests)):
priority = 0
else:
priority = 100
task_queue.put((priority, (python_exec, test_goal)))
else:
for test_goal in testnames_to_test:
task_queue.put((0, (python_exec, test_goal)))
# Create the target directory before starting tasks to avoid races.
target_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'target'))
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
def process_queue(task_queue):
while True:
try:
(priority, (python_exec, test_goal)) = task_queue.get_nowait()
except Queue.Empty:
break
try:
run_individual_python_test(target_dir, test_goal, python_exec)
finally:
task_queue.task_done()
start_time = time.time()
for _ in range(opts.parallelism):
worker = Thread(target=process_queue, args=(task_queue,))
worker.daemon = True
worker.start()
try:
task_queue.join()
except (KeyboardInterrupt, SystemExit):
print_red("Exiting due to interrupt")
sys.exit(-1)
total_duration = time.time() - start_time
LOGGER.info("Tests passed in %i seconds", total_duration)
for key, lines in sorted(SKIPPED_TESTS.items()):
pyspark_python, test_name = key
LOGGER.info("\nSkipped tests in %s with %s:" % (test_name, pyspark_python))
for line in lines:
LOGGER.info(" %s" % line.rstrip())
if __name__ == "__main__":
SKIPPED_TESTS = Manager().dict()
main()
| apache-2.0 |
billy-inn/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
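# --- Illustrative sketch (editor addition, not part of the original example) ---
# The per-component ellipse parameters used in the loop above can be isolated
# into a helper; this simply mirrors the computation already performed there
# (eigen-decomposition of the covariance supplies the axis lengths and the
# orientation passed to mpl.patches.Ellipse).
def _covariance_to_ellipse_params(covar):
    """Return the (width, height, angle_deg) used for the Ellipse patch above."""
    v, w = linalg.eigh(covar)
    u = w[0] / linalg.norm(w[0])
    angle_deg = 180. * np.arctan(u[1] / u[0]) / np.pi
    return v[0], v[1], 180. + angle_deg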
| bsd-3-clause |
hetaodie/hetaodie.github.io | assets/media/uda-ml/fjd/ccjl/DBSCAN 解决/dbscan_lab_helper.py | 1 | 2801 | import matplotlib.pyplot as plt
import numpy as np
from itertools import cycle, islice
from sklearn import cluster
figsize = (10,10)
point_size=150
point_border=0.8
def plot_dataset(dataset, xlim=(-15, 15), ylim=(-15, 15)):
plt.figure(figsize=figsize)
plt.scatter(dataset[:,0], dataset[:,1], s=point_size, color="#00B3E9", edgecolor='black', lw=point_border)
plt.xlim(xlim)
plt.ylim(ylim)
plt.show()
def plot_clustered_dataset(dataset, y_pred, xlim=(-15, 15), ylim=(-15, 15), neighborhood=False, epsilon=0.5):
fig, ax = plt.subplots(figsize=figsize)
colors = np.array(list(islice(cycle(['#df8efd', '#78c465', '#ff8e34',
'#f65e97', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']),
int(max(y_pred) + 1))))
colors = np.append(colors, '#BECBD6')
if neighborhood:
for point in dataset:
circle1 = plt.Circle(point, epsilon, color='#666666', fill=False, zorder=0, alpha=0.3)
ax.add_artist(circle1)
ax.scatter(dataset[:, 0], dataset[:, 1], s=point_size, color=colors[y_pred], zorder=10, edgecolor='black', lw=point_border)
plt.xlim(xlim)
plt.ylim(ylim)
plt.show()
def plot_dbscan_grid(dataset, eps_values, min_samples_values):
fig = plt.figure(figsize=(16, 20))
plt.subplots_adjust(left=.02, right=.98, bottom=0.001, top=.96, wspace=.05,
hspace=0.25)
plot_num = 1
for i, min_samples in enumerate(min_samples_values):
for j, eps in enumerate(eps_values):
ax = fig.add_subplot( len(min_samples_values) , len(eps_values), plot_num)
dbscan = cluster.DBSCAN(eps=eps, min_samples=min_samples)
y_pred_2 = dbscan.fit_predict(dataset)
colors = np.array(list(islice(cycle(['#df8efd', '#78c465', '#ff8e34',
'#f65e97', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']),
int(max(y_pred_2) + 1))))
colors = np.append(colors, '#BECBD6')
for point in dataset:
circle1 = plt.Circle(point, eps, color='#666666', fill=False, zorder=0, alpha=0.3)
ax.add_artist(circle1)
ax.text(0, -0.03, 'Epsilon: {} \nMin_samples: {}'.format(eps, min_samples), transform=ax.transAxes, fontsize=16, va='top')
ax.scatter(dataset[:, 0], dataset[:, 1], s=50, color=colors[y_pred_2], zorder=10, edgecolor='black', lw=0.5)
plt.xticks(())
plt.yticks(())
plt.xlim(-14, 5)
plt.ylim(-12, 7)
plot_num = plot_num + 1
plt.show() | mit |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/matplotlib/tri/triinterpolate.py | 10 | 66366 | """
Interpolation inside triangular grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
from matplotlib.tri import Triangulation
from matplotlib.tri.trifinder import TriFinder
from matplotlib.tri.tritools import TriAnalyzer
import numpy as np
import warnings
__all__ = ('TriInterpolator', 'LinearTriInterpolator', 'CubicTriInterpolator')
class TriInterpolator(object):
"""
Abstract base class for classes used to perform interpolation on
triangular grids.
Derived classes implement the following methods:
- ``__call__(x, y)`` ,
where x, y are array_like point coordinates of the same shape, and
that returns a masked array of the same shape containing the
interpolated z-values.
- ``gradient(x, y)`` ,
where x, y are array_like point coordinates of the same
shape, and that returns a list of 2 masked arrays of the same shape
containing the 2 derivatives of the interpolator (derivatives of
interpolated z values with respect to x and y).
"""
def __init__(self, triangulation, z, trifinder=None):
if not isinstance(triangulation, Triangulation):
raise ValueError("Expected a Triangulation object")
self._triangulation = triangulation
self._z = np.asarray(z)
if self._z.shape != self._triangulation.x.shape:
raise ValueError("z array must have same length as triangulation x"
" and y arrays")
if trifinder is not None and not isinstance(trifinder, TriFinder):
raise ValueError("Expected a TriFinder object")
self._trifinder = trifinder or self._triangulation.get_trifinder()
# Default scaling factors : 1.0 (= no scaling)
# Scaling may be used for interpolations for which the order of
# magnitude of x, y has an impact on the interpolant definition.
# Please refer to :meth:`_interpolate_multikeys` for details.
self._unit_x = 1.0
self._unit_y = 1.0
# Default triangle renumbering: None (= no renumbering)
        # Renumbering may be used to avoid unnecessary computations
# if complex calculations are done inside the Interpolator.
# Please refer to :meth:`_interpolate_multikeys` for details.
self._tri_renum = None
# __call__ and gradient docstrings are shared by all subclasses
# (except, if needed, relevant additions).
# However these methods are only implemented in subclasses to avoid
# confusion in the documentation.
docstring__call__ = """
Returns a masked array containing interpolated values at the specified
x,y points.
Parameters
----------
x, y : array-like
x and y coordinates of the same shape and any number of
dimensions.
Returns
-------
z : np.ma.array
Masked array of the same shape as *x* and *y* ; values
corresponding to (*x*, *y*) points outside of the triangulation
are masked out.
"""
docstringgradient = """
Returns a list of 2 masked arrays containing interpolated derivatives
at the specified x,y points.
Parameters
----------
x, y : array-like
x and y coordinates of the same shape and any number of
dimensions.
Returns
-------
dzdx, dzdy : np.ma.array
2 masked arrays of the same shape as *x* and *y* ; values
corresponding to (x,y) points outside of the triangulation
are masked out.
The first returned array contains the values of
:math:`\\frac{\\partial z}{\\partial x}` and the second those of
:math:`\\frac{\\partial z}{\\partial y}`.
"""
def _interpolate_multikeys(self, x, y, tri_index=None,
return_keys=('z',)):
"""
Versatile (private) method defined for all TriInterpolators.
:meth:`_interpolate_multikeys` is a wrapper around method
:meth:`_interpolate_single_key` (to be defined in the child
subclasses).
        :meth:`_interpolate_single_key` actually performs the interpolation,
but only for 1-dimensional inputs and at valid locations (inside
unmasked triangles of the triangulation).
The purpose of :meth:`_interpolate_multikeys` is to implement the
following common tasks needed in all subclasses implementations:
- calculation of containing triangles
- dealing with more than one interpolation request at the same
location (e.g., if the 2 derivatives are requested, it is
unnecessary to compute the containing triangles twice)
- scaling according to self._unit_x, self._unit_y
- dealing with points outside of the grid (with fill value np.nan)
         - dealing with multi-dimensional *x*, *y* arrays: flattening for
           the :meth:`_interpolate_single_key` call and final reshaping.
(Note that np.vectorize could do most of those things very well for
you, but it does it by function evaluations over successive tuples of
the input arrays. Therefore, this tends to be more time consuming than
using optimized numpy functions - e.g., np.dot - which can be used
easily on the flattened inputs, in the child-subclass methods
:meth:`_interpolate_single_key`.)
It is guaranteed that the calls to :meth:`_interpolate_single_key`
will be done with flattened (1-d) array_like input parameters `x`, `y`
and with flattened, valid `tri_index` arrays (no -1 index allowed).
Parameters
----------
x, y : array_like
x and y coordinates indicating where interpolated values are
requested.
tri_index : integer array_like, optional
Array of the containing triangle indices, same shape as
*x* and *y*. Defaults to None. If None, these indices
will be computed by a TriFinder instance.
            (Note: for points outside the grid, tri_index[ipt] shall be -1).
return_keys : tuple of keys from {'z', 'dzdx', 'dzdy'}
Defines the interpolation arrays to return, and in which order.
Returns
-------
ret : list of arrays
Each array-like contains the expected interpolated values in the
order defined by *return_keys* parameter.
"""
# Flattening and rescaling inputs arrays x, y
# (initial shape is stored for output)
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
sh_ret = x.shape
if (x.shape != y.shape):
raise ValueError("x and y shall have same shapes."
" Given: {0} and {1}".format(x.shape, y.shape))
x = np.ravel(x)
y = np.ravel(y)
x_scaled = x/self._unit_x
y_scaled = y/self._unit_y
size_ret = np.size(x_scaled)
# Computes & ravels the element indexes, extract the valid ones.
if tri_index is None:
tri_index = self._trifinder(x, y)
else:
if (tri_index.shape != sh_ret):
raise ValueError(
"tri_index array is provided and shall"
" have same shape as x and y. Given: "
"{0} and {1}".format(tri_index.shape, sh_ret))
tri_index = np.ravel(tri_index)
mask_in = (tri_index != -1)
if self._tri_renum is None:
valid_tri_index = tri_index[mask_in]
else:
valid_tri_index = self._tri_renum[tri_index[mask_in]]
valid_x = x_scaled[mask_in]
valid_y = y_scaled[mask_in]
ret = []
for return_key in return_keys:
# Find the return index associated with the key.
try:
return_index = {'z': 0, 'dzdx': 1, 'dzdy': 2}[return_key]
except KeyError:
raise ValueError("return_keys items shall take values in"
" {'z', 'dzdx', 'dzdy'}")
# Sets the scale factor for f & df components
scale = [1., 1./self._unit_x, 1./self._unit_y][return_index]
# Computes the interpolation
ret_loc = np.empty(size_ret, dtype=np.float64)
ret_loc[~mask_in] = np.nan
ret_loc[mask_in] = self._interpolate_single_key(
return_key, valid_tri_index, valid_x, valid_y) * scale
ret += [np.ma.masked_invalid(ret_loc.reshape(sh_ret), copy=False)]
return ret
def _interpolate_single_key(self, return_key, tri_index, x, y):
"""
Performs the interpolation at points belonging to the triangulation
        (inside an unmasked triangle).
Parameters
----------
        return_key : string key from {'z', 'dzdx', 'dzdy'}
Identifies the requested values (z or its derivatives)
tri_index : 1d integer array
Valid triangle index (-1 prohibited)
x, y : 1d arrays, same shape as `tri_index`
Valid locations where interpolation is requested.
Returns
-------
ret : 1-d array
Returned array of the same size as *tri_index*
"""
raise NotImplementedError("TriInterpolator subclasses" +
"should implement _interpolate_single_key!")
class LinearTriInterpolator(TriInterpolator):
"""
A LinearTriInterpolator performs linear interpolation on a triangular grid.
Each triangle is represented by a plane so that an interpolated value at
point (x,y) lies on the plane of the triangle containing (x,y).
Interpolated values are therefore continuous across the triangulation, but
their first derivatives are discontinuous at edges between triangles.
Parameters
----------
triangulation : :class:`~matplotlib.tri.Triangulation` object
The triangulation to interpolate over.
z : array_like of shape (npoints,)
Array of values, defined at grid points, to interpolate between.
trifinder : :class:`~matplotlib.tri.TriFinder` object, optional
If this is not specified, the Triangulation's default TriFinder will
be used by calling
:func:`matplotlib.tri.Triangulation.get_trifinder`.
Methods
-------
`__call__` (x, y) : Returns interpolated values at x,y points
`gradient` (x, y) : Returns interpolated derivatives at x,y points
"""
def __init__(self, triangulation, z, trifinder=None):
TriInterpolator.__init__(self, triangulation, z, trifinder)
# Store plane coefficients for fast interpolation calculations.
self._plane_coefficients = \
self._triangulation.calculate_plane_coefficients(self._z)
def __call__(self, x, y):
return self._interpolate_multikeys(x, y, tri_index=None,
return_keys=('z',))[0]
__call__.__doc__ = TriInterpolator.docstring__call__
def gradient(self, x, y):
return self._interpolate_multikeys(x, y, tri_index=None,
return_keys=('dzdx', 'dzdy'))
gradient.__doc__ = TriInterpolator.docstringgradient
def _interpolate_single_key(self, return_key, tri_index, x, y):
if return_key == 'z':
return (self._plane_coefficients[tri_index, 0]*x +
self._plane_coefficients[tri_index, 1]*y +
self._plane_coefficients[tri_index, 2])
elif return_key == 'dzdx':
return self._plane_coefficients[tri_index, 0]
elif return_key == 'dzdy':
return self._plane_coefficients[tri_index, 1]
else:
raise ValueError("Invalid return_key: " + return_key)
class CubicTriInterpolator(TriInterpolator):
"""
A CubicTriInterpolator performs cubic interpolation on triangular grids.
In one-dimension - on a segment - a cubic interpolating function is
defined by the values of the function and its derivative at both ends.
This is almost the same in 2-d inside a triangle, except that the values
of the function and its 2 derivatives have to be defined at each triangle
node.
The CubicTriInterpolator takes the value of the function at each node -
provided by the user - and internally computes the value of the
derivatives, resulting in a smooth interpolation.
(As a special feature, the user can also impose the value of the
derivatives at each node, but this is not supposed to be the common
usage.)
Parameters
----------
triangulation : :class:`~matplotlib.tri.Triangulation` object
The triangulation to interpolate over.
z : array_like of shape (npoints,)
Array of values, defined at grid points, to interpolate between.
kind : {'min_E', 'geom', 'user'}, optional
Choice of the smoothing algorithm, in order to compute
the interpolant derivatives (defaults to 'min_E'):
          - if 'min_E': (default) The derivatives at each node are computed
            to minimize a bending energy.
          - if 'geom': The derivatives at each node are computed as a
            weighted average of relevant triangle normals. To be used for
            speed optimization (large grids).
          - if 'user': The user provides the argument `dz`, hence no
            computation is needed.
trifinder : :class:`~matplotlib.tri.TriFinder` object, optional
If not specified, the Triangulation's default TriFinder will
be used by calling
:func:`matplotlib.tri.Triangulation.get_trifinder`.
dz : tuple of array_likes (dzdx, dzdy), optional
Used only if *kind* ='user'. In this case *dz* must be provided as
(dzdx, dzdy) where dzdx, dzdy are arrays of the same shape as *z* and
are the interpolant first derivatives at the *triangulation* points.
Methods
-------
`__call__` (x, y) : Returns interpolated values at x,y points
`gradient` (x, y) : Returns interpolated derivatives at x,y points
Notes
-----
This note is a bit technical and details the way a
:class:`~matplotlib.tri.CubicTriInterpolator` computes a cubic
interpolation.
The interpolation is based on a Clough-Tocher subdivision scheme of
the *triangulation* mesh (to make it clearer, each triangle of the
grid will be divided in 3 child-triangles, and on each child triangle
the interpolated function is a cubic polynomial of the 2 coordinates).
This technique originates from FEM (Finite Element Method) analysis;
the element used is a reduced Hsieh-Clough-Tocher (HCT)
element. Its shape functions are described in [1]_.
The assembled function is guaranteed to be C1-smooth, i.e. it is
continuous and its first derivatives are also continuous (this
is easy to show inside the triangles but is also true when crossing the
edges).
In the default case (*kind* ='min_E'), the interpolant minimizes a
curvature energy on the functional space generated by the HCT element
shape functions - with imposed values but arbitrary derivatives at each
node. The minimized functional is the integral of the so-called total
curvature (implementation based on an algorithm from [2]_ - PCG sparse
solver):
.. math::
E(z) = \\ \\frac{1}{2} \\int_{\\Omega} \\left(
\\left( \\frac{\\partial^2{z}}{\\partial{x}^2} \\right)^2 +
\\left( \\frac{\\partial^2{z}}{\\partial{y}^2} \\right)^2 +
2\\left( \\frac{\\partial^2{z}}{\\partial{y}\\partial{x}}
\\right)^2 \\right) dx\\,dy
If the case *kind* ='geom' is chosen by the user, a simple geometric
approximation is used (weighted average of the triangle normal
vectors), which could improve speed on very large grids.
References
----------
.. [1] Michel Bernadou, Kamal Hassan, "Basis functions for general
Hsieh-Clough-Tocher triangles, complete or reduced.",
International Journal for Numerical Methods in Engineering,
           17(5):784 - 789, 1981.
.. [2] C.T. Kelley, "Iterative Methods for Optimization".
"""
def __init__(self, triangulation, z, kind='min_E', trifinder=None,
dz=None):
TriInterpolator.__init__(self, triangulation, z, trifinder)
# Loads the underlying c++ _triangulation.
# (During loading, reordering of triangulation._triangles may occur so
# that all final triangles are now anti-clockwise)
self._triangulation.get_cpp_triangulation()
# To build the stiffness matrix and avoid zero-energy spurious modes
# we will only store internally the valid (unmasked) triangles and
# the necessary (used) points coordinates.
# 2 renumbering tables need to be computed and stored:
# - a triangle renum table in order to translate the result from a
# TriFinder instance into the internal stored triangle number.
# - a node renum table to overwrite the self._z values into the new
# (used) node numbering.
tri_analyzer = TriAnalyzer(self._triangulation)
(compressed_triangles, compressed_x, compressed_y, tri_renum,
node_renum) = tri_analyzer._get_compressed_triangulation(True, True)
self._triangles = compressed_triangles
self._tri_renum = tri_renum
# Taking into account the node renumbering in self._z:
node_mask = (node_renum == -1)
self._z[node_renum[~node_mask]] = self._z
self._z = self._z[~node_mask]
# Computing scale factors
self._unit_x = np.max(compressed_x) - np.min(compressed_x)
self._unit_y = np.max(compressed_y) - np.min(compressed_y)
self._pts = np.vstack((compressed_x/float(self._unit_x),
compressed_y/float(self._unit_y))).T
# Computing triangle points
self._tris_pts = self._pts[self._triangles]
# Computing eccentricities
self._eccs = self._compute_tri_eccentricities(self._tris_pts)
# Computing dof estimations for HCT triangle shape function
self._dof = self._compute_dof(kind, dz=dz)
# Loading HCT element
self._ReferenceElement = _ReducedHCT_Element()
def __call__(self, x, y):
return self._interpolate_multikeys(x, y, tri_index=None,
return_keys=('z',))[0]
__call__.__doc__ = TriInterpolator.docstring__call__
def gradient(self, x, y):
return self._interpolate_multikeys(x, y, tri_index=None,
return_keys=('dzdx', 'dzdy'))
gradient.__doc__ = TriInterpolator.docstringgradient + """
Examples
--------
An example of effective application is shown below (plot of the
        direction of the vector field derived from a known potential field):
.. plot:: mpl_examples/pylab_examples/trigradient_demo.py
"""
def _interpolate_single_key(self, return_key, tri_index, x, y):
tris_pts = self._tris_pts[tri_index]
alpha = self._get_alpha_vec(x, y, tris_pts)
ecc = self._eccs[tri_index]
dof = np.expand_dims(self._dof[tri_index], axis=1)
if return_key == 'z':
return self._ReferenceElement.get_function_values(
alpha, ecc, dof)
elif return_key in ['dzdx', 'dzdy']:
J = self._get_jacobian(tris_pts)
dzdx = self._ReferenceElement.get_function_derivatives(
alpha, J, ecc, dof)
if return_key == 'dzdx':
return dzdx[:, 0, 0]
else:
return dzdx[:, 1, 0]
else:
raise ValueError("Invalid return_key: " + return_key)
def _compute_dof(self, kind, dz=None):
"""
Computes and returns nodal dofs according to kind
Parameters
----------
kind: {'min_E', 'geom', 'user'}
Choice of the _DOF_estimator subclass to perform the gradient
estimation.
dz: tuple of array_likes (dzdx, dzdy), optional
            Used only if *kind* ='user'; in this case passed to the
:class:`_DOF_estimator_user`.
Returns
-------
dof : array_like, shape (npts,2)
Estimation of the gradient at triangulation nodes (stored as
degree of freedoms of reduced-HCT triangle elements).
"""
if kind == 'user':
if dz is None:
raise ValueError("For a CubicTriInterpolator with "
"*kind*='user', a valid *dz* "
"argument is expected.")
TE = _DOF_estimator_user(self, dz=dz)
elif kind == 'geom':
TE = _DOF_estimator_geom(self)
elif kind == 'min_E':
TE = _DOF_estimator_min_E(self)
else:
raise ValueError("CubicTriInterpolator *kind* proposed: {0} ; "
"should be one of: "
"'user', 'geom', 'min_E'".format(kind))
return TE.compute_dof_from_df()
@staticmethod
def _get_alpha_vec(x, y, tris_pts):
"""
Fast (vectorized) function to compute barycentric coordinates alpha.
Parameters
----------
x, y : array-like of dim 1 (shape (nx,))
            Coordinates of the points whose barycentric
            coordinates are requested.
tris_pts : array like of dim 3 (shape: (nx,3,2))
Coordinates of the containing triangles apexes.
Returns
-------
alpha : array of dim 2 (shape (nx,3))
Barycentric coordinates of the points inside the containing
triangles.
"""
ndim = tris_pts.ndim-2
a = tris_pts[:, 1, :] - tris_pts[:, 0, :]
b = tris_pts[:, 2, :] - tris_pts[:, 0, :]
abT = np.concatenate([np.expand_dims(a, ndim+1),
np.expand_dims(b, ndim+1)], ndim+1)
ab = _transpose_vectorized(abT)
x = np.expand_dims(x, ndim)
y = np.expand_dims(y, ndim)
OM = np.concatenate([x, y], ndim) - tris_pts[:, 0, :]
metric = _prod_vectorized(ab, abT)
        # Here we try to deal with the collinear cases.
# metric_inv is in this case set to the Moore-Penrose pseudo-inverse
# meaning that we will still return a set of valid barycentric
# coordinates.
metric_inv = _pseudo_inv22sym_vectorized(metric)
Covar = _prod_vectorized(ab, _transpose_vectorized(
np.expand_dims(OM, ndim)))
ksi = _prod_vectorized(metric_inv, Covar)
alpha = _to_matrix_vectorized([
[1-ksi[:, 0, 0]-ksi[:, 1, 0]], [ksi[:, 0, 0]], [ksi[:, 1, 0]]])
return alpha
@staticmethod
def _get_jacobian(tris_pts):
"""
Fast (vectorized) function to compute triangle jacobian matrix.
Parameters
----------
tris_pts : array like of dim 3 (shape: (nx,3,2))
Coordinates of the containing triangles apexes.
Returns
-------
        J : array of dim 3 (shape (nx,2,2))
            Jacobian matrices computed at the first apex of the containing
            triangles.
J[itri,:,:] is the jacobian matrix at apex 0 of the triangle
itri, so that the following (matrix) relationship holds:
[dz/dksi] = [J] x [dz/dx]
with x: global coordinates
ksi: element parametric coordinates in triangle first apex
local basis.
"""
a = np.array(tris_pts[:, 1, :] - tris_pts[:, 0, :])
b = np.array(tris_pts[:, 2, :] - tris_pts[:, 0, :])
J = _to_matrix_vectorized([[a[:, 0], a[:, 1]],
[b[:, 0], b[:, 1]]])
return J
@staticmethod
def _compute_tri_eccentricities(tris_pts):
"""
Computes triangle eccentricities
Parameters
----------
tris_pts : array like of dim 3 (shape: (nx,3,2))
Coordinates of the triangles apexes.
Returns
-------
ecc : array like of dim 2 (shape: (nx,3))
The so-called eccentricity parameters [1] needed for
HCT triangular element.
"""
a = np.expand_dims(tris_pts[:, 2, :]-tris_pts[:, 1, :], axis=2)
b = np.expand_dims(tris_pts[:, 0, :]-tris_pts[:, 2, :], axis=2)
c = np.expand_dims(tris_pts[:, 1, :]-tris_pts[:, 0, :], axis=2)
# Do not use np.squeeze, this is dangerous if only one triangle
# in the triangulation...
dot_a = _prod_vectorized(_transpose_vectorized(a), a)[:, 0, 0]
dot_b = _prod_vectorized(_transpose_vectorized(b), b)[:, 0, 0]
dot_c = _prod_vectorized(_transpose_vectorized(c), c)[:, 0, 0]
# Note that this line will raise a warning for dot_a, dot_b or dot_c
# zeros, but we choose not to support triangles with duplicate points.
return _to_matrix_vectorized([[(dot_c-dot_b) / dot_a],
[(dot_a-dot_c) / dot_b],
[(dot_b-dot_a) / dot_c]])
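# Illustrative sketch (not part of matplotlib): cubic counterpart of the linear example
# above. kind='geom' uses the fast gradient estimate; the default kind='min_E' solves
# the bending-energy minimization described in the class notes. Test values arbitrary.
def _example_cubic_tri_interpolation():
    rng = np.random.RandomState(1)
    x = rng.uniform(-1, 1, 80)
    y = rng.uniform(-1, 1, 80)
    z = np.cos(3 * x) * np.sin(2 * y)
    triang = Triangulation(x, y)
    xi, yi = np.meshgrid(np.linspace(-0.5, 0.5, 7), np.linspace(-0.5, 0.5, 7))
    zi_smooth = CubicTriInterpolator(triang, z)(xi, yi)              # kind='min_E'
    zi_fast = CubicTriInterpolator(triang, z, kind='geom')(xi, yi)   # faster estimate
    return zi_smooth, zi_fast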
# FEM element used for interpolation and for solving minimisation
# problem (Reduced HCT element)
class _ReducedHCT_Element():
"""
Implementation of reduced HCT triangular element with explicit shape
functions.
Computes z, dz, d2z and the element stiffness matrix for bending energy:
    E(f) = integral( (d2z/dx2)**2 + (d2z/dy2)**2 + 2*(d2z/dxdy)**2 dA)
*** Reference for the shape functions: ***
    [1] Basis functions for general Hsieh-Clough-Tocher triangles, complete or
reduced.
Michel Bernadou, Kamal Hassan
International Journal for Numerical Methods in Engineering.
        17(5):784 - 789, 1981
*** Element description: ***
    9 dofs: z and dz given at the 3 apices
C1 (conform)
"""
# 1) Loads matrices to generate shape functions as a function of
    # triangle eccentricities - based on [1] p.11
M = np.array([
[ 0.00, 0.00, 0.00, 4.50, 4.50, 0.00, 0.00, 0.00, 0.00, 0.00],
[-0.25, 0.00, 0.00, 0.50, 1.25, 0.00, 0.00, 0.00, 0.00, 0.00],
[-0.25, 0.00, 0.00, 1.25, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.50, 1.00, 0.00, -1.50, 0.00, 3.00, 3.00, 0.00, 0.00, 3.00],
[ 0.00, 0.00, 0.00, -0.25, 0.25, 0.00, 1.00, 0.00, 0.00, 0.50],
[ 0.25, 0.00, 0.00, -0.50, -0.25, 1.00, 0.00, 0.00, 0.00, 1.00],
[ 0.50, 0.00, 1.00, 0.00, -1.50, 0.00, 0.00, 3.00, 3.00, 3.00],
[ 0.25, 0.00, 0.00, -0.25, -0.50, 0.00, 0.00, 0.00, 1.00, 1.00],
[ 0.00, 0.00, 0.00, 0.25, -0.25, 0.00, 0.00, 1.00, 0.00, 0.50]])
M0 = np.array([
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[-1.00, 0.00, 0.00, 1.50, 1.50, 0.00, 0.00, 0.00, 0.00, -3.00],
[-0.50, 0.00, 0.00, 0.75, 0.75, 0.00, 0.00, 0.00, 0.00, -1.50],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 1.00, 0.00, 0.00, -1.50, -1.50, 0.00, 0.00, 0.00, 0.00, 3.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.50, 0.00, 0.00, -0.75, -0.75, 0.00, 0.00, 0.00, 0.00, 1.50]])
M1 = np.array([
[-0.50, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[-0.25, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.50, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.25, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
M2 = np.array([
[ 0.50, 0.00, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.25, 0.00, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[-0.50, 0.00, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[-0.25, 0.00, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
# 2) Loads matrices to rotate components of gradient & Hessian
# vectors in the reference basis of triangle first apex (a0)
rotate_dV = np.array([[ 1., 0.], [ 0., 1.],
[ 0., 1.], [-1., -1.],
[-1., -1.], [ 1., 0.]])
rotate_d2V = np.array([[1., 0., 0.], [0., 1., 0.], [ 0., 0., 1.],
[0., 1., 0.], [1., 1., 1.], [ 0., -2., -1.],
[1., 1., 1.], [1., 0., 0.], [-2., 0., -1.]])
    # 3) Loads Gauss points & weights on the 3 sub-triangles for P2
    # exact integral - 3 points on each sub-triangle.
    # NOTE: as the 2nd derivative is discontinuous, we really need those 9
    # points!
n_gauss = 9
gauss_pts = np.array([[13./18., 4./18., 1./18.],
[ 4./18., 13./18., 1./18.],
[ 7./18., 7./18., 4./18.],
[ 1./18., 13./18., 4./18.],
[ 1./18., 4./18., 13./18.],
[ 4./18., 7./18., 7./18.],
[ 4./18., 1./18., 13./18.],
[13./18., 1./18., 4./18.],
[ 7./18., 4./18., 7./18.]], dtype=np.float64)
gauss_w = np.ones([9], dtype=np.float64) / 9.
# 4) Stiffness matrix for curvature energy
E = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 2.]])
# 5) Loads the matrix to compute DOF_rot from tri_J at apex 0
J0_to_J1 = np.array([[-1., 1.], [-1., 0.]])
J0_to_J2 = np.array([[ 0., -1.], [ 1., -1.]])
def get_function_values(self, alpha, ecc, dofs):
"""
Parameters
----------
alpha : is a (N x 3 x 1) array (array of column-matrices) of
barycentric coordinates,
ecc : is a (N x 3 x 1) array (array of column-matrices) of triangle
eccentricities,
dofs : is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
degrees of freedom.
Returns
-------
Returns the N-array of interpolated function values.
"""
subtri = np.argmin(alpha, axis=1)[:, 0]
ksi = _roll_vectorized(alpha, -subtri, axis=0)
E = _roll_vectorized(ecc, -subtri, axis=0)
x = ksi[:, 0, 0]
y = ksi[:, 1, 0]
z = ksi[:, 2, 0]
x_sq = x*x
y_sq = y*y
z_sq = z*z
V = _to_matrix_vectorized([
[x_sq*x], [y_sq*y], [z_sq*z], [x_sq*z], [x_sq*y], [y_sq*x],
[y_sq*z], [z_sq*y], [z_sq*x], [x*y*z]])
prod = _prod_vectorized(self.M, V)
prod += _scalar_vectorized(E[:, 0, 0],
_prod_vectorized(self.M0, V))
prod += _scalar_vectorized(E[:, 1, 0],
_prod_vectorized(self.M1, V))
prod += _scalar_vectorized(E[:, 2, 0],
_prod_vectorized(self.M2, V))
s = _roll_vectorized(prod, 3*subtri, axis=0)
return _prod_vectorized(dofs, s)[:, 0, 0]
def get_function_derivatives(self, alpha, J, ecc, dofs):
"""
Parameters
----------
*alpha* is a (N x 3 x 1) array (array of column-matrices of
barycentric coordinates)
*J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
triangle first apex)
*ecc* is a (N x 3 x 1) array (array of column-matrices of triangle
eccentricities)
*dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
degrees of freedom.
Returns
-------
Returns the values of interpolated function derivatives [dz/dx, dz/dy]
in global coordinates at locations alpha, as a column-matrices of
shape (N x 2 x 1).
"""
subtri = np.argmin(alpha, axis=1)[:, 0]
ksi = _roll_vectorized(alpha, -subtri, axis=0)
E = _roll_vectorized(ecc, -subtri, axis=0)
x = ksi[:, 0, 0]
y = ksi[:, 1, 0]
z = ksi[:, 2, 0]
x_sq = x*x
y_sq = y*y
z_sq = z*z
dV = _to_matrix_vectorized([
[ -3.*x_sq, -3.*x_sq],
[ 3.*y_sq, 0.],
[ 0., 3.*z_sq],
[ -2.*x*z, -2.*x*z+x_sq],
[-2.*x*y+x_sq, -2.*x*y],
[ 2.*x*y-y_sq, -y_sq],
[ 2.*y*z, y_sq],
[ z_sq, 2.*y*z],
[ -z_sq, 2.*x*z-z_sq],
[ x*z-y*z, x*y-y*z]])
# Puts back dV in first apex basis
dV = _prod_vectorized(dV, _extract_submatrices(
self.rotate_dV, subtri, block_size=2, axis=0))
prod = _prod_vectorized(self.M, dV)
prod += _scalar_vectorized(E[:, 0, 0],
_prod_vectorized(self.M0, dV))
prod += _scalar_vectorized(E[:, 1, 0],
_prod_vectorized(self.M1, dV))
prod += _scalar_vectorized(E[:, 2, 0],
_prod_vectorized(self.M2, dV))
dsdksi = _roll_vectorized(prod, 3*subtri, axis=0)
dfdksi = _prod_vectorized(dofs, dsdksi)
# In global coordinates:
        # Here we try to deal with the simplest collinear cases, returning a
        # null matrix.
J_inv = _safe_inv22_vectorized(J)
dfdx = _prod_vectorized(J_inv, _transpose_vectorized(dfdksi))
return dfdx
def get_function_hessians(self, alpha, J, ecc, dofs):
"""
Parameters
----------
*alpha* is a (N x 3 x 1) array (array of column-matrices) of
barycentric coordinates
*J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
triangle first apex)
*ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
eccentricities
*dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
degrees of freedom.
Returns
-------
Returns the values of interpolated function 2nd-derivatives
[d2z/dx2, d2z/dy2, d2z/dxdy] in global coordinates at locations alpha,
as a column-matrices of shape (N x 3 x 1).
"""
d2sdksi2 = self.get_d2Sidksij2(alpha, ecc)
d2fdksi2 = _prod_vectorized(dofs, d2sdksi2)
H_rot = self.get_Hrot_from_J(J)
d2fdx2 = _prod_vectorized(d2fdksi2, H_rot)
return _transpose_vectorized(d2fdx2)
def get_d2Sidksij2(self, alpha, ecc):
"""
Parameters
----------
*alpha* is a (N x 3 x 1) array (array of column-matrices) of
barycentric coordinates
*ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
eccentricities
Returns
-------
        Returns the array d2sdksi2 (N x 9 x 3) of the Hessians of the shape
            functions expressed in covariant coordinates in the first apex
            basis.
"""
subtri = np.argmin(alpha, axis=1)[:, 0]
ksi = _roll_vectorized(alpha, -subtri, axis=0)
E = _roll_vectorized(ecc, -subtri, axis=0)
x = ksi[:, 0, 0]
y = ksi[:, 1, 0]
z = ksi[:, 2, 0]
d2V = _to_matrix_vectorized([
[ 6.*x, 6.*x, 6.*x],
[ 6.*y, 0., 0.],
[ 0., 6.*z, 0.],
[ 2.*z, 2.*z-4.*x, 2.*z-2.*x],
[2.*y-4.*x, 2.*y, 2.*y-2.*x],
[2.*x-4.*y, 0., -2.*y],
[ 2.*z, 0., 2.*y],
[ 0., 2.*y, 2.*z],
[ 0., 2.*x-4.*z, -2.*z],
[ -2.*z, -2.*y, x-y-z]])
# Puts back d2V in first apex basis
d2V = _prod_vectorized(d2V, _extract_submatrices(
self.rotate_d2V, subtri, block_size=3, axis=0))
prod = _prod_vectorized(self.M, d2V)
prod += _scalar_vectorized(E[:, 0, 0],
_prod_vectorized(self.M0, d2V))
prod += _scalar_vectorized(E[:, 1, 0],
_prod_vectorized(self.M1, d2V))
prod += _scalar_vectorized(E[:, 2, 0],
_prod_vectorized(self.M2, d2V))
d2sdksi2 = _roll_vectorized(prod, 3*subtri, axis=0)
return d2sdksi2
def get_bending_matrices(self, J, ecc):
"""
Parameters
----------
*J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
triangle first apex)
*ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
eccentricities
Returns
-------
Returns the element K matrices for bending energy expressed in
GLOBAL nodal coordinates.
        K_ij = integral [ d2zi/dx2*d2zj/dx2 + d2zi/dy2*d2zj/dy2
                          + 2*d2zi/dxdy*d2zj/dxdy dA ]
tri_J is needed to rotate dofs from local basis to global basis
"""
n = np.size(ecc, 0)
# 1) matrix to rotate dofs in global coordinates
J1 = _prod_vectorized(self.J0_to_J1, J)
J2 = _prod_vectorized(self.J0_to_J2, J)
DOF_rot = np.zeros([n, 9, 9], dtype=np.float64)
DOF_rot[:, 0, 0] = 1
DOF_rot[:, 3, 3] = 1
DOF_rot[:, 6, 6] = 1
DOF_rot[:, 1:3, 1:3] = J
DOF_rot[:, 4:6, 4:6] = J1
DOF_rot[:, 7:9, 7:9] = J2
# 2) matrix to rotate Hessian in global coordinates.
H_rot, area = self.get_Hrot_from_J(J, return_area=True)
# 3) Computes stiffness matrix
# Gauss quadrature.
K = np.zeros([n, 9, 9], dtype=np.float64)
weights = self.gauss_w
pts = self.gauss_pts
for igauss in range(self.n_gauss):
alpha = np.tile(pts[igauss, :], n).reshape(n, 3)
alpha = np.expand_dims(alpha, 3)
weight = weights[igauss]
d2Skdksi2 = self.get_d2Sidksij2(alpha, ecc)
d2Skdx2 = _prod_vectorized(d2Skdksi2, H_rot)
K += weight * _prod_vectorized(_prod_vectorized(d2Skdx2, self.E),
_transpose_vectorized(d2Skdx2))
# 4) With nodal (not elem) dofs
K = _prod_vectorized(_prod_vectorized(_transpose_vectorized(DOF_rot),
K), DOF_rot)
# 5) Need the area to compute total element energy
return _scalar_vectorized(area, K)
def get_Hrot_from_J(self, J, return_area=False):
"""
Parameters
----------
*J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
triangle first apex)
Returns
-------
Returns H_rot used to rotate Hessian from local basis of first apex,
to global coordinates.
if *return_area* is True, returns also the triangle area (0.5*det(J))
"""
        # Here we try to deal with the simplest collinear cases; a null
        # energy and area is imposed.
J_inv = _safe_inv22_vectorized(J)
Ji00 = J_inv[:, 0, 0]
Ji11 = J_inv[:, 1, 1]
Ji10 = J_inv[:, 1, 0]
Ji01 = J_inv[:, 0, 1]
H_rot = _to_matrix_vectorized([
[Ji00*Ji00, Ji10*Ji10, Ji00*Ji10],
[Ji01*Ji01, Ji11*Ji11, Ji01*Ji11],
[2*Ji00*Ji01, 2*Ji11*Ji10, Ji00*Ji11+Ji10*Ji01]])
if not return_area:
return H_rot
else:
area = 0.5 * (J[:, 0, 0]*J[:, 1, 1] - J[:, 0, 1]*J[:, 1, 0])
return H_rot, area
def get_Kff_and_Ff(self, J, ecc, triangles, Uc):
"""
Builds K and F for the following elliptic formulation:
minimization of curvature energy with value of function at node
imposed and derivatives 'free'.
        Builds the global Kff matrix in coo format.
Builds the full Ff vec Ff = - Kfc x Uc
Parameters
----------
*J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
triangle first apex)
*ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
eccentricities
*triangles* is a (N x 3) array of nodes indexes.
*Uc* is (N x 3) array of imposed displacements at nodes
Returns
-------
(Kff_rows, Kff_cols, Kff_vals) Kff matrix in coo format - Duplicate
(row, col) entries must be summed.
Ff: force vector - dim npts * 3
"""
ntri = np.size(ecc, 0)
vec_range = np.arange(ntri, dtype=np.int32)
c_indices = -np.ones(ntri, dtype=np.int32) # for unused dofs, -1
f_dof = [1, 2, 4, 5, 7, 8]
c_dof = [0, 3, 6]
# vals, rows and cols indices in global dof numbering
f_dof_indices = _to_matrix_vectorized([[
c_indices, triangles[:, 0]*2, triangles[:, 0]*2+1,
c_indices, triangles[:, 1]*2, triangles[:, 1]*2+1,
c_indices, triangles[:, 2]*2, triangles[:, 2]*2+1]])
expand_indices = np.ones([ntri, 9, 1], dtype=np.int32)
f_row_indices = _prod_vectorized(_transpose_vectorized(f_dof_indices),
_transpose_vectorized(expand_indices))
f_col_indices = _prod_vectorized(expand_indices, f_dof_indices)
K_elem = self.get_bending_matrices(J, ecc)
# Extracting sub-matrices
# Explanation & notations:
        # * Subscript f denotes 'free' degrees of freedom (i.e. dz/dx, dz/dy)
# * Subscript c denotes 'condensated' (imposed) degrees of freedom
# (i.e. z at all nodes)
# * F = [Ff, Fc] is the force vector
# * U = [Uf, Uc] is the imposed dof vector
        #         [ Kff Kfc ]
        # * K =   [         ]  is the laplacian stiffness matrix
        #         [ Kcf Kcc ]
# * As F = K x U one gets straightforwardly: Ff = - Kfc x Uc
# Computing Kff stiffness matrix in sparse coo format
Kff_vals = np.ravel(K_elem[np.ix_(vec_range, f_dof, f_dof)])
Kff_rows = np.ravel(f_row_indices[np.ix_(vec_range, f_dof, f_dof)])
Kff_cols = np.ravel(f_col_indices[np.ix_(vec_range, f_dof, f_dof)])
# Computing Ff force vector in sparse coo format
Kfc_elem = K_elem[np.ix_(vec_range, f_dof, c_dof)]
Uc_elem = np.expand_dims(Uc, axis=2)
Ff_elem = - _prod_vectorized(Kfc_elem, Uc_elem)[:, :, 0]
Ff_indices = f_dof_indices[np.ix_(vec_range, [0], f_dof)][:, 0, :]
# Extracting Ff force vector in dense format
# We have to sum duplicate indices - using bincount
Ff = np.bincount(np.ravel(Ff_indices), weights=np.ravel(Ff_elem))
return Kff_rows, Kff_cols, Kff_vals, Ff
# :class:_DOF_estimator, _DOF_estimator_user, _DOF_estimator_geom,
# _DOF_estimator_min_E
# Private classes used to compute the degree of freedom of each triangular
# element for the TriCubicInterpolator.
class _DOF_estimator():
"""
Abstract base class for classes used to perform estimation of a function
first derivatives, and deduce the dofs for a CubicTriInterpolator using a
reduced HCT element formulation.
Derived classes implement compute_df(self,**kwargs), returning
np.vstack([dfx,dfy]).T where : dfx, dfy are the estimation of the 2
gradient coordinates.
"""
def __init__(self, interpolator, **kwargs):
if not isinstance(interpolator, CubicTriInterpolator):
raise ValueError("Expected a CubicTriInterpolator object")
self._pts = interpolator._pts
self._tris_pts = interpolator._tris_pts
self.z = interpolator._z
self._triangles = interpolator._triangles
(self._unit_x, self._unit_y) = (interpolator._unit_x,
interpolator._unit_y)
self.dz = self.compute_dz(**kwargs)
self.compute_dof_from_df()
def compute_dz(self, **kwargs):
raise NotImplementedError
def compute_dof_from_df(self):
"""
Computes reduced-HCT elements degrees of freedom, knowing the
gradient.
"""
J = CubicTriInterpolator._get_jacobian(self._tris_pts)
tri_z = self.z[self._triangles]
tri_dz = self.dz[self._triangles]
tri_dof = self.get_dof_vec(tri_z, tri_dz, J)
return tri_dof
@staticmethod
def get_dof_vec(tri_z, tri_dz, J):
"""
Computes the dof vector of a triangle, knowing the value of f, df and
of the local Jacobian at each node.
*tri_z*: array of shape (3,) of f nodal values
*tri_dz*: array of shape (3,2) of df/dx, df/dy nodal values
*J*: Jacobian matrix in local basis of apex 0
Returns dof array of shape (9,) so that for each apex iapex:
dof[iapex*3+0] = f(Ai)
dof[iapex*3+1] = df(Ai).(AiAi+)
dof[iapex*3+2] = df(Ai).(AiAi-)]
"""
npt = tri_z.shape[0]
dof = np.zeros([npt, 9], dtype=np.float64)
J1 = _prod_vectorized(_ReducedHCT_Element.J0_to_J1, J)
J2 = _prod_vectorized(_ReducedHCT_Element.J0_to_J2, J)
col0 = _prod_vectorized(J, np.expand_dims(tri_dz[:, 0, :], axis=3))
col1 = _prod_vectorized(J1, np.expand_dims(tri_dz[:, 1, :], axis=3))
col2 = _prod_vectorized(J2, np.expand_dims(tri_dz[:, 2, :], axis=3))
dfdksi = _to_matrix_vectorized([
[col0[:, 0, 0], col1[:, 0, 0], col2[:, 0, 0]],
[col0[:, 1, 0], col1[:, 1, 0], col2[:, 1, 0]]])
dof[:, 0:7:3] = tri_z
dof[:, 1:8:3] = dfdksi[:, 0]
dof[:, 2:9:3] = dfdksi[:, 1]
return dof
class _DOF_estimator_user(_DOF_estimator):
""" dz is imposed by user / Accounts for scaling if any """
def compute_dz(self, dz):
(dzdx, dzdy) = dz
dzdx = dzdx * self._unit_x
dzdy = dzdy * self._unit_y
return np.vstack([dzdx, dzdy]).T
class _DOF_estimator_geom(_DOF_estimator):
""" Fast 'geometric' approximation, recommended for large arrays. """
def compute_dz(self):
"""
        self.df is computed as a weighted average over the triangles sharing
        a common node. On each triangle itri, f is first assumed linear
        (= ~f), which allows computing d~f[itri].
        The following approximation of the df nodal values is then proposed:
            df[ipt] = SUM ( w[itri] x d~f[itri] , for itri sharing apex ipt)
        The weighting coefficients w[itri] are proportional to the angle of
        the triangle itri at apex ipt.
"""
el_geom_w = self.compute_geom_weights()
el_geom_grad = self.compute_geom_grads()
# Sum of weights coeffs
w_node_sum = np.bincount(np.ravel(self._triangles),
weights=np.ravel(el_geom_w))
# Sum of weighted df = (dfx, dfy)
dfx_el_w = np.empty_like(el_geom_w)
dfy_el_w = np.empty_like(el_geom_w)
for iapex in range(3):
dfx_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 0]
dfy_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 1]
dfx_node_sum = np.bincount(np.ravel(self._triangles),
weights=np.ravel(dfx_el_w))
dfy_node_sum = np.bincount(np.ravel(self._triangles),
weights=np.ravel(dfy_el_w))
# Estimation of df
dfx_estim = dfx_node_sum/w_node_sum
dfy_estim = dfy_node_sum/w_node_sum
return np.vstack([dfx_estim, dfy_estim]).T
def compute_geom_weights(self):
"""
        Builds the (nelems x 3) weight coefficients from the triangles' angles,
renormalized so that np.sum(weights, axis=1) == np.ones(nelems)
"""
weights = np.zeros([np.size(self._triangles, 0), 3])
tris_pts = self._tris_pts
for ipt in range(3):
p0 = tris_pts[:, (ipt) % 3, :]
p1 = tris_pts[:, (ipt+1) % 3, :]
p2 = tris_pts[:, (ipt-1) % 3, :]
alpha1 = np.arctan2(p1[:, 1]-p0[:, 1], p1[:, 0]-p0[:, 0])
alpha2 = np.arctan2(p2[:, 1]-p0[:, 1], p2[:, 0]-p0[:, 0])
# In the below formula we could take modulo 2. but
# modulo 1. is safer regarding round-off errors (flat triangles).
angle = np.abs(np.mod((alpha2-alpha1) / np.pi, 1.))
            # Weight proportional to angle up to np.pi/2; null weight for
            # degenerate cases 0. and np.pi (Note that `angle` is normalized
            # by np.pi)
weights[:, ipt] = 0.5 - np.abs(angle-0.5)
return weights
def compute_geom_grads(self):
"""
Compute the (global) gradient component of f assumed linear (~f).
returns array df of shape (nelems,2)
        df[ielem] . dM[ielem] = dz[ielem], i.e. df = dz x dM^-1 = dM.T^-1 x dz
"""
tris_pts = self._tris_pts
tris_f = self.z[self._triangles]
dM1 = tris_pts[:, 1, :] - tris_pts[:, 0, :]
dM2 = tris_pts[:, 2, :] - tris_pts[:, 0, :]
dM = np.dstack([dM1, dM2])
        # Here we try to deal with the simplest collinear cases: a null
        # gradient is assumed in this case.
dM_inv = _safe_inv22_vectorized(dM)
dZ1 = tris_f[:, 1] - tris_f[:, 0]
dZ2 = tris_f[:, 2] - tris_f[:, 0]
dZ = np.vstack([dZ1, dZ2]).T
df = np.empty_like(dZ)
# With np.einsum : could be ej,eji -> ej
df[:, 0] = dZ[:, 0]*dM_inv[:, 0, 0] + dZ[:, 1]*dM_inv[:, 1, 0]
df[:, 1] = dZ[:, 0]*dM_inv[:, 0, 1] + dZ[:, 1]*dM_inv[:, 1, 1]
return df
class _DOF_estimator_min_E(_DOF_estimator_geom):
"""
The 'smoothest' approximation, df is computed through global minimization
of the bending energy:
    E(f) = integral[ (d2z/dx2)**2 + (d2z/dy2)**2 + 2*(d2z/dxdy)**2 dA]
"""
def __init__(self, Interpolator):
self._eccs = Interpolator._eccs
_DOF_estimator_geom.__init__(self, Interpolator)
def compute_dz(self):
"""
Elliptic solver for bending energy minimization.
Uses a dedicated 'toy' sparse Jacobi PCG solver.
"""
# Initial guess for iterative PCG solver.
dz_init = _DOF_estimator_geom.compute_dz(self)
Uf0 = np.ravel(dz_init)
reference_element = _ReducedHCT_Element()
J = CubicTriInterpolator._get_jacobian(self._tris_pts)
eccs = self._eccs
triangles = self._triangles
Uc = self.z[self._triangles]
# Building stiffness matrix and force vector in coo format
Kff_rows, Kff_cols, Kff_vals, Ff = reference_element.get_Kff_and_Ff(
J, eccs, triangles, Uc)
# Building sparse matrix and solving minimization problem
        # We could use a scipy.sparse direct solver; however, to avoid this
        # external dependency, an implementation of a simple PCG solver with
        # a simple diagonal Jacobi preconditioner is provided.
tol = 1.e-10
n_dof = Ff.shape[0]
Kff_coo = _Sparse_Matrix_coo(Kff_vals, Kff_rows, Kff_cols,
shape=(n_dof, n_dof))
Kff_coo.compress_csc()
Uf, err = _cg(A=Kff_coo, b=Ff, x0=Uf0, tol=tol)
# If the PCG did not converge, we return the best guess between Uf0
# and Uf.
err0 = np.linalg.norm(Kff_coo.dot(Uf0) - Ff)
if err0 < err:
# Maybe a good occasion to raise a warning here ?
warnings.warn("In TriCubicInterpolator initialization, PCG sparse"
" solver did not converge after 1000 iterations. "
"`geom` approximation is used instead of `min_E`")
Uf = Uf0
# Building dz from Uf
dz = np.empty([self._pts.shape[0], 2], dtype=np.float64)
dz[:, 0] = Uf[::2]
dz[:, 1] = Uf[1::2]
return dz
# The following private :class:_Sparse_Matrix_coo and :func:_cg provide
# a PCG sparse solver for (symmetric) elliptic problems.
class _Sparse_Matrix_coo(object):
def __init__(self, vals, rows, cols, shape):
"""
Creates a sparse matrix in coo format
*vals*: arrays of values of non-null entries of the matrix
*rows*: int arrays of rows of non-null entries of the matrix
*cols*: int arrays of cols of non-null entries of the matrix
*shape*: 2-tuple (n,m) of matrix shape
"""
self.n, self.m = shape
self.vals = np.asarray(vals, dtype=np.float64)
self.rows = np.asarray(rows, dtype=np.int32)
self.cols = np.asarray(cols, dtype=np.int32)
def dot(self, V):
"""
Dot product of self by a vector *V* in sparse-dense to dense format
*V* dense vector of shape (self.m,)
"""
assert V.shape == (self.m,)
# For a more generic implementation we could use below kw argument
        # minlength=self.m of bincount; however:
        # - it is new in numpy 1.6
        # - it is unnecessary when each row has at least 1 entry in the
        #   global matrix, which is the case here.
return np.bincount(self.rows, weights=self.vals*V[self.cols])
def compress_csc(self):
"""
Compress rows, cols, vals / summing duplicates. Sort for csc format.
"""
_, unique, indices = np.unique(
self.rows + self.n*self.cols,
return_index=True, return_inverse=True)
self.rows = self.rows[unique]
self.cols = self.cols[unique]
self.vals = np.bincount(indices, weights=self.vals)
def compress_csr(self):
"""
Compress rows, cols, vals / summing duplicates. Sort for csr format.
"""
_, unique, indices = np.unique(
self.m*self.rows + self.cols,
return_index=True, return_inverse=True)
self.rows = self.rows[unique]
self.cols = self.cols[unique]
self.vals = np.bincount(indices, weights=self.vals)
def to_dense(self):
"""
Returns a dense matrix representing self.
Mainly for debugging purposes.
"""
ret = np.zeros([self.n, self.m], dtype=np.float64)
nvals = self.vals.size
for i in range(nvals):
ret[self.rows[i], self.cols[i]] += self.vals[i]
return ret
def __str__(self):
return self.to_dense().__str__()
@property
def diag(self):
"""
Returns the (dense) vector of the diagonal elements.
"""
in_diag = (self.rows == self.cols)
        diag = np.zeros(min(self.n, self.m), dtype=np.float64)  # default 0.
diag[self.rows[in_diag]] = self.vals[in_diag]
return diag
def _cg(A, b, x0=None, tol=1.e-10, maxiter=1000):
"""
Use Preconditioned Conjugate Gradient iteration to solve A x = b
    A simple Jacobi (diagonal) preconditioner is used.
Parameters
----------
A: _Sparse_Matrix_coo
*A* must have been compressed before by compress_csc or
compress_csr method.
b: array
Right hand side of the linear system.
Returns
-------
x: array.
The converged solution.
err: float
The absolute error np.linalg.norm(A.dot(x) - b)
Other parameters
----------------
x0: array.
Starting guess for the solution.
tol: float.
Tolerance to achieve. The algorithm terminates when the relative
residual is below tol.
maxiter: integer.
Maximum number of iterations. Iteration will stop
after maxiter steps even if the specified tolerance has not
been achieved.
"""
n = b.size
assert A.n == n
assert A.m == n
b_norm = np.linalg.norm(b)
# Jacobi pre-conditioner
kvec = A.diag
# For diag elem < 1e-6 we keep 1e-6.
kvec = np.where(kvec > 1.e-6, kvec, 1.e-6)
# Initial guess
if x0 is None:
x = np.zeros(n)
else:
x = x0
r = b - A.dot(x)
w = r/kvec
p = np.zeros(n)
beta = 0.0
rho = np.dot(r, w)
k = 0
# Following C. T. Kelley
while (np.sqrt(abs(rho)) > tol*b_norm) and (k < maxiter):
p = w + beta*p
z = A.dot(p)
alpha = rho/np.dot(p, z)
r = r - alpha*z
w = r/kvec
rhoold = rho
rho = np.dot(r, w)
x = x + alpha*p
beta = rho/rhoold
#err = np.linalg.norm(A.dot(x) - b) # absolute accuracy - not used
k += 1
err = np.linalg.norm(A.dot(x) - b)
return x, err
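# Illustrative sketch (not part of matplotlib): a tiny hand-checkable run of the toy PCG
# solver above on a 2x2 symmetric positive-definite system; the entries are arbitrary.
def _example_cg_small_system():
    # A = [[4, 1], [1, 3]], b = [1, 2]; the exact solution is [1/11, 7/11].
    A = _Sparse_Matrix_coo(vals=[4., 1., 1., 3.], rows=[0, 0, 1, 1],
                           cols=[0, 1, 0, 1], shape=(2, 2))
    A.compress_csc()                     # required before calling _cg
    b = np.array([1., 2.])
    x, err = _cg(A=A, b=b, tol=1.e-12)
    return x, err                        # x should be close to [1/11, 7/11]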
# The following private functions:
# :func:`_inv22_vectorized`
# :func:`_safe_inv22_vectorized`
# :func:`_pseudo_inv22sym_vectorized`
# :func:`_prod_vectorized`
# :func:`_scalar_vectorized`
# :func:`_transpose_vectorized`
# :func:`_roll_vectorized`
# :func:`_to_matrix_vectorized`
# :func:`_extract_submatrices`
# provide fast numpy implementation of some standard operations on arrays of
# matrices - stored as (:, n_rows, n_cols)-shaped np.arrays.
def _inv22_vectorized(M):
"""
Inversion of arrays of (2,2) matrices.
"""
assert (M.ndim == 3)
assert (M.shape[-2:] == (2, 2))
M_inv = np.empty_like(M)
delta_inv = np.reciprocal(M[:, 0, 0]*M[:, 1, 1] - M[:, 0, 1]*M[:, 1, 0])
M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv
M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv
M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv
M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv
return M_inv
# Development note: Dealing with pathologic 'flat' triangles in the
# CubicTriInterpolator code and impact on (2,2)-matrix inversion functions
# :func:`_safe_inv22_vectorized` and :func:`_pseudo_inv22sym_vectorized`.
#
# Goals:
# 1) The CubicTriInterpolator should be able to handle flat or almost flat
# triangles without raising an error,
# 2) These degenerated triangles should have no impact on the automatic dof
# calculation (associated with null weight for the _DOF_estimator_geom and
# with null energy for the _DOF_estimator_min_E),
# 3) Linear patch test should be passed exactly on degenerated meshes,
# 4) Interpolation (with :meth:`_interpolate_single_key` or
# :meth:`_interpolate_multikeys`) shall be correctly handled even *inside*
# the pathologic triangles, to interact correctly with a TriRefiner class.
#
# Difficulties:
# Flat triangles have rank-deficient *J* (so-called jacobian matrix) and
# *metric* (the metric tensor = J x J.T). Computation of the local
# tangent plane is also problematic.
#
# Implementation:
# Most of the time, when computing the inverse of a rank-deficient matrix it
# is safe to simply return the null matrix (which is the implementation in
# :func:`_safe_inv22_vectorized`). This is because of point 2), itself
# enforced by:
# - null area hence null energy in :class:`_DOF_estimator_min_E`
# - angles close or equal to 0 or np.pi hence null weight in
# :class:`_DOF_estimator_geom`.
# Note that the function angle -> weight is continuous and maximum for an
# angle np.pi/2 (refer to :meth:`compute_geom_weights`)
# The exception is the computation of barycentric coordinates, which is done
# by inversion of the *metric* matrix. In this case, we need to compute a set
# of valid coordinates (1 among numerous possibilities), to ensure point 4).
# We benefit here from the symmetry of metric = J x J.T, which makes it easier
# to compute a pseudo-inverse in :func:`_pseudo_inv22sym_vectorized`
def _safe_inv22_vectorized(M):
"""
Inversion of arrays of (2,2) matrices, returns 0 for rank-deficient
matrices.
*M* : array of (2,2) matrices to inverse, shape (n,2,2)
"""
assert M.ndim == 3
assert M.shape[-2:] == (2, 2)
M_inv = np.empty_like(M)
prod1 = M[:, 0, 0]*M[:, 1, 1]
delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
# We set delta_inv to 0. in case of a rank deficient matrix ; a
# rank-deficient input matrix *M* will lead to a null matrix in output
rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
if np.all(rank2):
# Normal 'optimized' flow.
delta_inv = 1./delta
else:
# 'Pathologic' flow.
delta_inv = np.zeros(M.shape[0])
delta_inv[rank2] = 1./delta[rank2]
M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv
M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv
M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv
M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv
return M_inv
def _pseudo_inv22sym_vectorized(M):
"""
Inversion of arrays of (2,2) SYMMETRIC matrices ; returns the
(Moore-Penrose) pseudo-inverse for rank-deficient matrices.
In case M is of rank 1, we have M = trace(M) x P where P is the orthogonal
projection on Im(M), and we return trace(M)^-1 x P == M / trace(M)**2
In case M is of rank 0, we return the null matrix.
*M* : array of (2,2) matrices to inverse, shape (n,2,2)
"""
assert M.ndim == 3
assert M.shape[-2:] == (2, 2)
M_inv = np.empty_like(M)
prod1 = M[:, 0, 0]*M[:, 1, 1]
delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
if np.all(rank2):
# Normal 'optimized' flow.
M_inv[:, 0, 0] = M[:, 1, 1] / delta
M_inv[:, 0, 1] = -M[:, 0, 1] / delta
M_inv[:, 1, 0] = -M[:, 1, 0] / delta
M_inv[:, 1, 1] = M[:, 0, 0] / delta
else:
# 'Pathologic' flow.
# Here we have to deal with 2 sub-cases
# 1) First sub-case: matrices of rank 2:
delta = delta[rank2]
M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta
M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta
M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta
M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta
# 2) Second sub-case: rank-deficient matrices of rank 0 and 1:
rank01 = ~rank2
tr = M[rank01, 0, 0] + M[rank01, 1, 1]
tr_zeros = (np.abs(tr) < 1.e-8)
sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)
#sq_tr_inv = 1. / tr**2
M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv
M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv
M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv
M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv
return M_inv
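# Illustrative sketch (not part of matplotlib): for the rank-1 symmetric matrix
# [[1, 1], [1, 1]] (trace 2), the function above returns M / trace(M)**2, i.e.
# [[0.25, 0.25], [0.25, 0.25]], which equals the Moore-Penrose pseudo-inverse.
def _example_pseudo_inv_rank1():
    M = np.array([[[1., 1.], [1., 1.]]])          # shape (1, 2, 2), rank 1
    return _pseudo_inv22sym_vectorized(M)[0]      # ~ [[0.25, 0.25], [0.25, 0.25]]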
def _prod_vectorized(M1, M2):
"""
Matrix product between arrays of matrices, or a matrix and an array of
matrices (*M1* and *M2*)
"""
sh1 = M1.shape
sh2 = M2.shape
assert len(sh1) >= 2
assert len(sh2) >= 2
assert sh1[-1] == sh2[-2]
ndim1 = len(sh1)
t1_index = list(xrange(ndim1-2)) + [ndim1-1, ndim1-2]
return np.sum(np.transpose(M1, t1_index)[..., np.newaxis] *
M2[..., np.newaxis, :], -3)
def _scalar_vectorized(scalar, M):
"""
Scalar product between scalars and matrices.
"""
return scalar[:, np.newaxis, np.newaxis]*M
def _transpose_vectorized(M):
"""
Transposition of an array of matrices *M*.
"""
ndim = M.ndim
assert ndim == 3
return np.transpose(M, [0, ndim-1, ndim-2])
def _roll_vectorized(M, roll_indices, axis):
"""
Rolls an array of matrices along an axis according to an array of indices
*roll_indices*
*axis* can be either 0 (rolls rows) or 1 (rolls columns).
"""
assert axis in [0, 1]
ndim = M.ndim
assert ndim == 3
ndim_roll = roll_indices.ndim
assert ndim_roll == 1
sh = M.shape
r, c = sh[-2:]
assert sh[0] == roll_indices.shape[0]
vec_indices = np.arange(sh[0], dtype=np.int32)
# Builds the rolled matrix
M_roll = np.empty_like(M)
if axis == 0:
for ir in range(r):
for ic in range(c):
M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic]
elif axis == 1:
for ir in range(r):
for ic in range(c):
M_roll[:, ir, ic] = M[vec_indices, ir, (-roll_indices+ic) % c]
return M_roll
def _to_matrix_vectorized(M):
"""
    Builds an array of matrices from individual np.arrays of identical
    shapes.
    *M*: nrows-list of ncols-lists of arrays of shape sh.
    Returns M_res np.array of shape (sh, nrows, ncols) so that:
        M_res[..., i, j] = M[i][j]
"""
assert isinstance(M, (tuple, list))
assert all([isinstance(item, (tuple, list)) for item in M])
c_vec = np.asarray([len(item) for item in M])
assert np.all(c_vec-c_vec[0] == 0)
r = len(M)
c = c_vec[0]
M00 = np.asarray(M[0][0])
dt = M00.dtype
sh = [M00.shape[0], r, c]
M_ret = np.empty(sh, dtype=dt)
for irow in range(r):
for icol in range(c):
M_ret[:, irow, icol] = np.asarray(M[irow][icol])
return M_ret
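# Illustrative sketch (not part of matplotlib): _to_matrix_vectorized assembles
# per-point scalars into an array of matrices; with two points the result below has
# shape (2, 2, 2), with M_res[:, 0, 0] == a, M_res[:, 0, 1] == b, and so on.
def _example_to_matrix_vectorized():
    a = np.array([1., 2.])
    b = np.array([3., 4.])
    return _to_matrix_vectorized([[a, b], [b, a]])    # shape (2, 2, 2)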
def _extract_submatrices(M, block_indices, block_size, axis):
"""
    Extracts selected blocks of a matrix *M* depending on the parameters
*block_indices* and *block_size*.
Returns the array of extracted matrices *Mres* so that:
M_res[...,ir,:] = M[(block_indices*block_size+ir), :]
"""
assert block_indices.ndim == 1
assert axis in [0, 1]
r, c = M.shape
if axis == 0:
sh = [block_indices.shape[0], block_size, c]
elif axis == 1:
sh = [block_indices.shape[0], r, block_size]
dt = M.dtype
M_res = np.empty(sh, dtype=dt)
if axis == 0:
for ir in range(block_size):
M_res[:, ir, :] = M[(block_indices*block_size+ir), :]
elif axis == 1:
for ic in range(block_size):
M_res[:, :, ic] = M[:, (block_indices*block_size+ic)]
return M_res
| apache-2.0 |
jmschrei/scikit-learn | sklearn/metrics/tests/test_ranking.py | 16 | 41687 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.exceptions import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
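# Illustrative sketch (not part of the scikit-learn test suite): a 4-sample hand check
# of the pairwise-ranking definition used by _auc above. With exactly one mis-ranked
# positive/negative pair out of four, both implementations should give 0.75.
def _example_auc_hand_check():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.8, 0.35, 0.9])
    assert_almost_equal(_auc(y_true, y_score), 0.75)
    assert_almost_equal(roc_auc_score(y_true, y_score), 0.75)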
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
expected_auc = _auc(y_true, probas_pred)
for drop in [True, False]:
fpr, tpr, thresholds = roc_curve(y_true, probas_pred,
drop_intermediate=drop)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve starting at 0 and ending at
    # 1, even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=False)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_roc_curve_drop_intermediate():
# Test that drop_intermediate drops the correct thresholds
y_true = [0, 0, 0, 0, 1, 1]
y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0]
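    # Once the threshold reaches 0.7 the true positive rate is already 1, so
    # the lower thresholds 0.6, 0.5 and 0.2 only add false positives and lie
    # on a straight segment of the ROC curve; drop_intermediate is therefore
    # expected to keep only [1., 0.7, 0.].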
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds, [1., 0.7, 0.])
# Test dropping thresholds with repeating scores
y_true = [0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1]
y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9,
0.6, 0.7, 0.8, 0.9, 0.9, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds,
[1.0, 0.9, 0.7, 0.6, 0.])
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under the PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
    # Check that it works on several small examples
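    # For each relevant label, LRAP takes the fraction of relevant labels
    # among all labels ranked at or above it (by decreasing score), averages
    # these fractions over the relevant labels of a sample, and then averages
    # over samples.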
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works without score ties:
    # basic check with increasing label space size and decreasing scores
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account,
        # e.g. two labels tied for rank 1 are both assigned rank 2.
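        # Worked example added for clarity: with y_score[i] = [0.5, 0.5, 0.3],
        # np.unique returns inv_rank = [1, 1, 0], so rank = [1, 1, 2]; the
        # bincount/cumsum correction below turns this into [2, 2, 3], i.e. the
        # two tied labels both get rank 2 and the remaining label gets rank 3.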
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Let's count the number of relevant label with better rank
# (smaller rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
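    # coverage_error is the average, over samples, of how far down the label
    # ranking (by decreasing score) one must go to cover all of the sample's
    # true labels; a sample with no true label contributes 0.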
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
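    # label_ranking_loss averages, over samples, the fraction of
    # (relevant, irrelevant) label pairs that are ordered incorrectly by the
    # scores; it is 0 when every relevant label is scored above every
    # irrelevant one.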
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/tests/formats/test_printing.py | 8 | 4905 | # -*- coding: utf-8 -*-
import nose
from pandas import compat
import pandas.formats.printing as printing
import pandas.formats.format as fmt
import pandas.util.testing as tm
import pandas.core.config as cf
_multiprocess_can_split_ = True
def test_adjoin():
data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = printing.adjoin(2, *data)
assert (adjoined == expected)
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.get_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = printing.pprint_thing(b, quote_strings=True)
tm.assert_equal(res, repr(b))
res = printing.pprint_thing(b, quote_strings=False)
tm.assert_equal(res, b)
class TestFormattBase(tm.TestCase):
def test_adjoin(self):
data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = printing.adjoin(2, *data)
self.assertEqual(adjoined, expected)
def test_adjoin_unicode(self):
data = [[u'あ', 'b', 'c'], ['dd', u'ええ', 'ff'], ['ggg', 'hhh', u'いいい']]
expected = u'あ dd ggg\nb ええ hhh\nc ff いいい'
adjoined = printing.adjoin(2, *data)
self.assertEqual(adjoined, expected)
adj = fmt.EastAsianTextAdjustment()
expected = u"""あ dd ggg
b ええ hhh
c ff いいい"""
adjoined = adj.adjoin(2, *data)
self.assertEqual(adjoined, expected)
cols = adjoined.split('\n')
self.assertEqual(adj.len(cols[0]), 13)
self.assertEqual(adj.len(cols[1]), 13)
self.assertEqual(adj.len(cols[2]), 16)
expected = u"""あ dd ggg
b ええ hhh
c ff いいい"""
adjoined = adj.adjoin(7, *data)
self.assertEqual(adjoined, expected)
cols = adjoined.split('\n')
self.assertEqual(adj.len(cols[0]), 23)
self.assertEqual(adj.len(cols[1]), 23)
self.assertEqual(adj.len(cols[2]), 26)
def test_justify(self):
adj = fmt.EastAsianTextAdjustment()
def just(x, *args, **kwargs):
# wrapper to test single str
return adj.justify([x], *args, **kwargs)[0]
self.assertEqual(just('abc', 5, mode='left'), 'abc ')
self.assertEqual(just('abc', 5, mode='center'), ' abc ')
self.assertEqual(just('abc', 5, mode='right'), ' abc')
self.assertEqual(just(u'abc', 5, mode='left'), 'abc ')
self.assertEqual(just(u'abc', 5, mode='center'), ' abc ')
self.assertEqual(just(u'abc', 5, mode='right'), ' abc')
self.assertEqual(just(u'パンダ', 5, mode='left'), u'パンダ')
self.assertEqual(just(u'パンダ', 5, mode='center'), u'パンダ')
self.assertEqual(just(u'パンダ', 5, mode='right'), u'パンダ')
self.assertEqual(just(u'パンダ', 10, mode='left'), u'パンダ ')
self.assertEqual(just(u'パンダ', 10, mode='center'), u' パンダ ')
self.assertEqual(just(u'パンダ', 10, mode='right'), u' パンダ')
def test_east_asian_len(self):
adj = fmt.EastAsianTextAdjustment()
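        # EastAsianTextAdjustment is expected to count East Asian wide
        # characters as width 2 and narrow (ASCII / halfwidth) characters as
        # width 1; ambiguous-width characters are covered in
        # test_ambiguous_width below.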
self.assertEqual(adj.len('abc'), 3)
self.assertEqual(adj.len(u'abc'), 3)
self.assertEqual(adj.len(u'パンダ'), 6)
        self.assertEqual(adj.len(u'ﾊﾟﾝﾀﾞ'), 5)
self.assertEqual(adj.len(u'パンダpanda'), 11)
        self.assertEqual(adj.len(u'ﾊﾟﾝﾀﾞpanda'), 10)
def test_ambiguous_width(self):
adj = fmt.EastAsianTextAdjustment()
self.assertEqual(adj.len(u'¡¡ab'), 4)
with cf.option_context('display.unicode.ambiguous_as_wide', True):
adj = fmt.EastAsianTextAdjustment()
self.assertEqual(adj.len(u'¡¡ab'), 6)
data = [[u'あ', 'b', 'c'], ['dd', u'ええ', 'ff'],
['ggg', u'¡¡ab', u'いいい']]
expected = u'あ dd ggg \nb ええ ¡¡ab\nc ff いいい'
adjoined = adj.adjoin(2, *data)
self.assertEqual(adjoined, expected)
# TODO: fix this broken test
# def test_console_encode():
# """
# On Python 2, if sys.stdin.encoding is None (IPython with zmq frontend)
# common.console_encode should encode things as utf-8.
# """
# if compat.PY3:
# raise nose.SkipTest
# with tm.stdin_encoding(encoding=None):
# result = printing.console_encode(u"\u05d0")
# expected = u"\u05d0".encode('utf-8')
# assert (result == expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
antoinecarme/pyaf | tests/basic_checks/issue_46_negative_horizon.py | 1 | 3171 | import numpy as np
import pandas as pd
import pyaf.ForecastEngine as autof
# the goal of these tests is to make pyaf as robust as possible against very small/bad datasets
# pyaf should automatically produce reasonable/naive/trivial models in these cases.
# it should not fail in any case (normal behavior expected)
def test_fake_model_1_row(iHorizon_train , iHorizon_apply):
# one row dataset => always constant forecast
df = pd.DataFrame([[0 , 0.54543]], columns = ['date' , 'signal'])
lEngine = autof.cForecastEngine()
lEngine.train(df , 'date' , 'signal', iHorizon_train);
# print(lEngine.mSignalDecomposition.mBestModel.mTimeInfo.info())
print(lEngine.mSignalDecomposition.mBestModel.getFormula())
print("PERFS_MAPE_MASE", lEngine.mSignalDecomposition.mBestModel.mForecastPerf.mMAPE,
lEngine.mSignalDecomposition.mBestModel.mForecastPerf.mMASE, )
# print(df.head())
df1 = lEngine.forecast(df , iHorizon_apply)
# print(df1.columns)
Forecast_DF = df1[['date' , 'signal', 'signal' + '_Forecast', 'signal_Residue', 'signal_Forecast_Lower_Bound',
'signal_Forecast_Upper_Bound']]
# print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(iHorizon_apply));
def test_fake_model_2_rows(iHorizon_train , iHorizon_apply):
    # two-row dataset => a trivial/naive model is expected
df = pd.DataFrame([[0 , 0.54543] , [1 , 0.43]], columns = ['date' , 'signal'])
lEngine = autof.cForecastEngine()
lEngine.train(df , 'date' , 'signal', iHorizon_train);
# print(lEngine.mSignalDecomposition.mBestModel.mTimeInfo.info())
print(lEngine.mSignalDecomposition.mBestModel.getFormula())
print("PERFS_MAPE_MASE", lEngine.mSignalDecomposition.mBestModel.mForecastPerf.mMAPE,
lEngine.mSignalDecomposition.mBestModel.mForecastPerf.mMASE, )
# print(df.head())
df1 = lEngine.forecast(df , iHorizon_apply)
# print(df1.columns)
Forecast_DF = df1[['date' , 'signal', 'signal' + '_Forecast', 'signal_Residue', 'signal_Forecast_Lower_Bound',
'signal_Forecast_Upper_Bound']]
# print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(iHorizon_apply));
def test_fake_model_1_row_fail(iHorizon_train , iHorizon_apply):
try:
test_fake_model_1_row(iHorizon_train, iHorizon_apply)
raise Exception("NOT_OK")
except Exception as e:
# should fail
print(str(e));
if(str(e) == "NOT_OK"):
raise
pass
def test_fake_model_2_rows_fail(iHorizon_train , iHorizon_apply):
try:
test_fake_model_2_rows(iHorizon_train, iHorizon_apply)
raise Exception("NOT_OK")
except Exception as e:
# should fail
print(str(e));
if(str(e) == "NOT_OK"):
raise
pass
test_fake_model_1_row_fail( 0, -1)
test_fake_model_1_row_fail( 2, 0)
test_fake_model_1_row_fail( 2, 0)
test_fake_model_1_row_fail( -1, -2)
test_fake_model_1_row_fail( -2, -10)
test_fake_model_1_row_fail( -20, -10)
test_fake_model_2_rows_fail( -1, -4)
test_fake_model_2_rows_fail( -6, -2)
test_fake_model_2_rows_fail( -6, -1)
test_fake_model_2_rows_fail( -1 , -7)
| bsd-3-clause |
LohithBlaze/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 140 | 6926 | """Testing for the VotingClassifier (sklearn.ensemble.voting_classifier)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import datasets
from sklearn import cross_validation
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset, keeping only two features
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
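    # Soft voting averages the per-estimator class probabilities using the
    # weights [2, 1, 1]; the denominator 4 in the expressions below is the
    # sum of those weights.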
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
| bsd-3-clause |
3manuek/scikit-learn | sklearn/preprocessing/tests/test_data.py | 113 | 38432 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
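    # For two features at degree 2 the expected columns of P2 are, in order:
    # 1, x1, x2, x1**2, x1*x2, x2**2, which is the column order
    # PolynomialFeatures is expected to produce.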
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
    # np.log(1e-5) is taken because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
    # with 2 more samples, the std computation runs into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non-zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
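    # Zero-variance (constant) columns cannot be meaningfully rescaled, so
    # MinMaxScaler is expected to map them to the lower bound of
    # feature_range (0 by default, 1 for feature_range=(1, 2) below).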
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
    # test that the scaler converts integer input to floating point
    # for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
    # Raise error if handle_unknown is neither ignore nor error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
ahaberlie/MetPy | examples/sigma_to_pressure_interpolation.py | 3 | 3544 | # Copyright (c) 2017,2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
===============================
Sigma to Pressure Interpolation
===============================
By using `metpy.interpolate.log_interpolate_1d`, data with sigma as the vertical coordinate can be
interpolated to isobaric coordinates.
"""
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
from netCDF4 import Dataset, num2date
from metpy.cbook import get_test_data
from metpy.interpolate import log_interpolate_1d
from metpy.plots import add_metpy_logo, add_timestamp
from metpy.units import units
######################################
# **Data**
#
# The data for this example comes from the outer domain of a WRF-ARW model forecast
# initialized at 1200 UTC on 03 June 1980. Model data courtesy Matthew Wilson, Valparaiso
# University Department of Geography and Meteorology.
data = Dataset(get_test_data('wrf_example.nc', False))
lat = data.variables['lat'][:]
lon = data.variables['lon'][:]
time = data.variables['time']
vtimes = num2date(time[:], time.units)
temperature = units.Quantity(data.variables['temperature'][:], 'degC')
pres = units.Quantity(data.variables['pressure'][:], 'Pa')
hgt = units.Quantity(data.variables['height'][:], 'meter')
####################################
# Array of desired pressure levels
plevs = [700.] * units.hPa
#####################################
# **Interpolate The Data**
#
# Now that the data is ready, we can interpolate to the new isobaric levels. The data is
# interpolated from the irregular pressure values for each sigma level to the new input
# mandatory isobaric levels. `log_interpolate_1d` will interpolate over a specified dimension
# with the `axis` argument. In this case, `axis=1` will correspond to interpolation on the
# vertical axis. The interpolated data is output in a list, so we will pull out each
# variable for plotting.
height, temp = log_interpolate_1d(plevs, pres, hgt, temperature, axis=1)
####################################
# **Plotting the Data for 700 hPa.**
# Set up our projection
crs = ccrs.LambertConformal(central_longitude=-100.0, central_latitude=45.0)
# Set the forecast hour
FH = 1
# Create the figure and grid for subplots
fig = plt.figure(figsize=(17, 12))
add_metpy_logo(fig, 470, 320, size='large')
# Plot 700 hPa
ax = plt.subplot(111, projection=crs)
ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.75)
ax.add_feature(cfeature.STATES, linewidth=0.5)
# Plot the heights
cs = ax.contour(lon, lat, height[FH, 0, :, :], transform=ccrs.PlateCarree(),
colors='k', linewidths=1.0, linestyles='solid')
ax.clabel(cs, fontsize=10, inline=1, inline_spacing=7,
fmt='%i', rightside_up=True, use_clabeltext=True)
# Contour the temperature
cf = ax.contourf(lon, lat, temp[FH, 0, :, :], range(-20, 20, 1), cmap=plt.cm.RdBu_r,
transform=ccrs.PlateCarree())
cb = fig.colorbar(cf, orientation='horizontal', extend='max', aspect=65, shrink=0.5,
pad=0.05, extendrect='True')
cb.set_label('Celsius', size='x-large')
ax.set_extent([-106.5, -90.4, 34.5, 46.75], crs=ccrs.PlateCarree())
# Make the axis title
ax.set_title('{:.0f} hPa Heights (m) and Temperature (C)'.format(plevs[0].m), loc='center',
fontsize=10)
# Set the figure title
fig.suptitle('WRF-ARW Forecast VALID: {:s} UTC'.format(str(vtimes[FH])), fontsize=14)
add_timestamp(ax, vtimes[FH], y=0.02, high_contrast=True)
plt.show()
| bsd-3-clause |
pablosv/dynamic_multifarious | tools/plot.py | 1 | 5429 | """
This file contains several plotting functions
"""
import matplotlib as mpl
mpl.use('Agg')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import evaluation as ev
def plot_lattice(lat, blocks, mode='color-species', title='L_000.npy'):
"""
This function reads the type of coloring and chooses
the corresponding plotter.
"""
cases = {'color-species': plot_lattice_color_species,
'color-structures': plot_lattice_color_structures,
'color-structures2': plot_lattice_color_structures2}
return cases[mode](lat, blocks, title)
def plot_lattice_color_species(lat, blocks, title):
"""
we begin by creating a colormap in which each of the M
components of the structures is made of a different color.
"""
colors = [(1.0,1.0,1.0),(.0,.0,.0)]
colors.extend(mpl.cm.jet(np.linspace(0, 1, blocks.M-1)))
cmap = mpl.colors.ListedColormap(colors)
plt.matshow(lat, cmap=cmap)
return 0
def plot_lattice_color_structures(lat, blocks, title):
"""
we begin by creating a colormap in which each of the structures
is made of a different color.
"""
col = ['w']
col.extend(mpl.cm.hsv(np.linspace(0, 1, 1+blocks.m)))
cmap = mpl.colors.ListedColormap(col)
# Calculate the overlap lattice
ms = ev.overlap(lat, blocks)
# Create a
lat_s = np.zeros_like(lat)
for i in range(blocks.L):
for j in range(blocks.L):
if lat[i,j]==0: lat_s[i,j] = 0 # empty site
else: lat_s[i,j] = 1 + np.argmax(ms[i, j, :]) # site w
plt.matshow(lat_s, cmap=cmap, vmin=0, vmax=1+blocks.m)
return 0
def plot_lattice_color_structures2(lat, blocks, title):
"""
we begin by creating a colormap in which each of the structures
is made of a different color.
"""
col = ['w', '0.5', 'k']
col.extend(mpl.cm.hsv(np.linspace(0, 1, 1+blocks.m)))
cmap = mpl.colors.ListedColormap(col)
# Calculate the overlap lattice
ms = ev.overlap(lat, blocks)
# Create a
lat_s = np.zeros_like(lat)
for i in range(blocks.L):
for j in range(blocks.L):
if lat[i,j]==0: lat_s[i,j] = -2 # empty site
elif (ms[i, j, :]!=1).all(): lat_s[i,j] = -1# monomer or ...
elif (ms[i, j, :]==1).sum()>1: lat_s[i,j] = 0 # site
else: lat_s[i,j] = 1 + np.argmax(ms[i, j, :]) # site with
plt.matshow(lat_s, cmap=cmap, vmin=-2, vmax=1+blocks.m)
plt.title(title)
return 0
def plot_structures(blocks, mode = 'color-species'):
"""
This function reads the type of coloring and chooses
the corresponding plotter.
"""
cases = {'color-species': plot_structures_color_species,
'color-structures': plot_structures_color_species,
'color-structures2': plot_structures_color_species}
return cases[mode](blocks)
def plot_structures_color_species(blocks):
colors = [(1.0,1.0,1.0),(.0,.0,.0)]
colors.extend(mpl.cm.jet(np.linspace(0, 1, blocks.M-1)))
cmap = mpl.colors.ListedColormap(colors)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(6, 3))
ax1.matshow(blocks.structures[:,:,0], cmap=cmap)
ax1.set_title('Structure 0')
ax1.set_xticklabels('')
ax1.set_yticklabels('')
ax2.matshow(blocks.structures[:,:,1], cmap=cmap)
ax2.set_title('Structure 1')
ax2.set_xticklabels('')
ax2.set_yticklabels('')
return
def plot_errors(errors, blocks, show=True, path='./tmp', name='/errors.png'):
"""
This function plots the errors with all the target structures over time,
each one with a different color.
"""
fig = plt.figure()
ax = fig.add_subplot(111)
plt.ylabel('errors, $\epsilon^{S}$',fontdict={'fontsize':20})
plt.xlabel('time',fontdict={'fontsize':20})
plt.axis([0, errors.shape[0], -.2, 1.2])
col = mpl.cm.hsv(np.linspace(0, 1, 1+blocks.m))
for i in range(len(errors[0,:])):
plt.plot(errors[:,i], label="$S=${0}".format(i), color=col[i])
plt.legend(title='errors, $\epsilon^{S}$', ncol=2, shadow=True,\
fancybox=True)
ax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(10))
ax.xaxis.grid(True,'major')
plt.savefig( path + name , bbox_inches='tight')
if show==True: fig.show()
return 0
def plot_phase(errors, blocks, show=True, path='./tmp', name='/phase.png'):
"""
This function plots the trajectory of the assemblies in the phase space of
the errors. In practice, it's not very informative.
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
gains_r = 1-errors[:,0]
gains_g = 1-errors[:,1]
gains_b = 1-errors[:,2]
ax.scatter(gains_g,
gains_r,
gains_b,
facecolors=1-errors, alpha=.05,s=70.)
ax.set_xlim3d([0,1.])
ax.set_ylim3d([1.,0])
ax.set_zlim3d([0,1.])
ax.set_aspect(.9,'box')
ax.set_xlabel('$1-\epsilon_G$')
ax.set_ylabel('$1-\epsilon_R$')
ax.set_zlabel('$1-\epsilon_B$')
plt.savefig( path + name , bbox_inches='tight')
if show==True: fig.show()
return 0
def plot_conformations(conformations, lattice, title='No_title'):
"""
Plot the weights of all elements
"""
normalized_conformations = (conformations.T*np.sign(lattice).T).T
normalized_conformations += (np.sign(lattice)-1) * 2
min_c = -2
max_c = np.max(normalized_conformations)
fig = plt.figure()
ax = fig.add_subplot(111)
conformations_plot = ax.matshow(normalized_conformations,
vmin=min_c, vmax=max_c)
plt.title(title)
fig.colorbar(conformations_plot)
fig.show()
return 0
| gpl-3.0 |
ran5515/DeepDecision | tensorflow/python/estimator/inputs/queues/feeding_functions.py | 46 | 15782 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import types as tp
import numpy as np
import six
from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _get_integer_indices_for_next_batch(
batch_indices_start, batch_size, epoch_end, array_length,
current_epoch, total_epochs):
"""Returns the integer indices for next batch.
If total epochs is not None and current epoch is the final epoch, the end
index of the next batch should not exceed the `epoch_end` (i.e., the final
batch might not have size `batch_size` to avoid overshooting the last epoch).
Args:
batch_indices_start: Integer, the index to start next batch.
batch_size: Integer, size of batches to return.
epoch_end: Integer, the end index of the epoch. The epoch could start from a
random position, so `epoch_end` provides the end index for that.
array_length: Integer, the length of the array.
current_epoch: Integer, the epoch number has been emitted.
total_epochs: Integer or `None`, the total number of epochs to emit. If
`None` will run forever.
Returns:
A tuple of a list with integer indices for next batch and `current_epoch`
value after the next batch.
Raises:
OutOfRangeError if `current_epoch` is not less than `total_epochs`.
"""
if total_epochs is not None and current_epoch >= total_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % current_epoch)
batch_indices_end = batch_indices_start + batch_size
batch_indices = [j % array_length for j in
range(batch_indices_start, batch_indices_end)]
epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end]
current_epoch += len(epoch_end_indices)
if total_epochs is None or current_epoch < total_epochs:
return (batch_indices, current_epoch)
# Now we might have emitted more data for expected epochs. Need to trim.
final_epoch_end_inclusive = epoch_end_indices[
-(current_epoch - total_epochs + 1)]
batch_indices = batch_indices[:final_epoch_end_inclusive + 1]
return (batch_indices, total_epochs)
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
class _GeneratorFeedFn(object):
"""Creates feed dictionaries from `Generator` of `dicts` of numpy arrays."""
def __init__(self,
placeholders,
generator,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
first_sample = next(generator())
if len(placeholders) != len(first_sample):
raise ValueError("Expected {} placeholders; got {}.".format(
len(first_sample), len(placeholders)))
self._keys = sorted(list(first_sample.keys()))
self._col_placeholders = placeholders
self._generator_function = generator
self._iterator = generator()
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
list_dict = {}
list_dict_size = 0
while list_dict_size < self._batch_size:
try:
data_row = next(self._iterator)
except StopIteration:
self._epoch += 1
self._iterator = self._generator_function()
data_row = next(self._iterator)
for index, key in enumerate(self._keys):
if key not in data_row.keys():
raise KeyError("key mismatch between dicts emitted by GenFun"
"Expected {} keys; got {}".format(
self._keys, data_row.keys()))
list_dict.setdefault(self._col_placeholders[index],
list()).append(data_row[key])
list_dict_size += 1
feed_dict = {key: np.asarray(item) for key, item in list(list_dict.items())}
return feed_dict
def _enqueue_data(data,
capacity,
shuffle=False,
min_after_dequeue=None,
num_threads=1,
seed=None,
name="enqueue_input",
enqueue_size=1,
num_epochs=None):
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given (`OrderedDict` of) array
or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
`Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
numpy arrays, the first enqueued `Tensor` contains the row number.
Args:
data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or a generator
yielding `dict`s of numpy arrays or pandas `DataFrame` that will be read
into the queue.
capacity: the capacity of the queue.
shuffle: whether or not to shuffle the rows of the array.
min_after_dequeue: minimum number of elements that can remain in the queue
after a dequeue operation. Only used when `shuffle` is true. If not set,
defaults to `capacity` / 4.
num_threads: number of threads used for reading and enqueueing.
seed: used to seed shuffling and reader starting points.
name: a scope name identifying the data.
enqueue_size: the number of rows to enqueue per step.
num_epochs: limit enqueuing to a specified number of epochs, if provided.
Returns:
A queue filled with the rows of the given (`OrderedDict` of) array or
`DataFrame`.
Raises:
TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
arrays, a numpy `ndarray`, or a generator producing these.
"""
with ops.name_scope(name):
if isinstance(data, np.ndarray):
types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
queue_shapes = [(), data.shape[1:]]
get_feed_fn = _ArrayFeedFn
elif isinstance(data, collections.OrderedDict):
types = [dtypes.int64] + [
dtypes.as_dtype(col.dtype) for col in data.values()
]
queue_shapes = [()] + [col.shape[1:] for col in data.values()]
get_feed_fn = _OrderedDictNumpyFeedFn
elif isinstance(data, tp.FunctionType):
x_first_el = six.next(data())
x_first_keys = sorted(x_first_el.keys())
x_first_values = [x_first_el[key] for key in x_first_keys]
types = [dtypes.as_dtype(col.dtype) for col in x_first_values]
queue_shapes = [col.shape for col in x_first_values]
get_feed_fn = _GeneratorFeedFn
elif HAS_PANDAS and isinstance(data, pd.DataFrame):
types = [
dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
]
queue_shapes = [() for _ in types]
get_feed_fn = _PandasFeedFn
else:
raise TypeError(
"data must be either a numpy array or pandas DataFrame if pandas is "
"installed; got {}".format(type(data).__name__))
# TODO(jamieas): TensorBoard warnings for all warnings below once available.
if num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with num_epochs and num_threads > 1. "
"num_epochs is applied per thread, so this will produce more "
"epochs than you probably intend. "
"If you want to limit epochs, use one thread.")
if shuffle and num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with shuffle=True, num_threads > 1, and "
"num_epochs. This will create multiple threads, all reading the "
"array/dataframe in order adding to the same shuffling queue; the "
"results will likely not be sufficiently shuffled.")
if not shuffle and num_threads > 1:
logging.warning(
"enqueue_data was called with shuffle=False and num_threads > 1. "
"This will create multiple threads, all reading the "
"array/dataframe in order. If you want examples read in order, use"
" one thread; if you want multiple threads, enable shuffling.")
if shuffle:
min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
min_after_dequeue)
queue = data_flow_ops.RandomShuffleQueue(
capacity,
min_after_dequeue,
dtypes=types,
shapes=queue_shapes,
seed=seed)
else:
min_after_dequeue = 0 # just for the summary text
queue = data_flow_ops.FIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
enqueue_ops = []
feed_fns = []
for i in range(num_threads):
# Note the placeholders have no shapes, so they will accept any
# enqueue_size. enqueue_many below will break them up.
placeholders = [array_ops.placeholder(t) for t in types]
enqueue_ops.append(queue.enqueue_many(placeholders))
seed_i = None if seed is None else (i + 1) * seed
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs))
runner = fqr._FeedingQueueRunner( # pylint: disable=protected-access
queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
queue_runner.add_queue_runner(runner)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) * (1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
summary.scalar(summary_name, full)
return queue
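# Editor's sketch (not part of the original module): a minimal, hypothetical
# illustration of how _enqueue_data might be driven from a plain numpy array;
# the capacity and enqueue_size values below are arbitrary assumptions.
#
#   import numpy as np
#   data = np.arange(20, dtype=np.float32).reshape(10, 2)
#   q = _enqueue_data(data, capacity=32, shuffle=False, enqueue_size=4)
#   row_ids, rows = q.dequeue_many(4)
#   # Once the registered queue runners are started in a session, each
#   # dequeue yields 4 rows of `data` plus their row numbers.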
| apache-2.0 |
RomainBrault/scikit-learn | examples/neighbors/plot_regression.py | 349 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
| bsd-3-clause |
ecell/epdp_ecell4_impl_temporary | samples/reversible/plot.py | 3 | 2585 | #!/usr/bin/env python
# PYTHONPATH=../.. python plot.py rev.-2.out p_rev.-2.tsv 0.000000125 rev.-1.out p_rev.-1.tsv 0.00000125 rev.0.out p_rev.0.tsv 0.0000125 rev.1.out p_rev.1.tsv 0.000125 rev.2.out p_rev.2.tsv 0.00125 rev.3.out p_rev.3.tsv 0.0125
# rev.-3.out p_rev.-3.tsv 0.0000000125
import sys
import numpy
import scipy.io
from matplotlib.pylab import *
#import _gfrd
infilename = sys.argv[1]
N_A = 6.0221367e23
sigma = 5e-9
#r0 = sigma
D_tot = 2e-12
#kf = 10 * sigma * D
tau = sigma*sigma / D_tot
rmin = sigma
def load_data(filename):
infile = open(filename)
data = array([float(x) for x in infile.read().split()], numpy.float)
infile.close()
return data
def plot_sol(filename, t):
rmax = 3.1 * math.sqrt(6 * D_tot * t) + rmin
data = scipy.io.read_array(filename)
rarray, parray = numpy.transpose(data)
mask = numpy.less_equal(rarray, rmax)
rarray = numpy.compress(mask, rarray)
parray = numpy.compress(mask, parray)
return loglog(rarray / sigma, parray * sigma, 'k-')[0]
def plot_hist(data, T, i):
bins = 30
nonreactions = numpy.compress(data >= sigma, data)
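    # Entries below sigma are counted as reacted pairs (only the survivors are
    # kept in `nonreactions`), so S_sim computed below is the simulated
    # fraction of pairs that have not reacted.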
print 'max', max(nonreactions)
hist, r = numpy.histogram(numpy.log(nonreactions),
bins=bins)
r = r[:-1] # new numpy.histogram returns len(r)=len(hist)+1
histsum = hist.sum()
S_sim = float(len(nonreactions)) / len(data)
print 'S_sim', S_sim
hist = hist.astype(numpy.float)
r = numpy.concatenate([r, [r[-1] - r[-2]]])
r = numpy.exp(r)
xticks = r[1:]-r[:-1]
hist /= len(data) * xticks
r = r[:-1] + (xticks * .5)
#print 'x', x
#pStyles = ['o', '^', 'v', '<', '>', 's', '+']
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
loglog(r / sigma, hist * sigma, colors[i] + 'o',
label=r'$T = \tau^{%d}$' % round(math.log10(T/tau)))
if __name__ == '__main__':
axes([.14,.15,.84,.82])
for i in range(len(sys.argv[1:])/3):
simfilename = sys.argv[i*3+1]
solfilename = sys.argv[i*3+2]
T = float(sys.argv[i*3+3])
print simfilename,solfilename,T
data = load_data(simfilename)
plot_hist(data, T, i)
solline = plot_sol(solfilename, T)
xlabel(r'$r / \sigma$', size=28)
ylabel(r'$p_{rev}$', size=28)
xlim(0.9, 2.2e2)
ylim(2e-6, 2e1)
xticks([1, 10, 100], ['1', '10', '100'], size=22)
yticks(size=18)
solline.set_label(r'theory')
#legend(handlelen=0.02, pad=0.02,handletextsep=0.01, labelsep=0.001)
#grid()
savefig('rev.eps')
show()
| gpl-2.0 |
Sentient07/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
ideaplat/Tback | machinelearning/svmregression/model.py | 1 | 5636 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from data.dbaccess import normalize
from data.db import get_db_session, Pinkunhu2015
class SVRRegressionModel(object):
""" 使用线性回归预测下一年人均年收入 """
    # Features to extract
features = [
'tv', 'washing_machine', 'fridge',
'reason', 'is_danger_house', 'is_back_poor', 'is_debt', 'standard',
'arable_land', 'debt_total', 'living_space', 'member_count',
'person_year_total_income', 'year_total_income',
'subsidy_total', 'wood_land', 'xin_nong_he_total', 'xin_yang_lao_total',
'call_number', 'bank_name', 'bank_number', 'help_plan'
]
    # Prediction target
target = 'ny_person_income'
def run(self):
""" 运行 """
        # Fetch training data
X, Y = self._fetch_data()
clf = self.get_classifier(X, Y)
        # Test
X, Y = self._fetch_test_data()
res = []
for item in range(11):
hit_ratio = self.predict(clf, X, Y, item * 0.1)
res.append([item * 0.1 * 100, hit_ratio * 100])
        # Plot the relationship between allowed deviation and hit ratio
arr = np.array(res)
        plt.plot(arr[:, 0], arr[:, 1])  # draw the line
        plt.plot(arr[:, 0], arr[:, 1], 'ro')  # draw the points
        plt.xlabel('Deviation (%)')
        plt.ylabel('Hit ratio (%)')
        plt.title('Predicting next year per capita income with SVR (linear kernel)')
plt.show()
def get_classifier(self, X, Y):
""" 构建线性回归模型
:param X: 训练数据
:param Y: 训练数据结果
:return: 模型
"""
clf = SVR(kernel='linear')
clf.fit(X, Y)
return clf
def predict(self, clf, X, Y, deviation=0.1):
""" 用当前的模型预测
:param clf: 模型
:param X: 测试数据
:param Y: 测试数据结果
:param deviation: 允许误差率
:return: 命中率
"""
Y2 = clf.predict(X)
total, hit = len(Y), 0
for idx, v in enumerate(Y2):
            if math.fabs(Y[idx] - v) <= math.fabs(Y[idx] * deviation):  # error within the allowed deviation counts as a hit
hit += 1
print 'Deviation: %d%%, Total: %d, Hit: %d, Precision: %.2f%%' % (100 * deviation, total, hit, 100.0*hit/total)
        # Use the model trained on county A to predict county B
# Deviation: 0%, Total: 40820, Hit: 0, Precision: 0.00%
# Deviation: 10%, Total: 40820, Hit: 24418, Precision: 59.82%
# Deviation: 20%, Total: 40820, Hit: 32935, Precision: 80.68%
# Deviation: 30%, Total: 40820, Hit: 36211, Precision: 88.71%
# Deviation: 40%, Total: 40820, Hit: 37367, Precision: 91.54%
# Deviation: 50%, Total: 40820, Hit: 38041, Precision: 93.19%
# Deviation: 60%, Total: 40820, Hit: 38502, Precision: 94.32%
# Deviation: 70%, Total: 40820, Hit: 38816, Precision: 95.09%
# Deviation: 80%, Total: 40820, Hit: 39071, Precision: 95.72%
# Deviation: 90%, Total: 40820, Hit: 39282, Precision: 96.23%
# Deviation: 100%, Total: 40820, Hit: 39432, Precision: 96.60%
return hit * 1.0 / total
def _fetch_data(self):
""" 获取建模数据 """
session = get_db_session()
objs = session.query(Pinkunhu2015).filter(
Pinkunhu2015.county == 'A县', Pinkunhu2015.ny_person_income != -1,
Pinkunhu2015.person_year_total_income > 0, Pinkunhu2015.person_year_total_income < 7000,
).all()
X, Y = [], []
for item in objs:
col_list = []
for col in self.features:
normalized_value = normalize(col, getattr(item, col))
col_list.append(normalized_value)
X.append(col_list)
normalized_value = normalize(self.target, getattr(item, self.target))
Y.append(normalized_value)
        # # Filter out possibly erroneous data
        # After dividing per capita income by 100 and checking its distribution, values occurring fewer than 5 times were excluded; it did not work well, so this is abandoned
# df = pd.DataFrame(X, columns=self.features)
# print '#df.shape:', df.shape
# df['person_year_total_income'] = df['person_year_total_income'] / 100
# df['person_year_total_income'] = df['person_year_total_income'].astype(int)
# df['person_year_total_income'] = df['person_year_total_income'] * 100
# df = df.groupby('person_year_total_income').filter(lambda x: len(x) > 5)
# print '#df.shape:', df.shape
# X, Y = df.loc[:, self.features[:-1]], df.loc[:, self.target]
return X, Y
def _fetch_test_data(self):
""" 获取测试数据 """
session = get_db_session()
objs = session.query(Pinkunhu2015).filter(
Pinkunhu2015.county == 'B县', Pinkunhu2015.ny_person_income != -1,
Pinkunhu2015.person_year_total_income > 0, Pinkunhu2015.person_year_total_income < 7000,
).all()
X, Y = [], []
for item in objs:
col_list = []
for col in self.features:
normalized_value = normalize(col, getattr(item, col))
col_list.append(normalized_value)
X.append(col_list)
normalized_value = normalize(self.target, getattr(item, self.target))
Y.append(normalized_value)
return X, Y
if __name__ == '__main__':
m = SVRRegressionModel()
m.run()
| mit |
mncmilan/CUBA | free_mode_with_ML.py | 1 | 5037 | # free_mode_with_ML.py
# Mónica Milán (@mncmilan)
# mncmilan@gmail.com
# http://steelhummingbird.blogspot.com.es/
# This code obtains acceleration data from the eZ430-Chronos watch by Texas Instruments, then it removes the noise on the X
# and Y axes and finally plots the resulting values. It also uses machine learning to detect a clap, which changes the
# background color of the graph.
import matplotlib.pyplot as plt
import time
import warnings
import numpy as np
from libs import communications, filterings, graphics, datalog
from sklearn.preprocessing import normalize
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
warnings.filterwarnings("ignore", category=DeprecationWarning)
communication = communications.CommunicationManager()
filtering = filterings.FilteringManager()
graphic = graphics.GraphicsManager()
report = datalog.DatalogManager()
# Load dataset
train_digits = np.genfromtxt('data/clapping.csv', delimiter = ';')
train_data = train_digits[:, :-1]
train_labels = train_digits[:, -1]
train_data=normalize(train_data)
# Create a classifier
classifier = KNeighborsClassifier()
#classifier = SVC(gamma=0.001, probability=True)
#classifier = DecisionTreeClassifier(random_state=0)
#classifier = RandomForestClassifier(n_estimators = 100)
# Train the classifier
classifier.fit(train_data, train_labels)
class FreeMovementML():
communication.open_serial_port()
max_samples = 10
watch_samples_counter = -1
save_into_file = True
lower_index = 0
higher_index = 30
snapfingers = 0
old_prediction=0
x_axis_acceleration = []
y_axis_acceleration = []
z_axis_acceleration = []
test_digits = []
time_limit = 60 # Datalog time
report.create_file('probabilities.txt')
report.create_file('dataPython.txt')
report.create_file('clapping.txt')
report.record_data('dataPython.txt','t', 'x', 'y', 'z')
k = 0
t1=0
time_initial = time.time()
graphic.set_plot_parameters()
while time.time() - time_initial <= time_limit:
time_final = time.time()
bytes_to_read = communication.send_data_request()
inbyte = communication.read_data(bytes_to_read)
if (bytes_to_read == 7 and inbyte[3] == 1) or (bytes_to_read == 14 and inbyte[10] == 1):
watch_samples_counter += 1
x_axis_acceleration.append(inbyte[bytes_to_read-3])
filtering.filter_acceleration(x_axis_acceleration, watch_samples_counter)
y_axis_acceleration.append(inbyte[bytes_to_read-2])
filtering.filter_acceleration(y_axis_acceleration, watch_samples_counter)
z_axis_acceleration.append(inbyte[bytes_to_read-1])
filtering.filter_acceleration(z_axis_acceleration, watch_samples_counter)
time_final = time.time()
report.record_data('dataPython.txt',time_final - time_initial, x_axis_acceleration[watch_samples_counter],
y_axis_acceleration[watch_samples_counter], z_axis_acceleration[watch_samples_counter])
report.record_for_training(x_axis_acceleration[watch_samples_counter],
y_axis_acceleration[watch_samples_counter],
z_axis_acceleration[watch_samples_counter])
test_digits = test_digits + [x_axis_acceleration[watch_samples_counter],
y_axis_acceleration[watch_samples_counter],
z_axis_acceleration[watch_samples_counter]]
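            # Sliding-window classification (descriptive note): once at least ten
            # samples (30 values) are buffered, the current 30-value window is
            # normalized and classified. On a new clap detection the window jumps
            # ahead by 15 values, presumably to avoid re-detecting the same clap;
            # otherwise it slides forward by one sample (3 values).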
if watch_samples_counter>=9 and higher_index <= len(test_digits):
# Load the dataset
test_data = normalize(test_digits[lower_index:higher_index])
# Predict values
test_predicted = classifier.predict(test_data)
test_probabilities = classifier.predict_proba(test_data)
report.record_probabilities(int(test_predicted[0]), test_probabilities,test_digits[lower_index:higher_index])
if test_predicted == 1 and old_prediction==0:
snapfingers += 1
graphic.change_color()
lower_index += 15
higher_index += 15
print(snapfingers)
t1=time.time()
else:
print(time.time()-t1)
graphic.restore_color()
lower_index += 3
higher_index += 3
old_prediction=test_predicted
graphic.plot_data(x_axis_acceleration[watch_samples_counter], y_axis_acceleration[watch_samples_counter])
plt.pause(0.05) # 50m
report.next_line()
k +=1 #communication.close_serial_port()
| apache-2.0 |
ArtjomIASM/MultitankGUI | MultiTankGui_one_level/graphcanvas.py | 1 | 5456 | import traceback, sys
import matplotlib
import numpy as np
import random
matplotlib.use("Qt4Agg")
from matplotlib.figure import Figure
from matplotlib.animation import TimedAnimation
from matplotlib.lines import Line2D
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from time import time
from threading import Lock
from matplotlib.ticker import FuncFormatter
from config import *
class GraphCanvas(FigureCanvas, TimedAnimation):
TAIL_LEN_SEC = 2
TIME_MARGIN = 5
def __init__(self, graphXLimit, graphYLimit, graphYLevelMin):
print("Using: matplotlib v." + matplotlib.__version__)
timeSpanFormatter = FuncFormatter(self.formatTime)
self.graphXLimit = graphXLimit
self.graphYLimit = graphYLimit
self.graphYLevelMin = graphYLevelMin
self.valuesXYP = ([0.0], [0.0], -1)
self.dataLock = Lock()
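# dataLock guards valuesXYP, which setData() replaces (presumably called from a
# data-acquisition thread) and _draw_frame() reads via getData() on the GUI side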
# The window
self.fig = Figure(dpi=100)
self.ax1 = self.fig.add_subplot(111)
# self.ax1 settings
self.ax1.set_xlabel('Time')
self.ax1.set_ylabel('Level')
self.ax1.grid(True)
self.ax1.xaxis.set_major_formatter(timeSpanFormatter)
self.line1 = Line2D([], [], color='blue')
self.line1_tail = Line2D([], [], color='red', linewidth=2)
self.line1_head = Line2D([], [], color='red', marker='o', markeredgecolor='r')
self.ax1.add_line(self.line1)
self.ax1.add_line(self.line1_tail)
self.ax1.add_line(self.line1_head)
self.ax1.set_xlim(0, self.graphXLimit - 1) # X axis range
self.ax1.set_ylim(self.graphYLevelMin, self.graphYLimit - 1) # Y axis range
self.line_setPoint = None
FigureCanvas.__init__(self, self.fig)
TimedAnimation.__init__(self, self.fig, interval=TIMED_INTERVAL, blit=False)
# TIMED_INTERVAL is the animation delay in milliseconds; increase it on a weak CPU
# Set blit=True if you need to free up CPU
def closeEvent(self, event):
# Stop the animation loop on exit
FigureCanvas.closeEvent(self, event)
self._stop()
event.accept()
def setData(self, valuesX, valuesY, setPoint):
# To avoid manipulations with partially updated data, do update under lock
self.dataLock.acquire(True)
try:
self.valuesXYP = (valuesX, valuesY, setPoint)
finally:
self.dataLock.release()
def getData(self):
self.dataLock.acquire(True)
try:
return self.valuesXYP
finally:
self.dataLock.release()
def _init_draw(self):
lines = [self.line1, self.line1_tail, self.line1_head]
for line2D in lines:
line2D.set_data([], [])
if None != self.line_setPoint:
self.line_setPoint.set_data([], [])
def _step(self, *args):
try: # Extends the _step() method for the TimedAnimation class.
TimedAnimation._step(self, *args)
except Exception as e:
print("Failed to execute animation. Animation stopped!")
traceback.print_exc(file=sys.stdout)
TimedAnimation._stop(self)
pass
def new_frame_seq(self):
return iter(range(self.valuesXYP[0].__len__()))
def _draw_frame(self, framedata): # Draw graph line itself
# print(random.uniform(0, 10))  # uncomment to check that the draw loop keeps running
valuesX, valuesY, setPoint = self.getData()
tailXY = (valuesX, valuesY)
self.line1.set_data(valuesX, valuesY)
lastTimeValue = valuesX[len(valuesX) - 1]
tailEndTime = lastTimeValue - self.TAIL_LEN_SEC
for i in reversed(range(len(valuesX))):
if valuesX[i] < tailEndTime:
tailStartIdx = min(i + 1, len(valuesX))
tailEndIndex = len(valuesX)
if (tailStartIdx == tailEndIndex and len(valuesX) > 1):
tailStartIdx = len(valuesX) - 2
tailXY = (valuesX[tailStartIdx: tailEndIndex], valuesY[tailStartIdx: tailEndIndex])
break
self.line1_tail.set_data(tailXY)
self.line1_head.set_data([valuesX[len(valuesX) - 1]], [valuesY[len(valuesY) - 1]])
lastTimeValueRounded = round(lastTimeValue, 0)
# Maximum and minimum values for x
xMin = max(0, lastTimeValueRounded - self.graphXLimit + self.TIME_MARGIN)
xMax = max(self.graphXLimit, lastTimeValueRounded + self.TIME_MARGIN)
self.ax1.set_xlim(xMin, xMax) # X axis range
# Create set point line
if self.line_setPoint is None and -1 != setPoint:
self.line_setPoint = Line2D([], [], color='black')
self.ax1.add_line(self.line_setPoint)
lstArtists = [self.line1, self.line1_tail, self.line1_head, self.ax1]
if None != self.line_setPoint:
self.line_setPoint.set_data([xMin, xMax], [setPoint, setPoint])
lstArtists.append(self.line_setPoint)
self._drawn_artists = lstArtists
def formatTime(self, value, position):
nHours, nRemainder = divmod(int(value), 3600)
nMinutes, nSeconds = divmod(nRemainder, 60)
if (nHours > 0):
return '%02d:%02d:%02d' % (nHours, nMinutes, nSeconds)
else:
return '%02d:%02d' % (nMinutes, nSeconds)
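# Used as the x-axis FuncFormatter; `position` is unused. For instance:
#   formatTime(75, None) -> '01:15', formatTime(3725, None) -> '01:02:05'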
| mit |
procoder317/scikit-learn | sklearn/externals/joblib/parallel.py | 79 | 35628 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This setting was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
# Under Python 3.4+ use the 'forkserver' start method by default: this makes it
# possible to avoid crashing 3rd party libraries that manage an internal thread
# pool that does not tolerate forking
if hasattr(mp, 'get_start_method'):
method = os.environ.get('JOBLIB_START_METHOD')
if (method is None and mp.get_start_method() == 'fork'
and 'forkserver' in mp.get_all_start_methods()):
method = 'forkserver'
DEFAULT_MP_CONTEXT = mp.get_context(method=method)
else:
DEFAULT_MP_CONTEXT = None
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
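# For instance, BatchedCalls([(abs, (-1,), {}), (abs, (-2,), {})])() evaluates
# both calls sequentially in the worker and returns [1, 2].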
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
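# For example, with verbose=5 the lag becomes .5 * (11 - 5) ** 2 == 18, so a
# message is printed (False returned) whenever int(sqrt(index / 18)) steps up.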
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
- when used in conjunction with `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
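# For example: from math import sqrt; delayed(sqrt)(4) == (sqrt, (4,), {})
# The call is only captured here; Parallel executes it later in a worker.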
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocessing jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called a 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto',
temp_folder=None, max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = DEFAULT_MP_CONTEXT
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
# This lock is used to coordinate the main thread of this process with
# the async callback thread of the pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
# Prevent posix fork inside in non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
# processing of a batch of tasks is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
# Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
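# For instance old_batch_size=1 with batch_duration=0.02s gives
# ideal_batch_size=10 and therefore batch_size=20.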
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
# while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
# This is heuristic code to print only about 'verbose' times a message
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
# we empty it and Python lists are not thread-safe by default, hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
with self._lock:
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
# The iterable was consumed all at once by the above while loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| bsd-3-clause |
kagayakidan/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 19 | 11711 | try:
# Python 2 compat
reload
except NameError:
# Regular Python 3+ import
from importlib import reload
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
# Test that priors passed as a list are correctly handled (just run fit to
# check that it does not fail)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
clf.fit(X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
# Test that the sum of the normalized eigenvalues (explained variance ratios) equals 1
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
# Assure that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis().fit(X6, y6, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_deprecated_lda_qda_deprecation():
def import_lda_module():
import sklearn.lda
# ensure that we trigger DeprecationWarning even if the sklearn.lda
# was loaded previously by another test.
reload(sklearn.lda)
return sklearn.lda
lda = assert_warns(DeprecationWarning, import_lda_module)
assert lda.LDA is LinearDiscriminantAnalysis
def import_qda_module():
import sklearn.qda
# ensure that we trigger DeprecationWarning even if the sklearn.qda
# was loaded previously by another test.
reload(sklearn.qda)
return sklearn.qda
qda = assert_warns(DeprecationWarning, import_qda_module)
assert qda.QDA is QuadraticDiscriminantAnalysis
| bsd-3-clause |
sellberg/SACLA2016A8015 | scripts/06_plot_WAXS.py | 2 | 2304 | #!/home/software/SACLA_tool/bin/python2.7
import numpy as np
import h5py
import matplotlib
import matplotlib.pyplot as plt
import argparse
import time
#import pandas as pd
import sys
# -- default parameters
run = 448571#448539
nshots = 100
threshold = 40
# -- folders
file_folder = '/UserData/fperakis/2016_6/run%d/'%(run)
src_folder = '/home/fperakis/2016_06/git/SACLA2016A8015/src/'
fig_folder = '/home/fperakis/2016_06/figs/'
# -- filenames and paths
file_name = '%d.h5'%(run)
file_path = file_folder+file_name
sys.path.insert(0, src_folder)
from img_class import *
# -- import data
fh5 = h5py.File(file_path, 'r')
#h5keys = fh5.keys()
run_key = [ k for k in fh5.keys() if k.startswith('run_') ][0]
#print fh5,run_key
#tags = fh5['/%s/detector_2d_assembled_1'%run_key].keys()[1:]
tags = fh5['/%s/detector_2d_1'%run_key].keys()[1:]
#gp = fh5.get(h5keys[1])
#subgp = gp.items()
#print subgp
# -- image generator
num_im = len(tags)
img_gen = ( fh5['%s/detector_2d_1/%s/detector_data'%(run_key,tag) ].value for tag in tags )
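# img_gen lazily yields one 2D detector image (numpy array) per tag from the HDF5 file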
num_im = len(tags)
mean_int = np.zeros(num_im)
#mean_int = np.zeros(num_im)
# -- get first image
im1 = img_gen.next()
# -- make mask
#mask = np.ones(im1.shape)
#mask[im1>threshold]=0
# -- loop over shots
im = img_gen.next()#im1*mask
i=0
for im_next in img_gen:
#for ishot in range(nshots):#img_gen:
#im_next = img_gen.next()#img_gen[ishot]
t1 = time.time()
#mean_int[i] = np.average(im_next[im_next<threshold].flatten())
mean_int[i] = np.average(im_next.flatten())#[im_next<threshold].flatten())
im += im_next#*mask
i += 1
print 'R.%d | S.%d/%d | %.1f Hz'%(run,i,num_im,1.0/(time.time() - t1))
im /= num_im
# -- run mean
#total_mean = np.average(mean_int)
# -- histogram
bi, bf, db = 5, 30, 0.1 # min(sum_int), max(sum_int), 1 # 0, 1e6, 1
hy,hx = np.histogram(mean_int,bins = np.arange(bi,bf,db))
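# hy holds the counts per bin, hx the bin edges (len(hx) == len(hy) + 1)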
# -- hit rate
# -- plot
plt.figure()
plt.subplot(2,1,1)
plt.imshow(np.rot90(im),vmax=24,vmin=12)
plt.colorbar()
plt.title('r.%d'%(run))
plt.subplot(2,1,2)
plt.bar(hx[:-1]-db/2.,hy,width = db)
plt.yscale('log',nonposy='clip')
plt.xlabel('mean ADUs per shot')
plt.ylabel('number of shots')
#plt.title('hit rate: %.3f percent'%(hit_rate))
plt.tight_layout()
# -- save figure
plt.savefig(fig_folder+'run%d.png'%(run))
| bsd-2-clause |
pratapvardhan/scikit-learn | sklearn/utils/estimator_checks.py | 17 | 56571 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.decomposition import NMF, ProjectedGradientNMF
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import DataConversionWarning
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'GaussianProcessRegressor',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
# Estimators with deprecated transform methods. Should be removed in 0.19 when
# _LearntSelectorMixin is removed.
DEPRECATED_TRANSFORM = [
"RandomForestClassifier", "RandomForestRegressor", "ExtraTreesClassifier",
"ExtraTreesRegressor", "DecisionTreeClassifier",
"DecisionTreeRegressor", "ExtraTreeClassifier", "ExtraTreeRegressor",
"LinearSVC", "SGDClassifier", "SGDRegressor", "Perceptron",
"LogisticRegression", "LogisticRegressionCV",
"GradientBoostingClassifier", "GradientBoostingRegressor"]
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
yield check_classifiers_regression_target
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def check_supervised_y_no_nan(name, Estimator):
# Checks that the Estimator targets are not NaN.
rng = np.random.RandomState(888)
X = rng.randn(10, 5)
y = np.ones(10) * np.inf
y = multioutput_estimator_convert_y_2d(name, y)
errmsg = "Input contains NaN, infinity or a value too large for " \
"dtype('float64')."
try:
Estimator().fit(X, y)
except ValueError as e:
if str(e) != errmsg:
raise ValueError("Estimator {0} raised warning as expected, but "
"does not match expected error message"
.format(name))
else:
raise ValueError("Estimator {0} should have raised error on fitting "
"array y with NaN value.".format(name))
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
yield check_supervised_y_no_nan
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
if name != "GaussianProcessRegressor":
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
if name not in DEPRECATED_TRANSFORM:
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check. Estimator is a class object (not an instance).
"""
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
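# A minimal usage sketch (the check expects the class itself, not an instance):
#   from sklearn.linear_model import LogisticRegression
#   check_estimator(LogisticRegression)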
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_testing_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
# MLP
if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will probably be
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more features than we have in most cases.
estimator.set_params(k=1)
if isinstance(estimator, NMF):
if not isinstance(estimator, ProjectedGradientNMF):
estimator.set_params(solver='cd')
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_testing_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_testing_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if (hasattr(estimator, "transform") and
name not in DEPRECATED_TRANSFORM):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
try:
assert_warns(DeprecationWarning,
getattr(estimator, method), X[0])
except ValueError:
pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
# check fitting a 2d array with only one sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
# check fitting a 2d array with only one feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
# check fitting a 1d array with only one sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_testing_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
if name in DEPRECATED_TRANSFORM:
funcs = ["score"]
else:
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
if name in DEPRECATED_TRANSFORM:
funcs = ["fit", "score", "partial_fit", "fit_predict"]
else:
funcs = [
"fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, Estimator.__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
if name in DEPRECATED_TRANSFORM:
methods = ["predict", "decision_function", "predict_proba"]
else:
methods = [
"predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_testing_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = ("0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* "
"is required.")
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
# Check that estimators raise an informative error when X contains NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if (hasattr(estimator, "transform") and
name not in DEPRECATED_TRANSFORM):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
@ignore_warnings
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
if name in DEPRECATED_TRANSFORM:
check_methods = ["predict", "decision_function", "predict_proba"]
else:
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_testing_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check that an error is raised if the number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
if not hasattr(alg, 'partial_fit'):
# check again as for mlp this depends on algorithm
return
set_testing_parameters(alg)
try:
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
except NotImplementedError:
return
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_testing_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_testing_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_testing_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_testing_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_testing_parameters(regressor_1)
set_testing_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_testing_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# check that regressors do not expose decision_function, predict_proba or
# predict_log_proba, or that calling them raises a DeprecationWarning
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_testing_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset; the default n_iter is likely too small
# for convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is a possible RandomState instance, but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_testing_parameters(estimator_1)
set_testing_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self'
and p.kind != p.VAR_KEYWORD
and p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default)
def multioutput_estimator_convert_y_2d(name, y):
# "MultiTask" estimators raise a ValueError if y is 1-D.
# Convert y into a 2-D array for those estimators.
if "MultiTask" in name:
return np.reshape(y, (-1, 1))
return y
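# Illustrative sketch, not part of the original checks module: for estimator
# names containing "MultiTask", a 1-D target of shape (n_samples,) is reshaped
# into a single-column 2-D array; other estimators receive y unchanged.
# The helper name below is hypothetical.
def _example_multioutput_y_conversion():
    y = np.arange(5)
    y_multitask = multioutput_estimator_convert_y_2d('MultiTaskLasso', y)
    y_plain = multioutput_estimator_convert_y_2d('Ridge', y)
    return y_multitask.shape, y_plain.shape  # ((5, 1), (5,))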
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
# Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = np.reshape(y_, (-1, 1))
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
# HuberRegressor depends on scipy.optimize.fmin_l_bfgs_b
# which doesn't return an n_iter value for old versions of SciPy.
if not (name == 'HuberRegressor' and estimator.n_iter_ is None):
assert_greater_equal(estimator.n_iter_, 1)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater_equal(iter_, 1)
else:
assert_greater_equal(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV', 'SelectFromModel'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
def check_classifiers_regression_target(name, Estimator):
# Check if classifier throws an exception when fed regression targets
boston = load_boston()
X, y = boston.data, boston.target
e = Estimator()
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg, e.fit, X, y)
| bsd-3-clause |
PrashntS/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit a regression model with Automatic Relevance Determination (ARD).
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha_ of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
sarahgrogan/scikit-learn | sklearn/externals/joblib/parallel.py | 79 | 35628 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
# Under Python 3.4+ use the 'forkserver' start method by default: this makes it
# possible to avoid crashing 3rd party libraries that manage an internal thread
# pool that does not tolerate forking
if hasattr(mp, 'get_start_method'):
method = os.environ.get('JOBLIB_START_METHOD')
if (method is None and mp.get_start_method() == 'fork'
and 'forkserver' in mp.get_all_start_methods()):
method = 'forkserver'
DEFAULT_MP_CONTEXT = mp.get_context(method=method)
else:
DEFAULT_MP_CONTEXT = None
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
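# Illustrative sketch, not part of the original joblib module: a BatchedCalls
# instance bundles several (func, args, kwargs) tuples and evaluates them with
# a single call, which lets Parallel amortize dispatch overhead over a batch.
# The helper name below is hypothetical.
def _example_batched_calls():
    tasks = [(pow, (2, i), {}) for i in range(4)]
    batch = BatchedCalls(iter(tasks))
    # Calling the batch runs every captured task sequentially.
    return len(batch), batch()  # (4, [1, 2, 4, 8])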
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
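# Illustrative sketch, not part of the original joblib module: progress
# messages are emitted when _verbosity_filter returns False, and the gap
# between reported indices grows with the index, so early batches are
# reported more often than late ones. The helper name below is hypothetical.
def _example_verbosity_filter(verbose=5, n_batches=200):
    reported = [i for i in range(n_batches)
                if not _verbosity_filter(i, verbose)]
    # The spacing between consecutive reported indices keeps increasing.
    return reported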
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
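# Illustrative sketch, not part of the original joblib module: SafeFunction
# re-raises worker-side errors as TransportableException so the formatted
# traceback survives the trip back to the parent process. The helper name
# below is hypothetical.
def _example_safe_function():
    def fail(x):
        return 1 / x
    try:
        SafeFunction(fail)(0)
    except TransportableException:
        # The original ZeroDivisionError was wrapped with its traceback text.
        return True
    return False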
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
- it is used in conjunction with `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
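# Illustrative sketch, not part of the original joblib module: delayed() does
# not run the function, it only records the call so Parallel can dispatch it
# later. The helper name below is hypothetical.
def _example_delayed():
    from math import sqrt
    func, args, kwargs = delayed(sqrt)(9)
    # The captured triple can be evaluated lazily, e.g. inside a worker.
    return func is sqrt, func(*args, **kwargs)  # (True, 3.0)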
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update the estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing lists of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocessing jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called a 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto',
temp_folder=None, max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = DEFAULT_MP_CONTEXT
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
# This lock is used to coordinate the main thread of this process with
# the async callback thread of the pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
# Prevent posix fork inside in non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
# processing of a batch of task is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
                # Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
                # while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
        # This is heuristic code to print only 'verbose' times a message
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
            # we empty it and Python lists are not thread-safe by default, hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
with self._lock:
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the above while
                # loop. No need to wait for async callbacks to trigger
                # consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
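# ---------------------------------------------------------------------------
# Editor's usage sketch (illustrative only): the machinery above is normally
# driven through joblib's public API. Assuming ``Parallel`` and ``delayed``
# are importable from ``joblib`` (not shown in this fragment), a minimal
# call looks like:
#
#     from math import sqrt
#     from joblib import Parallel, delayed
#
#     # dispatch ten tasks across two worker processes
#     results = Parallel(n_jobs=2)(delayed(sqrt)(i ** 2) for i in range(10))
#     # results == [0.0, 1.0, 2.0, ..., 9.0]
# ---------------------------------------------------------------------------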
| bsd-3-clause |
ndawe/rootpy | docs/sphinxext/ipython_directive.py | 2 | 27193 | # -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. The configurable options that can be placed in
conf.py are
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
    default is 'Out[%d]:'. This expects that the line numbers are used
in the prompt.
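
A minimal ``conf.py`` fragment (illustrative only; it assumes this module is
importable as ``ipython_directive`` on the Sphinx path, and it simply restates
the default prompt strings)::

    extensions = ['ipython_directive']
    ipython_savefig_dir = '_static'
    ipython_promptin = 'In [%d]:'
    ipython_promptout = 'Out[%d]:'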
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import cStringIO
import os
import re
import sys
import tempfile
import ast
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import matplotlib
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from docutils.parsers.rst import Directive
matplotlib.use('Agg')
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
inputline += '\n' + nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
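# Editor's illustration (not executed): for a ``part`` such as
#
#     In [1]: 2 + 2
#     Out[1]: 4
#
# block_parser returns approximately
#     [(INPUT, (None, '2 + 2', '')), (OUTPUT, '4')]
# with the exact strings depending on the rgxin/rgxout patterns supplied.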
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self):
self.cout = cStringIO.StringIO()
# Create config object for IPython
config = Config()
config.Global.display_banner = False
config.Global.exec_lines = ['import numpy as np',
'from pylab import *'
]
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize ipython, but don't start its mainloop
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done *after* instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
#print "input='%s'"%self.input
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.source_raw_reset()[1]
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""Process data block for INPUT token."""
decorator, input, rest = data
image_file = None
image_directive = None
#print 'INPUT:', data # dbg
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i==0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, image_file,
image_directive)
#print 'OUTPUT', output # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
found = found.strip()
# XXX - fperez: in 0.11, 'output' never comes with the prompt
# in it, just the actual output text. So I think all this code
# can be nuked...
# the above comment does not appear to be accurate... (minrk)
ind = found.find(output_prompt)
if ind<0:
e='output prompt="%s" does not match out line=%s' % \
(output_prompt, found)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
if found!=submitted:
e = ('doctest failure for input_lines="%s" with '
'found_output="%s" and submitted output="%s"' %
(input_lines, found, submitted) )
raise RuntimeError(e)
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin%lineno
output_prompt = self.promptout%lineno
image_file = None
image_directive = None
for token, data in block:
if token==COMMENT:
out_data = self.process_comment(data)
elif token==INPUT:
(out_data, input_lines, output, is_doctest, image_file,
image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token==OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
def process_pure_python(self, content):
"""
        content is a list of strings. It is the unedited directive content.
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
class IpythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
}
shell = EmbeddedSphinxShell()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout
def setup(self):
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
seen_docs = [i for i in os.listdir(tempfile.tempdir)
if i.startswith('seen_doc')]
if seen_docs:
fname = os.path.join(tempfile.tempdir, seen_docs[0])
docs = open(fname).read().split('\n')
if not self.state.document.current_source in docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
else: # haven't processed any docs yet
docs = []
# get config values
(savefig_dir, source_dir, rgxin,
rgxout, promptin, promptout) = self.get_config_options()
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
# write the filename to a tempfile because it's been "seen" now
if not self.state.document.current_source in docs:
fd, fname = tempfile.mkstemp(prefix="seen_doc", text=True)
fout = open(fname, 'a')
fout.write(self.state.document.current_source+'\n')
fout.close()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython','']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
#text = '\n'.join(lines)
#figs = '\n'.join(figures)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
print '\n'.join(lines)
else: #NOTE: this raises some errors, what's it for?
#print 'INSERTING %d lines'%len(lines)
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
text = '\n'.join(lines)
txtnode = nodes.literal_block(text, text)
txtnode['language'] = 'ipython'
#imgnode = nodes.image(figs)
# cleanup
self.teardown()
return []#, imgnode]
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IpythonDirective)
app.add_config_value('ipython_savefig_dir', None, True)
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_promptin', 'In [%d]:', True)
app.add_config_value('ipython_promptout', 'Out[%d]:', True)
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print 'All OK? Check figures in _static/'
| bsd-3-clause |
yydxlv/data-science-from-scratch | code/neural_networks.py | 54 | 6622 | from __future__ import division
from collections import Counter
from functools import partial
from linear_algebra import dot
import math, random
import matplotlib
import matplotlib.pyplot as plt
def step_function(x):
return 1 if x >= 0 else 0
def perceptron_output(weights, bias, x):
"""returns 1 if the perceptron 'fires', 0 if not"""
return step_function(dot(weights, x) + bias)
def sigmoid(t):
return 1 / (1 + math.exp(-t))
def neuron_output(weights, inputs):
return sigmoid(dot(weights, inputs))
def feed_forward(neural_network, input_vector):
"""takes in a neural network (represented as a list of lists of lists of weights)
and returns the output from forward-propagating the input"""
outputs = []
for layer in neural_network:
input_with_bias = input_vector + [1] # add a bias input
output = [neuron_output(neuron, input_with_bias) # compute the output
for neuron in layer] # for this layer
outputs.append(output) # and remember it
# the input to the next layer is the output of this one
input_vector = output
return outputs
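# A tiny worked example (editor's sketch, not part of the original module):
# a single-layer "network" that computes a soft AND of two binary inputs.
# Each neuron is [w1, w2, bias]; the bias weight comes last because
# feed_forward appends 1 to the input vector.
#
#     and_network = [[[20.0, 20.0, -30.0]]]   # one layer with one neuron
#     feed_forward(and_network, [1, 1])       # -> [[~0.99995]]  (fires)
#     feed_forward(and_network, [0, 1])       # -> [[~0.00005]]  (does not fire)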
def backpropagate(network, input_vector, target):
hidden_outputs, outputs = feed_forward(network, input_vector)
# the output * (1 - output) is from the derivative of sigmoid
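    # (editor's note: with sigmoid(t) = 1 / (1 + exp(-t)) one has
    #  d/dt sigmoid(t) = sigmoid(t) * (1 - sigmoid(t)), so for a squared-error
    #  loss the output delta is output * (1 - output) * (output - target))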
output_deltas = [output * (1 - output) * (output - target[i])
for i, output in enumerate(outputs)]
# adjust weights for output layer (network[-1])
for i, output_neuron in enumerate(network[-1]):
for j, hidden_output in enumerate(hidden_outputs + [1]):
output_neuron[j] -= output_deltas[i] * hidden_output
# back-propagate errors to hidden layer
hidden_deltas = [hidden_output * (1 - hidden_output) *
dot(output_deltas, [n[i] for n in network[-1]])
for i, hidden_output in enumerate(hidden_outputs)]
# adjust weights for hidden layer (network[0])
for i, hidden_neuron in enumerate(network[0]):
for j, input in enumerate(input_vector + [1]):
hidden_neuron[j] -= hidden_deltas[i] * input
def patch(x, y, hatch, color):
"""return a matplotlib 'patch' object with the specified
location, crosshatch pattern, and color"""
return matplotlib.patches.Rectangle((x - 0.5, y - 0.5), 1, 1,
hatch=hatch, fill=False, color=color)
def show_weights(neuron_idx):
weights = network[0][neuron_idx]
abs_weights = map(abs, weights)
grid = [abs_weights[row:(row+5)] # turn the weights into a 5x5 grid
for row in range(0,25,5)] # [weights[0:5], ..., weights[20:25]]
ax = plt.gca() # to use hatching, we'll need the axis
ax.imshow(grid, # here same as plt.imshow
cmap=matplotlib.cm.binary, # use white-black color scale
interpolation='none') # plot blocks as blocks
# cross-hatch the negative weights
for i in range(5): # row
for j in range(5): # column
if weights[5*i + j] < 0: # row i, column j = weights[5*i + j]
# add black and white hatches, so visible whether dark or light
ax.add_patch(patch(j, i, '/', "white"))
ax.add_patch(patch(j, i, '\\', "black"))
plt.show()
if __name__ == "__main__":
raw_digits = [
"""11111
1...1
1...1
1...1
11111""",
"""..1..
..1..
..1..
..1..
..1..""",
"""11111
....1
11111
1....
11111""",
"""11111
....1
11111
....1
11111""",
"""1...1
1...1
11111
....1
....1""",
"""11111
1....
11111
....1
11111""",
"""11111
1....
11111
1...1
11111""",
"""11111
....1
....1
....1
....1""",
"""11111
1...1
11111
1...1
11111""",
"""11111
1...1
11111
....1
11111"""]
def make_digit(raw_digit):
return [1 if c == '1' else 0
for row in raw_digit.split("\n")
for c in row.strip()]
inputs = map(make_digit, raw_digits)
targets = [[1 if i == j else 0 for i in range(10)]
for j in range(10)]
random.seed(0) # to get repeatable results
input_size = 25 # each input is a vector of length 25
num_hidden = 5 # we'll have 5 neurons in the hidden layer
output_size = 10 # we need 10 outputs for each input
# each hidden neuron has one weight per input, plus a bias weight
hidden_layer = [[random.random() for __ in range(input_size + 1)]
for __ in range(num_hidden)]
# each output neuron has one weight per hidden neuron, plus a bias weight
output_layer = [[random.random() for __ in range(num_hidden + 1)]
for __ in range(output_size)]
# the network starts out with random weights
network = [hidden_layer, output_layer]
# 10,000 iterations seems enough to converge
for __ in range(10000):
for input_vector, target_vector in zip(inputs, targets):
backpropagate(network, input_vector, target_vector)
def predict(input):
return feed_forward(network, input)[-1]
for i, input in enumerate(inputs):
outputs = predict(input)
print i, [round(p,2) for p in outputs]
print """.@@@.
...@@
..@@.
...@@
.@@@."""
print [round(x, 2) for x in
predict( [0,1,1,1,0, # .@@@.
0,0,0,1,1, # ...@@
0,0,1,1,0, # ..@@.
0,0,0,1,1, # ...@@
0,1,1,1,0]) # .@@@.
]
print
print """.@@@.
@..@@
.@@@.
@..@@
.@@@."""
print [round(x, 2) for x in
predict( [0,1,1,1,0, # .@@@.
1,0,0,1,1, # @..@@
0,1,1,1,0, # .@@@.
1,0,0,1,1, # @..@@
0,1,1,1,0]) # .@@@.
]
print
| unlicense |
devanshdalal/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
jcesardasilva/agpy | doc/conf.py | 6 | 9638 | # -*- coding: utf-8 -*-
#
# agpy documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 21 22:31:14 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import warnings  # needed below for the matplotlib plot_directive fallback warning
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
from astropy.sphinx.conf import *
del html_style # I don't want theirs because I don't have it
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.inheritance_diagram',
'numpydoc',
'sphinx.ext.ifconfig',
'sphinx.ext.intersphinx']
# 'sphinx.ext.intersphinx',
# 'sphinx.ext.doctest',
numpydoc_show_class_members = False
try:
import matplotlib.sphinxext.plot_directive
extensions += [matplotlib.sphinxext.plot_directive.__name__]
except ImportError:
warnings.warn(
"matplotlib's plot_directive could not be imported. " +
"Inline plots will not be included in the output")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'agpy'
copyright = u'2011, Adam Ginsburg'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from agpy import __version__ as version
# The short X.Y version.
version = version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'agogo'
html_style = 'extra.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = dict(
pagewidth = '1000px',
documentwidth = '760px',
sidebarwidth = '200px',
headerbg="#666666",
headercolor1="#000000",
headercolor2="#000000",
headerlinkcolor="#FF9522",
linkcolor="#4a8f43",
textalign='left',
)
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static','_static/extra.css','_static/scipy.css','_static/astropy.css']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'agpydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'agpy.tex', u'agpy Documentation',
u'Adam Ginsburg', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'agpy', u'agpy Documentation',
[u'Adam Ginsburg'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'agpy'
epub_author = u'Adam Ginsburg'
epub_publisher = u'Adam Ginsburg'
epub_copyright = u'2011, Adam Ginsburg'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
# imported from astropy
#intersphinx_mapping = {'python':('http://docs.python.org/', None),
# 'numpy':('http://docs.scipy.org/doc/','http://docs.scipy.org/doc/numpy/objects.inv'),
# 'np':('http://docs.scipy.org/doc/','http://docs.scipy.org/doc/numpy/objects.inv'),
# }
| mit |
mbakker7/ttim | ttim/circareasink.py | 1 | 7000 | import numpy as np
import matplotlib.pyplot as plt
from scipy.special import kv, iv
import inspect # Used for storing the input
from .element import Element
class CircAreaSink(Element):
"""
Create a circular area-sink with uniform infiltration rate in aquifer
layer 0.
Infiltration rate in length / time, positive for water entering
the aquifer.
Parameters
----------
model : Model object
model to which the element is added
xc : float
x-coordinate of center of area-sink
yc : float
y-coordinate of center of area-sink
    R : float
        radius of the area-sink
tsandN : list of tuples
tuples of starting time and infiltration rate after starting time
label : string or None (default: None)
label of the area-sink
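
    Examples
    --------
    A minimal sketch (editor's addition); ``ml`` stands for an existing ttim
    model object, which is not constructed in this module::

        ca = CircAreaSink(ml, xc=0, yc=0, R=100, tsandN=[(0, 0.002)])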
"""
def __init__(self, model, xc=0, yc=0, R=0.1, tsandN=[(0, 1)],
name='CircAreaSink', label=None):
self.storeinput(inspect.currentframe())
Element.__init__(self, model, nparam=1, nunknowns=0, layers=0,
tsandbc=tsandN, type='g', name=name, label=label)
self.xc = float(xc); self.yc = float(yc); self.R = float(R)
self.model.addelement(self)
def __repr__(self):
return self.name + ' at ' + str((self.xc, self.yc))
def initialize(self):
self.aq = self.model.aq.find_aquifer_data(self.xc, self.yc)
self.setbc()
self.setflowcoef()
# Since recharge is in layer 0, and RHS is -N
self.an = self.aq.coef[0, :] * self.flowcoef
self.an.shape = (self.aq.naq, self.model.nint, self.model.npint)
self.termin = self.aq.lab2 * self.R * self.an
self.termin2 = self.aq.lab2 ** 2 * self.an
self.terminq = self.R * self.an
self.termout = self.aq.lab2 * self.R * self.an
self.i1R = iv(1, self.R / self.aq.lab2)
self.k1R = kv(1, self.R / self.aq.lab2)
self.termoutq= self.R * self.an
self.dischargeinf = self.aq.coef[0, :] * self.flowcoef
self.dischargeinflayers = np.sum(self.dischargeinf *
self.aq.eigvec[self.layers, :, :], 1)
def setflowcoef(self):
'''Separate function so that this can be overloaded for other types'''
self.flowcoef = 1.0 / self.model.p # Step function
def potinf(self, x, y, aq=None):
'''Can be called with only one x,y value'''
if aq is None:
aq = self.model.aq.find_aquifer_data(x, y)
rv = np.zeros((self.nparam, aq.naq, self.model.nint,
self.model.npint), 'D')
if aq == self.aq:
r = np.sqrt((x - self.xc) ** 2 + (y - self.yc) ** 2)
pot = np.zeros(self.model.npint, 'D')
if r < self.R:
for i in range(self.aq.naq):
for j in range(self.model.nint):
#if r / abs(self.aq.lab2[i,j,0]) < self.rzero:
rv[0, i, j] = -self.termin[i, j] * \
self.K1RI0r(r, i, j) + self.termin2[i, j]
else:
for i in range(self.aq.naq):
for j in range(self.model.nint):
if (r - self.R) / \
abs(self.aq.lab2[i, j, 0]) < self.rzero:
rv[0, i, j, :] = self.termout[i, j, :] * \
self.I1RK0r(r, i, j)
rv.shape = (self.nparam, aq.naq, self.model.npval)
return rv
def disvecinf(self,x,y,aq=None):
'''Can be called with only one x,y value'''
if aq is None:
aq = self.model.aq.find_aquifer_data(x, y)
qx = np.zeros((self.nparam, aq.naq, self.model.npval), 'D')
qy = np.zeros((self.nparam, aq.naq, self.model.npval), 'D')
if aq == self.aq:
qr = np.zeros((self.nparam, aq.naq, self.model.nint,
self.model.npint), 'D')
r = np.sqrt((x - self.xc) ** 2 + (y - self.yc) ** 2)
if r < self.R:
for i in range(self.aq.naq):
for j in range(self.model.nint):
#if r / abs(self.aq.lab2[i,j,0]) < self.rzero:
qr[0, i, j] = self.terminq[i, j] * self.K1RI1r(r, i, j)
else:
for i in range(self.aq.naq):
for j in range(self.model.nint):
if (r - self.R) / \
abs(self.aq.lab2[i, j, 0]) < self.rzero:
qr[0, i, j] = self.termoutq[i, j, :] * \
self.I1RK1r(r, i, j)
qr.shape = (self.nparam, aq.naq, self.model.npval)
qx[:] = qr * (x - self.xc) / r
qy[:] = qr * (y - self.yc) / r
return qx, qy
def plot(self):
plt.plot(self.xc + self.R * np.cos(np.linspace(0, 2 * np.pi, 100)), \
self.yc + self.R * np.sin(np.linspace(0, 2 * np.pi, 100)), 'k')
def K1RI0r(self, rin, iaq, ipint):
r = rin / self.aq.lab2[iaq, ipint]
R = self.R / self.aq.lab2[iaq, ipint]
if np.isinf(self.i1R[iaq, ipint]).any():
rv = np.sqrt(1 / (4 * r * R)) * np.exp(r - R) * \
(1 + 3 / (8 * R) - 15 / (128 * R ** 2) + 315 / (3072 * R ** 3)) * \
(1 + 1 / (8 * r) + 9 / (128 * r ** 2) + 225 / (3072 * r ** 3))
else:
rv = self.k1R[iaq, ipint] * iv(0, r)
return rv
def I1RK0r(self, rin, iaq, ipint):
r = rin / self.aq.lab2[iaq, ipint]
R = self.R / self.aq.lab2[iaq, ipint]
if np.isinf(self.i1R[iaq, ipint]).any():
rv = np.sqrt(1 / (4 * r * R)) * np.exp(R - r) * \
(1 - 3 / (8 * R) - 15 / (128 * R ** 2) - 315 / (3072 * R ** 3)) * \
(1 - 1 / (8 * r) + 9 / (128 * r ** 2) - 225 / (3072 * r ** 3))
else:
rv = self.i1R[iaq, ipint] * kv(0, r)
return rv
def K1RI1r(self, rin, iaq, ipint):
r = rin / self.aq.lab2[iaq, ipint]
R = self.R / self.aq.lab2[iaq, ipint]
if np.isinf(self.i1R[iaq, ipint]).any():
rv = np.sqrt(1 / (4 * r * R)) * np.exp(r - R) * \
(1 + 3 / (8 * R) - 15 / (128 * R ** 2) + 315 / (3072 * R ** 3)) * \
(1 - 3 / (8 * r) - 15 / (128 * r ** 2) - 315 / (3072 * r ** 3))
else:
rv = self.k1R[iaq, ipint] * iv(1, r)
return rv
def I1RK1r(self, rin, iaq, ipint):
r = rin / self.aq.lab2[iaq, ipint]
R = self.R / self.aq.lab2[iaq, ipint]
if np.isinf(self.i1R[iaq, ipint]).any():
rv = np.sqrt(1 / (4 * r * R)) * np.exp(R - r) * \
(1 - 3 / (8 * R) - 15 / (128 * R ** 2) - 315 / (3072 * R ** 3)) * \
(1 + 3 / (8 * r) - 15 / (128 * r ** 2) + 315 / (3072 * r ** 3))
else:
rv = self.i1R[iaq, ipint] * kv(1, r)
        return rv
| mit |
marcocaccin/scikit-learn | sklearn/cluster/mean_shift_.py | 6 | 15512 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <conradlee@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Martino Sorbaro <martino.sorbaro@ed.ac.uk>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None, n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
.. versionadded:: 0.17
Parallel Execution using *n_jobs*.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
                 " Try a different seeding strategy"
                 " or increase the bandwidth." % bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
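# --- Illustrative usage sketch (added for exposition; not called anywhere) ---
# A minimal, hedged example of driving the mean_shift function above on
# synthetic blobs. The make_blobs parameters and the quantile value are
# assumptions chosen only for demonstration.
def _demo_mean_shift():
    from sklearn.datasets import make_blobs
    X_demo, _ = make_blobs(n_samples=300, centers=3, cluster_std=0.6,
                           random_state=0)
    # estimate_bandwidth is defined earlier in this module
    bw = estimate_bandwidth(X_demo, quantile=0.2)
    centers, labels = mean_shift(X_demo, bandwidth=bw, bin_seeding=True)
    print("estimated bandwidth: %.3f, clusters found: %d"
          % (bw, len(centers)))
    return centers, labels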
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
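# --- Illustrative sketch (added for exposition; not called anywhere) ---
# Shows why bin seeding is cheaper: with a coarse bin_size, get_bin_seeds
# collapses many nearby points onto a small set of grid seeds. The random
# data and the bin_size value are assumptions chosen only for demonstration.
def _demo_bin_seeds():
    X_demo = np.random.RandomState(0).rand(1000, 2)
    seeds = get_bin_seeds(X_demo, bin_size=0.25, min_bin_freq=1)
    print("%d points reduced to %d bin seeds" % (len(X_demo), len(seeds)))
    return seeds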
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
a Ball Tree to look up members of each kernel, the complexity will tend
towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
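# --- Illustrative usage sketch (added for exposition; not called anywhere) ---
# Shows the estimator-style API of the MeanShift class above: fit on synthetic
# data, then inspect cluster_centers_ and call predict on new points. The
# parameter choices are assumptions made only for demonstration.
def _demo_mean_shift_estimator():
    from sklearn.datasets import make_blobs
    X_demo, _ = make_blobs(n_samples=200, centers=4, cluster_std=0.5,
                           random_state=1)
    ms = MeanShift(bin_seeding=True)
    ms.fit(X_demo)
    print("number of estimated clusters: %d" % len(ms.cluster_centers_))
    return ms.predict(X_demo[:5])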
| bsd-3-clause |
UDST/pandana | tests/test_pandash5.py | 1 | 4514 | import os
import tempfile
import pandas as pd
import pytest
import pandas.util.testing as pdt
from pandana import Network
from pandana.testing import skipifci
from pandana.loaders import pandash5 as ph5
@pytest.fixture(scope='module')
def nodes():
return pd.DataFrame(
{'x': [1, 2, 3, 4] * 3,
'y': [1] * 4 + [2] * 4 + [3] * 4})
@pytest.fixture(scope='module')
def edges():
return pd.DataFrame(
{'from': [0, 4, 5, 6, 2, 2, 6, 10, 9, 7],
'to': [4, 5, 6, 2, 1, 3, 10, 9, 8, 11]})
@pytest.fixture(scope='module')
def impedance_names():
return ['distance', 'time']
@pytest.fixture(scope='module')
def edge_weights(edges, impedance_names):
return pd.DataFrame(
{impedance_names[0]: [1] * len(edges),
impedance_names[1]: list(range(1, len(edges) + 1))})
@pytest.fixture(scope='module')
def two_way():
return True
@pytest.fixture(scope='module')
def network(nodes, edges, edge_weights, two_way):
return Network(
nodes['x'], nodes['y'], edges['from'], edges['to'], edge_weights,
two_way)
@pytest.fixture(scope='module')
def edges_df(edges, edge_weights):
return edges.join(edge_weights)
@pytest.fixture(scope='module')
def rm_nodes():
return [0, 7, 6]
@pytest.fixture
def tmpfile(request):
fname = tempfile.NamedTemporaryFile().name
def cleanup():
if os.path.exists(fname):
os.remove(fname)
request.addfinalizer(cleanup)
return fname
@skipifci
def test_remove_nodes(network, rm_nodes):
# node 0 is connected to node 4, which is in turn connected to node 5
# node 7 is connected to node 11, which has no other connections
# node 6 is connected to nodes 2, 5, and 10,
# which all have other connections
nodes, edges = ph5.remove_nodes(network, rm_nodes)
exp_nodes = pd.DataFrame(
{'x': [2, 3, 4, 1, 2, 1, 2, 3, 4],
'y': [1, 1, 1, 2, 2, 3, 3, 3, 3]},
index=[1, 2, 3, 4, 5, 8, 9, 10, 11])
exp_edges = pd.DataFrame(
{'from': [4, 2, 2, 10, 9],
'to': [5, 1, 3, 9, 8],
'distance': [1, 1, 1, 1, 1],
'time': [2, 5, 6, 8, 9]},
index=[1, 4, 5, 7, 8])
exp_edges = exp_edges[['from', 'to', 'distance', 'time']] # order columns
pdt.assert_frame_equal(nodes, exp_nodes)
pdt.assert_frame_equal(edges, exp_edges)
@skipifci
def test_network_to_pandas_hdf5(
tmpfile, network, nodes, edges_df, impedance_names, two_way):
ph5.network_to_pandas_hdf5(network, tmpfile)
store = pd.HDFStore(tmpfile)
pdt.assert_frame_equal(store['nodes'], nodes)
pdt.assert_frame_equal(store['edges'], edges_df)
pdt.assert_series_equal(store['two_way'], pd.Series([two_way]))
pdt.assert_series_equal(
store['impedance_names'], pd.Series(impedance_names))
@skipifci
def test_network_to_pandas_hdf5_removal(
tmpfile, network, impedance_names, two_way, rm_nodes):
nodes, edges = ph5.remove_nodes(network, rm_nodes)
ph5.network_to_pandas_hdf5(network, tmpfile, rm_nodes)
store = pd.HDFStore(tmpfile)
pdt.assert_frame_equal(store['nodes'], nodes)
pdt.assert_frame_equal(store['edges'], edges)
pdt.assert_series_equal(store['two_way'], pd.Series([two_way]))
pdt.assert_series_equal(
store['impedance_names'], pd.Series(impedance_names))
@skipifci
def test_network_from_pandas_hdf5(
tmpfile, network, nodes, edges_df, impedance_names, two_way):
ph5.network_to_pandas_hdf5(network, tmpfile)
new_net = ph5.network_from_pandas_hdf5(Network, tmpfile)
pdt.assert_frame_equal(new_net.nodes_df, nodes)
pdt.assert_frame_equal(new_net.edges_df, edges_df)
assert new_net._twoway == two_way
assert new_net.impedance_names == impedance_names
@skipifci
def test_network_save_load_hdf5(
tmpfile, network, impedance_names, two_way, rm_nodes):
network.save_hdf5(tmpfile, rm_nodes)
new_net = Network.from_hdf5(tmpfile)
nodes, edges = ph5.remove_nodes(network, rm_nodes)
pdt.assert_frame_equal(new_net.nodes_df, nodes)
pdt.assert_frame_equal(new_net.edges_df, edges)
assert new_net._twoway == two_way
assert new_net.impedance_names == impedance_names
# this is an odd place for this test because it's not related to HDF5,
# but my test Network is perfect.
@skipifci
def test_network_low_connectivity_nodes(network, impedance_names):
nodes = network.low_connectivity_nodes(10, 3, imp_name=impedance_names[0])
assert list(nodes) == [7, 11]
| agpl-3.0 |
jjx02230808/project0223 | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
btabibian/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 38 | 2381 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, edgecolors='black')
plt.title(titles[i])
plt.suptitle("Unlabeled points are colored white", y=0.1)
plt.show()
| bsd-3-clause |
google-research/google-research | talk_about_random_splits/probing/create_test_set_from_gutenberg_word_content_main.py | 1 | 6287 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Creates test sets for the SentEval word_content task from Gutenberg data.
In the word_content task, all sentences contain exactly 1 out of a set of
mid-frequency words. This script creates new test sets conforming to the same
setup.
Please note that the number of target words in the new test set might be smaller
than the original 1000, because we might not have enough sentences for all of
the target words. This is not a problem, because it closely resembles
realistic test-time scenarios.
The script assumes that the Gutenberg data was preprocessed with
fl_preprocess_gutenberg_main.
Train and dev sets are kept as is, meaning they are the original SentEval
train/dev data. This way the existing architecture can be trained as usual but
tested on new Gutenberg data.
This script doesn't do any case normalization, either for the target words or
for sentence duplicate detection.
This script generates the following directory structure under the
`base_out_dir`. This follows the original SentEval directory structure and thus
allows running the original scripts with no change.
base_out_dir/
probing/TASK_NAME.txt # Data for this task.
probing/TASK_NAME.txt-settings.json # Some information on the generated data.
TASK_NAME.txt contains all examples with their respective set labels attached.
This file follows the original SentEval data format.
Example call:
python -m \
talk_about_random_splits.probing.\
create_test_set_from_gutenberg_word_content_main \
--senteval_path="/tmp/senteval/task_data/probing" \
--base_out_dir="YOUR_PATH_HERE" \
--gutenberg_path="YOUR_PREPROCESSED_DATA_PATH_HERE" --alsologtostderr \
"""
import csv
import json
import os
import random
from absl import app
from absl import flags
from absl import logging
import pandas as pd
from talk_about_random_splits.probing import probing_utils
FLAGS = flags.FLAGS
flags.DEFINE_string(
'gutenberg_path', None,
'Path to the preprocessed Gutenberg data. Data must be preprocessed with '
'fl_preprocess_gutenberg_main.')
flags.DEFINE_string('senteval_path', None,
'Path to the original SentEval data in tsv format.')
flags.DEFINE_string(
'base_out_dir', None,
'Base working dir in which to create subdirs for this script\'s results.')
flags.DEFINE_string(
'split_name', 'gutenberg',
'Determines the base name of result sub-directories in `base_out_dir`.')
flags.DEFINE_integer('num_trials', 5, 'Number of trials to generate data for.')
flags.DEFINE_integer('test_set_size', 10000,
'Number of requested examples in test sets.')
_TASK_NAME = 'word_content.txt'
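# Illustrative sketch (not used by this script): one way to read the generated
# TASK_NAME.txt back into memory. The tab-separated set/target/text layout
# mirrors what main() writes below; the helper name and the choice of pandas
# are assumptions made only for demonstration.
def _load_generated_task_file(path):
  return pd.read_csv(
      path,
      sep='\t',
      header=None,
      names=['set', 'target', 'text'],
      quoting=csv.QUOTE_NONE)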
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
word_content_data = probing_utils.read_senteval_data(FLAGS.senteval_path,
_TASK_NAME)
target_words = set(word_content_data['target'])
senteval_sentences = set(word_content_data['text'])
target_word_to_sentences = probing_utils.get_target_word_to_sentence_mapping(
target_words, senteval_sentences,
probing_utils.get_strings_from_sharded_recordio(FLAGS.gutenberg_path))
class_count = len(target_words)
items_per_class = FLAGS.test_set_size // class_count
target_word_counts = {k: len(v) for k, v in target_word_to_sentences.items()}
valid_target_words = {
k for k, v in target_word_counts.items() if v >= items_per_class
}
logging.info('Number of target words for which we have enough data: %d.',
len(valid_target_words))
assert valid_target_words
train_set = word_content_data.loc[word_content_data['set'] == 'tr', :]
dev_set = word_content_data.loc[word_content_data['set'] == 'va', :]
experiment_base_dir = os.path.join(FLAGS.base_out_dir,
f'{FLAGS.split_name}-{{}}')
for trial_id in range(FLAGS.num_trials):
split_dir = experiment_base_dir.format(trial_id)
probing_dir = os.path.join(split_dir, 'probing')
settings_path = os.path.join(probing_dir, f'{_TASK_NAME}-settings.json')
data_out_path = os.path.join(probing_dir, _TASK_NAME)
logging.info('Starting run: %d.', trial_id)
data_sample = []
for valid_target_word in valid_target_words:
sentences = target_word_to_sentences[valid_target_word]
current_sample = random.sample(sentences, items_per_class)
data_sample.extend(
('te', valid_target_word, sample) for sample in current_sample)
test_set = pd.DataFrame(data_sample, columns=train_set.columns)
new_data = pd.concat([train_set, dev_set, test_set], ignore_index=True)
logging.info('Writing output to file: %s.', data_out_path)
os.makedirs(probing_dir)
with open(settings_path, 'w') as settings_file:
settings = {
'task_name': _TASK_NAME,
'trial_id': trial_id,
'train_size': len(train_set),
'dev_size': len(dev_set),
'test_size': len(test_set),
'valid_target_words_size': len(valid_target_words),
'valid_target_words': sorted(valid_target_words),
}
logging.info('Settings:\n%r', settings)
json.dump(settings, settings_file, indent=2)
with open(data_out_path, 'w') as data_file:
# Don't add quoting to retain the original format unaltered.
new_data[['set', 'target', 'text']].to_csv(
data_file,
sep='\t',
header=False,
index=False,
quoting=csv.QUOTE_NONE,
doublequote=False)
if __name__ == '__main__':
flags.mark_flags_as_required(
['gutenberg_path', 'senteval_path', 'base_out_dir'])
app.run(main)
| apache-2.0 |
saiwing-yeung/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 169 | 8809 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
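# Note on the reference implementation above (added for exposition): the step
# sizes follow the Passive-Aggressive updates of Crammer et al. (2006). For an
# example x with loss L and squared norm ||x||^2, PA-I ("hinge" /
# "epsilon_insensitive") uses tau = min(C, L / ||x||^2), while PA-II
# ("squared_hinge" / "squared_epsilon_insensitive") uses
# tau = L / (||x||^2 + 1 / (2 * C)); the weights are then moved by
# w <- w + tau * y * x for classification and w <- w + tau * sign(y - p) * x
# for regression, which is exactly what MyPassiveAggressive.fit does.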
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
ssaeger/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 42 | 20925 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.model_selection import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.exceptions import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
@ignore_warnings
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = assert_warns(DeprecationWarning, estimator.decision_function, X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
# now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with additional positive
# option. However for the middle part, the comparison of coefficient values
# for a range of alphas, we had to make an adaptations. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
| bsd-3-clause |
allantu/trading-with-python | sandbox/spreadCalculations.py | 78 | 1496 | '''
Created on 28 okt 2011
@author: jev
'''
from tradingWithPython import estimateBeta, Spread, returns, Portfolio, readBiggerScreener
from tradingWithPython.lib import yahooFinance
from pandas import DataFrame, Series
import numpy as np
import matplotlib.pyplot as plt
import os
symbols = ['SPY','IWM']
y = yahooFinance.HistData('temp.csv')
y.startDate = (2007,1,1)
df = y.loadSymbols(symbols,forceDownload=False)
#df = y.downloadData(symbols)
res = readBiggerScreener('CointPairs.csv')
#---check with spread scanner
#sp = DataFrame(index=symbols)
#
#sp['last'] = df.ix[-1,:]
#sp['targetCapital'] = Series({'SPY':100,'IWM':-100})
#sp['targetShares'] = sp['targetCapital']/sp['last']
#print sp
#The dollar-neutral ratio is about 1 * SPY - 1.7 * IWM. You will get the spread = zero (or probably very near zero)
#s = Spread(symbols, histClose = df)
#print s
#s.value.plot()
#print 'beta (returns)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='returns')
#print 'beta (log)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='log')
#print 'beta (standard)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='standard')
#p = Portfolio(df)
#p.setShares([1, -1.7])
#p.value.plot()
quote = yahooFinance.getQuote(symbols)
print quote
s = Spread(symbols,histClose=df, estimateBeta = False)
s.setLast(quote['last'])
s.setShares(Series({'SPY':1,'IWM':-1.7}))
print s
#s.value.plot()
#s.plot()
fig = plt.figure(2)
s.plot()
| bsd-3-clause |
Akshay0724/scikit-learn | examples/plot_kernel_ridge_regression.py | 26 | 6289 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors',
zorder=2)
plt.scatter(X[:100], y[:100], c='k', label='data', zorder=1)
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7, dtype=np.int)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="neg_mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="neg_mean_squared_error", cv=10)
plt.plot(train_sizes, -test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, -test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
jim-pansn/graph-tool | doc/conf.py | 3 | 8055 | # -*- coding: utf-8 -*-
#
# graph-tool documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 26 18:29:16 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('.'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'mathjax', 'sphinx.ext.autosummary',
'numpydoc',
'sphinx.ext.extlinks',
'sphinx.ext.viewcode'
#'sphinx.ext.linkcode'
#'matplotlib.sphinxext.plot_directive'
]
mathjax_path = "MathJax/MathJax.js?config=default"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'graph-tool'
copyright = u'2015, Tiago de Paula Peixoto <tiago@skewed.de>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from graph_tool import __version__ as gt_version
version = gt_version.split()[0]
# The full version, including alpha/beta/rc tags.
release = gt_version.split()[0]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# doctest
doctest_global_setup = open("pyenv.py").read()
# Options for HTML outputs
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'default.css'
html_theme = "gt_theme"
html_theme_path = ["."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "graph-icon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = 'http://graph-tool.skewed.de/doc/'
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'graph-tooldoc'
# Options for LaTeX output
# ------------------------
# Grouping the document tree into LaTeX files. List of tuples (source start
# file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'graph-tool.tex', ur'graph-tool documentation',
ur'Tiago de Paula Peixoto', 'manual'),
]
latex_preamble = """
\setcounter{tocdepth}{2}
"""
latex_show_pagerefs = True
latex_show_urls = False
latex_paper_size = "a4"
latex_logo = "blockmodel.pdf"
latex_elements = {
'papersize': "a4paper",
'fontpkg': r"\usepackage{bookman}"}
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/3', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'cairo': ('http://www.cairographics.org/documentation/pycairo/3/', None),
'ipython': ('http://ipython.org/ipython-doc/stable/', None),
'panda': ('http://pandas.pydata.org/pandas-docs/stable/', None)}
extlinks = {'ticket': ('http://graph-tool.skewed.de/tickets/ticket/%s',
'ticket '),
'doi': ('http://dx.doi.org/%s', 'DOI: '),
'arxiv': ('http://arxiv.org/abs/%s', 'arXiv: ')}
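# With the extlinks mapping above, reST sources can use shorthand roles such
# as :ticket:`123`, :doi:`10.1000/example` or :arxiv:`1234.5678`, which expand
# to the corresponding URLs with the "ticket ", "DOI: " and "arXiv: " captions;
# the identifiers shown here are only illustrative.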
# def process_docstring(app, what, name, obj, options, lines):
# for i, line in enumerate(lines):
# if "arg1" in line and "->" in line:
# lines[i] = ""
# if "C++ signature :" in line or "graph_tool::Python" in line:
# lines[i] = ""
# def setup(app):
# app.connect('autodoc-process-docstring', process_docstring)
# plot directive
import pyenv
plot_rcparams = pyenv.rcParams
#plot_pre_code = open("pyenv.py").read()
autodoc_default_flags = ['members', 'undoc-members']
numpydoc_show_class_members = False
autodoc_docstring_signature = False
autodoc_member_order = 'bysource'
autoclass_content = 'both'
imported_members = True
def linkcode_resolve(domain, info):
if domain != 'py':
return None
if not info['module']:
return None
modname = info['module'].replace('.', '/')
return "https://git.skewed.de/count0/graph-tool/tree/master/src/%s/__init__.py" % modname
| gpl-3.0 |
jrmontag/Data-Science-45min-Intros | adaboost-101/sample_code.py | 20 | 5548 | '''
Created on Nov 28, 2010
Adaboost is short for Adaptive Boosting
@author: Peter
'''
from numpy import *
def loadSimpData():
datMat = matrix([[ 1. , 2.1],
[ 2. , 1.1],
[ 1.3, 1. ],
[ 1. , 1. ],
[ 2. , 1. ]])
classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]
return datMat,classLabels
def loadDataSet(fileName): #general function to parse tab -delimited floats
numFeat = len(open(fileName).readline().split('\t')) #get number of fields
dataMat = []; labelMat = []
fr = open(fileName)
for line in fr.readlines():
lineArr =[]
curLine = line.strip().split('\t')
for i in range(numFeat-1):
lineArr.append(float(curLine[i]))
dataMat.append(lineArr)
labelMat.append(float(curLine[-1]))
return dataMat,labelMat
def stumpClassify(dataMatrix,dimen,threshVal,threshIneq):#just classify the data
retArray = ones((shape(dataMatrix)[0],1))
if threshIneq == 'lt':
retArray[dataMatrix[:,dimen] <= threshVal] = -1.0
else:
retArray[dataMatrix[:,dimen] > threshVal] = -1.0
return retArray
def buildStump(dataArr,classLabels,D):
dataMatrix = mat(dataArr); labelMat = mat(classLabels).T
m,n = shape(dataMatrix)
numSteps = 10.0; bestStump = {}; bestClasEst = mat(zeros((m,1)))
minError = inf #init error sum, to +infinity
for i in range(n):#loop over all dimensions
rangeMin = dataMatrix[:,i].min(); rangeMax = dataMatrix[:,i].max();
stepSize = (rangeMax-rangeMin)/numSteps
for j in range(-1,int(numSteps)+1):#loop over all range in current dimension
for inequal in ['lt', 'gt']: #go over less than and greater than
threshVal = (rangeMin + float(j) * stepSize)
predictedVals = stumpClassify(dataMatrix,i,threshVal,inequal)#call stump classify with i, j, lessThan
errArr = mat(ones((m,1)))
errArr[predictedVals == labelMat] = 0
weightedError = D.T*errArr #calc total error multiplied by D
#print "split: dim %d, thresh %.2f, thresh ineqal: %s, the weighted error is %.3f" % (i, threshVal, inequal, weightedError)
if weightedError < minError:
minError = weightedError
bestClasEst = predictedVals.copy()
bestStump['dim'] = i
bestStump['thresh'] = threshVal
bestStump['ineq'] = inequal
return bestStump,minError,bestClasEst
def adaBoostTrainDS(dataArr,classLabels,numIt=40):
weakClassArr = []
m = shape(dataArr)[0]
D = mat(ones((m,1))/m) #init D to all equal
aggClassEst = mat(zeros((m,1)))
for i in range(numIt):
bestStump,error,classEst = buildStump(dataArr,classLabels,D)#build Stump
#print "D:",D.T
alpha = float(0.5*log((1.0-error)/max(error,1e-16)))#calc alpha, throw in max(error,eps) to account for error=0
bestStump['alpha'] = alpha
weakClassArr.append(bestStump) #store Stump Params in Array
#print "classEst: ",classEst.T
expon = multiply(-1*alpha*mat(classLabels).T,classEst) #exponent for D calc, getting messy
D = multiply(D,exp(expon)) #Calc New D for next iteration
D = D/D.sum()
#calc training error of all classifiers, if this is 0 quit for loop early (use break)
aggClassEst += alpha*classEst
#print "aggClassEst: ",aggClassEst.T
aggErrors = multiply(sign(aggClassEst) != mat(classLabels).T,ones((m,1)))
errorRate = aggErrors.sum()/m
print "total error: ",errorRate
if errorRate == 0.0: break
return weakClassArr,aggClassEst
def adaClassify(datToClass,classifierArr):
dataMatrix = mat(datToClass)#do stuff similar to last aggClassEst in adaBoostTrainDS
m = shape(dataMatrix)[0]
aggClassEst = mat(zeros((m,1)))
for i in range(len(classifierArr)):
classEst = stumpClassify(dataMatrix,classifierArr[i]['dim'],\
classifierArr[i]['thresh'],\
classifierArr[i]['ineq'])#call stump classify
aggClassEst += classifierArr[i]['alpha']*classEst
print aggClassEst
return sign(aggClassEst)
def plotROC(predStrengths, classLabels):
import matplotlib.pyplot as plt
cur = (1.0,1.0) #cursor
ySum = 0.0 #variable to calculate AUC
numPosClas = sum(array(classLabels)==1.0)
yStep = 1/float(numPosClas); xStep = 1/float(len(classLabels)-numPosClas)
sortedIndicies = predStrengths.argsort()#get sorted index, it's reverse
fig = plt.figure()
fig.clf()
ax = plt.subplot(111)
#loop through all the values, drawing a line segment at each point
for index in sortedIndicies.tolist()[0]:
if classLabels[index] == 1.0:
delX = 0; delY = yStep;
else:
delX = xStep; delY = 0;
ySum += cur[1]
#draw line from cur to (cur[0]-delX,cur[1]-delY)
ax.plot([cur[0],cur[0]-delX],[cur[1],cur[1]-delY], c='b')
cur = (cur[0]-delX,cur[1]-delY)
ax.plot([0,1],[0,1],'b--')
plt.xlabel('False positive rate'); plt.ylabel('True positive rate')
plt.title('ROC curve for AdaBoost horse colic detection system')
ax.axis([0,1,0,1])
plt.show()
print "the Area Under the Curve is: ",ySum*xStep
| unlicense |
tflovorn/scExplorer | plots/grapher.py | 1 | 5305 | import sys
import json
import math
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from matplotlib.font_manager import FontProperties
from numpy import arange, meshgrid
_GRAPH_DEFAULTS = {"xlabel":"$x$", "ylabel":"$y$", "num_ticks":5,
"axis_label_fontsize":"x-large", "tick_formatstr":"%.2f",
"legend_fontsize":"large", "legend_loc":0, "legend_title":None,
"ymin":None, "xmax":None, "graph_filepath":None, "plot_type": "scatter",
"delta":"0.05",
"th":None,"thp":None,"t0":None,"D1":None,"Mu_h":None,"epsilon_min":None}
_SERIES_DEFAULTS = {"label":None, "style":"k."}
def parse_file(file_path):
'''Return the plot representation of the JSON file specified.'''
# -- todo : check for IOError --
return import_json(open(file_path, 'r').read())
def import_json(json_string):
'''Return the plot representation of the given JSON string.'''
graph_data = json.loads(json_string)
if isinstance(graph_data, list):
graph_data = [add_default_data(graph) for graph in graph_data]
else:
graph_data = add_default_data(graph_data)
return graph_data
def add_default_data(graph_data):
# graph-wide defaults
for key, value in _GRAPH_DEFAULTS.items():
if key not in graph_data:
graph_data[key] = value
# hack to give a fresh series list each time
if "series" not in graph_data:
graph_data["series"] = []
# series-specific defaults
for series in graph_data["series"]:
for key, value in _SERIES_DEFAULTS.items():
if key not in series:
series[key] = value
return graph_data
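# Minimal sketch of the dictionary layout that parse_file/import_json are
# expected to produce; keys follow _GRAPH_DEFAULTS/_SERIES_DEFAULTS and the
# numeric values below are made up.  Passing it through add_default_data and
# then make_graph would produce a simple scatter plot.
_EXAMPLE_GRAPH_DATA = {
    "xlabel": "$x$",
    "ylabel": "$y$",
    "graph_filepath": None,  # set to a path prefix to write .png/.eps output
    "series": [
        {"label": "sample series", "style": "k.",
         "data": [[0.0, 1.0], [0.5, 1.5], [1.0, 2.2]]},
    ],
}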
def make_graph(graph_data):
'''Take a dictionary representing a graph or a list of such dictionaries.
Build the graph(s), save them to file(s) (if requested), and return the
matplotlib figures.
'''
# Process a list of graphs one element at a time.
if isinstance(graph_data, list):
return [make_graph(some_graph) for some_graph in graph_data]
# If we need to make a Fermi surface plot, go to the function for that.
if graph_data["plot_type"] == "Fermi_surface":
return plot_Fermi_surface(graph_data)
# If we get here, this is not a Fermi surface plot - make scatter plot instead.
try:
dims = graph_data["dimensions"]
fig = plt.figure(figsize=(dims[0], dims[1]))
except:
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
bounds = [None, None]
# plot the data
for series in graph_data["series"]:
fig,axes,bounds = _graph_series(graph_data,series,fig,axes,bounds)
# set properties
fontprop_legend = FontProperties(size=graph_data["legend_fontsize"])
axes.legend(loc=graph_data["legend_loc"], title=graph_data["legend_title"],
prop=fontprop_legend)
axes.set_xlabel(graph_data["xlabel"], size=graph_data["axis_label_fontsize"])
axes.set_ylabel(graph_data["ylabel"], size=graph_data["axis_label_fontsize"])
axes.tick_params(axis='x', pad=7)
if graph_data["xmax"] != None and graph_data["xmax"] != "":
axes.set_xlim(right=float(graph_data["xmax"]), auto=None)
if graph_data["ymin"] != None and graph_data["ymin"] != "":
axes.set_ylim(bottom=float(graph_data["ymin"]), auto=None)
_save_figure(graph_data, fig)
return fig, axes
def _graph_series(graph_data, series, fig, axes, bounds):
# -- todo : set ticks --
axes.plot(_xData(series), _yData(series), series["style"],
label=series["label"])
return fig, axes, bounds
def _xData(series):
return [point[0] for point in series["data"]]
def _yData(series):
return [point[1] for point in series["data"]]
def _save_figure(graph_data, fig):
if graph_data["graph_filepath"] is None:
return
fig.savefig(graph_data["graph_filepath"] + ".png", bbox_inches="tight", dpi=200)
fig.savefig(graph_data["graph_filepath"] + ".eps", bbox_inches="tight", dpi=200)
# Plot a single Fermi surface.
# To make this plot, graph_data["Fermi_surface_data"] must be a dictionary
# with the keys "th", "thp", "t0", "D1", "Mu_h", and "epsilon_min".
def plot_Fermi_surface(graph_data):
delta = float(graph_data["delta"])
x = arange(-math.pi, math.pi, delta)
y = arange(-math.pi, math.pi, delta)
X, Y = meshgrid(x, y)
FS = lambda x, y: _step(_xi_h(graph_data, x, y))
Z = []
for i in range(len(y)):
Z.append([])
for j in range(len(x)):
Z[i].append(FS(x[j], y[i]))
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.set_xlabel(graph_data["xlabel"], size=graph_data["axis_label_fontsize"])
axes.set_ylabel(graph_data["ylabel"], size=graph_data["axis_label_fontsize"])
CS = axes.contour(X, Y, Z)
_save_figure(graph_data, fig)
def _xi_h(fsd, kx, ky):
sx, sy = math.sin(kx), math.sin(ky)
envVars = map(float, [fsd["th"], fsd["D1"], fsd["t0"], fsd["thp"], fsd["epsilon_min"], fsd["Mu_h"]])
th, D1, t0, thp, epsilon_min, Mu_h = envVars
eps = 2.0*th*((sx+sy)*(sx+sy) - 1.0) + 4.0*(2.0*D1*t0 - thp)*sx*sy - epsilon_min
return eps - Mu_h
def _step(x):
if x < 0.0:
return 0.0
else:
return 1.0
if __name__ == "__main__":
if len(sys.argv) > 1:
make_graph(parse_file(sys.argv[1]))
| mit |
markus-antero/Stock | data/geographic/geo.py | 1 | 10188 | '''
Created on 2.5.2017
@author: Markus.Walden
- https://media.readthedocs.org/pdf/geopy/latest/geopy.pdf
- https://freegisdata.rtwilson.com/
- https://www.ncbi.nlm.nih.gov/gds/
- http://www.gis.usu.edu/~chrisg/python/2009/
gdal
- http://www.gdal.org/
- https://pypi.python.org/pypi/GDAL
- http://desktop.arcgis.com/en/analytics/python-in-arcgis/iii-python.htm
shapefile - loaded world map
- https://github.com/GeospatialPython/pyshp
arcgis manuals
- https://github.com/Esri/arcgis-python-api/tree/master/samples/04_gis_analysts_data_scientists
- https://developers.arcgis.com/python/
- https://developers.arcgis.com/python/sample-notebooks/chennai-floods-analysis/
- https://developers.arcgis.com/documentation/
    - web service
- http://resources.arcgis.com/en/help/runtime-wpf/concepts/index.html#/Welcome_to_the_help_for_developing_Operations_Dashboard_for_ArcGIS_add_ins/0170000000np000000/
python course
- https://www.e-education.psu.edu/geog485/book/export/html/
publish data arcgis
- https://github.com/Esri/arcgis-python-api/tree/master/samples/05_content_publishers/data
aggregate
- https://developers.arcgis.com/python/guide/summarizing-feature-data/
'''
import arcgis
from arcgis.gis import GIS
from arcgis.features import find_locations
from arcgis.features.analyze_patterns import interpolate_points
from arcgis.features import use_proximity
import pandas as pd
import json
from data.geographic import gisPass, gisUser
class AddInformationUsingDataFrame(object):
'''
classdocs
Templates:
item_properties = {
"title": "Worldwide gun ownership",
"tags" : "guns,violence",
"snippet": " GSR Worldwide gun ownership",
"description": "test description",
"text": json.dumps({"featureCollection": {"layers": [dict(fc.layer)]}}),
"type": "Feature Collection",
"typeKeywords": "Data, Feature Collection, Singlelayer",
"extent" : "-102.5272,-41.7886,172.5967,64.984"
}
'''
def __init__(self, conn = "https://www.arcgis.com"):
'''
Constructor
Initiate gis instance
'''
self.gis = GIS(conn, gisUser, gisPass)
def addGisItem(self, item_properties):
item = self.gis.content.add(item_properties)
return item
def createDir(self, dir = 'packages'):
try:
self.gis.content.create_folder(dir)
return True
except:
return False
def addContent(self, source, destinationDir):
try:
self.gis.content.add({}, data=source, folder=destinationDir)
return True
except:
return False
@staticmethod
def postMapUseWikipedia(item_properties, gis, fc):
map1 = gis.map('UK')
map1.add_layer(fc, {"renderer":"ClassedSizeRenderer",
"field_name": "Guns_per_100_Residents"})
item = gis.content.add(item_properties)
return map1, item
class GisMapUpdate(AddInformationUsingDataFrame):
def __init__(self):
        super().__init__()
def addSDFile(self, sd_file = "./data/Ebola_Treatment_Units.sd"):
item = self.gis.content.add({},sd_file)
new_item = item.publish()
return new_item
def readJSONFile(self, jsonFile):
with open(jsonFile,"r") as file_handle:
self.web_map_json = json.load(file_handle)
@staticmethod
def printOperationalLayer(web_scene_obj):
for layer in web_scene_obj['operationalLayers']:
print(layer['title'] + " :: " + layer['layerType'])
if layer['layerType'] == 'GroupLayer':
for sub_layer in layer['layers']:
print("\t" + sub_layer['title'] + " :: "+ sub_layer['url'])
@staticmethod
def makeMapLayer(df_toInterpolate, gis, map, featureCollection):
# 1. Create an arcgis.features.FeatureCollection object by importing the pandas dataframe with an address field
rainfall = gis.content.import_data(df_toInterpolate, {"Address" : "LOCATION"})
# 2. add dataframe to arcgis map using method add_layer
map.add_layer(rainfall, { "renderer":"ClassedSizeRenderer", "field_name":"RAINFALL" })
# 3. using the interpolate_point
interpolated_rf = interpolate_points(rainfall, field='RAINFALL')
# 4. using find location and use
floodprone_buffer = use_proximity.create_buffers(find_locations.trace_downstream(featureCollection), [ 1 ], units='Miles')
return interpolated_rf, floodprone_buffer
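# Illustrative sketch of how the helper classes above can be driven; the item
# title and folder name are hypothetical and the calls mirror main() below.
def example_publish_flow():
    helper = AddInformationUsingDataFrame()  # signs in with gisUser/gisPass
    helper.createDir('packages')             # optional content folder
    fc = helper.gis.content.import_data(readWiki(), {"CountryCode": "Country"})
    return helper.addGisItem({
        "title": "Worldwide gun ownership (example)",
        "type": "Feature Collection",
        "text": json.dumps({"featureCollection": {"layers": [dict(fc.layer)]}}),
    })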
def main():
# Test adding Data to gis
useMyAccount = False
myGis = AddInformationUsingDataFrame()
if useMyAccount:
        gis = myGis.gis
else:
gis = GIS()
fc = gis.content.import_data(readWiki(), {"CountryCode":"Country"})
item_properties = {
"title": "Worldwide gun ownership",
"tags" : "guns,violence",
"snippet": " GSR Worldwide gun ownership",
"description": "test description",
"text": json.dumps({"featureCollection": {"layers": [dict(fc.layer)]}}),
"type": "Feature Collection",
"typeKeywords": "Data, Feature Collection, Singlelayer",
"extent" : "-102.5272,-41.7886,172.5967,64.984"
}
    map1, item = AddInformationUsingDataFrame.postMapUseWikipedia(item_properties=item_properties, gis=gis, fc=fc)
    # Test web map / web scene publishing
testWebMapScene()
def readWiki():
df = pd.read_html("https://en.wikipedia.org/wiki/Number_of_guns_per_capita_by_country")[1]
df.columns = df.iloc[0]
df = df.reindex(df.index.drop(0))
# change number + index to number
df.iloc[0,2] = 112.6
# change column format
converted_column = pd.to_numeric(df["Guns per 100 Residents"], errors = 'coerce')
df['Guns per 100 Residents'] = converted_column
return df
def testArcgisWeb():
'''
creates 3d object layer on buildings in montreal, canada
'''
myGis = GisMapUpdate()
myGis.readJSONFile(jsonFile = "./data/arcgis_map.json")
json1 = myGis.web_map_json
myGis.readJSONFile(jsonFile = "./data/arcgis_map_scene.json")
json2 = myGis.web_map_json
search_result = myGis.gis.content.search("title:2012 USA Median Age AND owner:esri",
item_type = "Map Service", outside_org = True)
median_age_weblayer = search_result[1]
json2['operationalLayers'][0]['itemId'] = median_age_weblayer.itemid
json2['operationalLayers'][0]['layerType'] = "ArcGISMapServiceLayer"
json2['operationalLayers'][0]['title'] = median_age_weblayer.title
json2['operationalLayers'][0]['url'] = median_age_weblayer.url
web_map_properties = {'title':'USA median age map',
'type':'Web Map',
                          'snippet':'This map service shows the median age of people ' +\
                          'in the United States as of 2012 census. The Median Age for ' +\
'the U.S. is 37 years of age.',
'tags':'ArcGIS Python API',
'text':json.dumps(json2)}
web_map_item = myGis.gis.content.add(web_map_properties)
search_result = myGis.gis.content.search("title:Montreal, Canada Buildings AND owner:esri_3d",
item_type="scene service", outside_org = True)
buildings_layer = search_result[0]
json1['operationalLayers'][0]['itemId'] = buildings_layer.itemid
json1['operationalLayers'][0]['layerType'] = "ArcGISSceneServiceLayer"
json1['operationalLayers'][0]['title'] = buildings_layer.title
json1['operationalLayers'][0]['url'] = buildings_layer.url
web_scene_item_properties = {'title':'Web scene with photo realistic buildings',
'type':'Web Scene',
'snippet':'This scene highlights buildings of Montreal, Canada',
'tags':'ArcGIS Python API',
'text': json.dumps(json1)}
# Use the add() method to publish a new web scenej
web_scene_item = myGis.gis.content.add(web_scene_item_properties)
web_scene_item.share(True)
web_scene_obj = arcgis.mapping.WebScene(web_scene_item)
return web_scene_obj
def testWebMapScene():
'''
- Test JSON load to
- publish a web map
'''
myGis = GisMapUpdate()
myGis.readJSONFile(jsonFile = "./data/map_ebola.json")
web_map_item_properties = {'title':'Ebola treatment locations',
'type':'Web Map',
'snippet':'This map shows locations of Ebola treatment centers in Africa',
'tags':'ArcGIS Python API',
'text':json.dumps(myGis.web_map_json)}
web_map_item = myGis.addGisItem(item_properties = web_map_item_properties)
# create a web map object out of the item
web_map_obj = arcgis.mapping.WebMap(web_map_item)
layer_list = web_map_obj['operationalLayers']
search_result = myGis.gis.content.search('title:Ebola', item_type='Feature Layer')
ebola = search_result[0]
# set the url to feature service item's url
layer_list[0]['url'] = ebola.layers[1].url
layer_list[0]['itemId'] = search_result[0].id
# update the web map object's operationalLayers dictionary
web_map_obj['operationalLayers'] = layer_list
search_result = myGis.gis.content.search('title:Western Pacific Typhoons (2005) AND owner:esri_3d',
item_type = 'Web Scene', outside_org = True)
web_scene_item = search_result[0]
web_scene_obj = arcgis.mapping.WebScene(web_scene_item)
return web_scene_obj
if __name__ == "__main__":
main() | apache-2.0 |
m12i/pykml | docs/sphinxext/matplotlib/ipython_directive.py | 7 | 15656 | import sys, os, shutil, imp, warnings, cStringIO, re
import IPython
from IPython.Shell import MatplotlibShell
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.parsers.rst import directives
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[a-z]', x)[0])
for x in sphinx_version[:2]])
COMMENT, INPUT, OUTPUT = range(3)
rgxin = re.compile('In \[(\d+)\]:\s?(.*)\s*')
rgxout = re.compile('Out\[(\d+)\]:\s?(.*)\s*')
fmtin = 'In [%d]:'
fmtout = 'Out[%d]:'
def block_parser(part):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
#print 'PARSE', lines
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
inputline += '\n' + nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
#print 'OUTPUT', output
block.append((OUTPUT, output))
break
#print 'returning block', block
return block
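# For illustration, a part such as
#
#   In [2]: x**3
#   Out[2]: 8
#
# is parsed by block_parser into roughly
#   [(INPUT, (None, 'x**3', '')), (OUTPUT, '8')]
# with any @suppress/@doctest/@savefig decorator appearing as the first
# element of the INPUT tuple instead of None.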
import matplotlib
matplotlib.use('Agg')
class EmbeddedSphinxShell:
def __init__(self):
self.cout = cStringIO.StringIO()
IPython.Shell.Term.cout = self.cout
IPython.Shell.Term.cerr = self.cout
argv = ['-autocall', '0']
self.user_ns = {}
self.user_glocal_ns = {}
self.IP = IPython.ipmaker.make_IPython(
argv, self.user_ns, self.user_glocal_ns, embedded=True,
#shell_class=IPython.Shell.InteractiveShell,
shell_class=MatplotlibShell,
rc_override=dict(colors = 'NoColor'))
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# we need bookmark the current dir first so we can save
# relative to it
self.process_input('bookmark ipy_basedir')
self.cout.seek(0)
self.cout.truncate(0)
def process_input(self, line):
'process the input, capturing stdout'
#print "input='%s'"%self.input
stdout = sys.stdout
sys.stdout = self.cout
#self.IP.resetbuffer()
self.IP.push(self.IP.prefilter(line, 0))
#self.IP.runlines(line)
sys.stdout = stdout
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
#print 'BLOCK', block
ret = []
output = None
input_lines = None
m = rgxin.match(str(self.IP.outputcache.prompt1).strip())
lineno = int(m.group(1))
input_prompt = fmtin%lineno
output_prompt = fmtout%lineno
image_file = None
image_directive = None
for token, data in block:
if token==COMMENT:
if not self.is_suppress:
ret.append(data)
elif token==INPUT:
decorator, input, rest = data
#print 'INPUT:', data
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_savefig = decorator is not None and decorator.startswith('@savefig')
#print 'is_verbatim=%s, is_doctest=%s, is_suppress=%s, is_savefig=%s'%(is_verbatim, is_doctest, is_suppress, is_savefig)
input_lines = input.split('\n')
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
if is_savefig:
saveargs = decorator.split(' ')
filename = saveargs[1]
outfile = os.path.join('_static/%s'%filename)
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = outfile
image_directive = '\n'.join(imagerows)
# TODO: can we get "rest" from ipython
#self.process_input('\n'.join(input_lines))
is_semicolon = False
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i==0:
# process the first input line
if is_verbatim:
self.process_input('')
else:
# only submit the line in non-verbatim mode
self.process_input(line)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input(line)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress:
if len(rest.strip()):
if is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append("%s"%rest)
ret.append('')
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon and not is_verbatim:
ret.append(output)
self.cout.truncate(0)
elif token==OUTPUT:
#print 'token==OUTPUT is_verbatim=%s'%is_verbatim
if is_verbatim:
# construct a mock output prompt
output = '%s %s\n'%(fmtout%lineno, data)
ret.append(output)
#print 'token==OUTPUT', output
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
ind = found.find(output_prompt)
if ind<0:
raise RuntimeError('output prompt="%s" does not match out line=%s'%(output_prompt, found))
found = found[len(output_prompt):].strip()
if found!=submitted:
raise RuntimeError('doctest failure for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted))
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
if image_file is not None:
self.insure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command
self.process_input('bookmark ipy_thisdir')
self.process_input('cd -b ipy_basedir')
self.process_input(command)
self.process_input('cd -b ipy_thisdir')
self.cout.seek(0)
self.cout.truncate(0)
#print 'returning', ret, figure
return ret, image_directive
def insure_pyplot(self):
if self._pyplot_imported:
return
self.process_input('import matplotlib.pyplot as plt')
shell = EmbeddedSphinxShell()
def ipython_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine,
):
debug = ipython_directive.DEBUG
shell.is_suppress = options.has_key('suppress')
shell.is_doctest = options.has_key('doctest')
shell.is_verbatim = options.has_key('verbatim')
#print 'ipy', shell.is_suppress, options
parts = '\n'.join(content).split('\n\n')
lines = ['.. sourcecode:: ipython', '']
figures = []
for part in parts:
block = block_parser(part)
if len(block):
rows, figure = shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
print '\n'.join(lines)
else:
#print 'INSERTING %d lines'%len(lines)
state_machine.insert_input(
lines, state_machine.input_lines.source(0))
return []
ipython_directive.DEBUG = False
def setup(app):
setup.app = app
options = {
'suppress': directives.flag,
'doctest': directives.flag,
'verbatim': directives.flag,
}
app.add_directive('ipython', ipython_directive, True, (0, 2, 0), **options)
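# For illustration, a reST document using this extension could contain:
#
#   .. ipython::
#
#      In [1]: x = 'hello world'
#
#      @doctest
#      In [2]: x.upper()
#      Out[2]: 'HELLO WORLD'
#
# The :suppress:/:doctest:/:verbatim: options registered above apply to the
# whole directive, while the @-decorators (see test() below) act per input.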
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
--------> print(url.split('&'))
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: np.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
--------> print(x)
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
r"""
In [239]: 1/2
@verbatim
Out[239]: 0
In [240]: 1.0/2.0
Out[240]: 0.5
""",
r"""
@verbatim
In [6]: pwd
Out[6]: '/home/jdhunter/mypy'
""",
r"""
@verbatim
In [151]: myfile.upper?
Type: builtin_function_or_method
Base Class: <type 'builtin_function_or_method'>
String Form: <built-in method upper of str object at 0x980e2f0>
Namespace: Interactive
Docstring:
S.upper() -> string
Return a copy of the string S converted to uppercase.
"""
]
ipython_directive.DEBUG = True
#options = dict(suppress=True)
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
if __name__=='__main__':
test()
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/examples/axes_grid/demo_axes_hbox_divider.py | 7 | 1547 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.axes_divider import HBoxDivider
import mpl_toolkits.axes_grid1.axes_size as Size
def make_heights_equal(fig, rect, ax1, ax2, pad):
# pad in inches
h1, v1 = Size.AxesX(ax1), Size.AxesY(ax1)
h2, v2 = Size.AxesX(ax2), Size.AxesY(ax2)
pad_v = Size.Scaled(1)
pad_h = Size.Fixed(pad)
my_divider = HBoxDivider(fig, rect,
horizontal=[h1, pad_h, h2],
vertical=[v1, pad_v, v2])
ax1.set_axes_locator(my_divider.new_locator(0))
ax2.set_axes_locator(my_divider.new_locator(2))
if __name__ == "__main__":
arr1 = np.arange(20).reshape((4,5))
arr2 = np.arange(20).reshape((5,4))
fig, (ax1, ax2) = plt.subplots(1,2)
ax1.imshow(arr1, interpolation="nearest")
ax2.imshow(arr2, interpolation="nearest")
rect = 111 # subplot param for combined axes
make_heights_equal(fig, rect, ax1, ax2, pad=0.5) # pad in inches
for ax in [ax1, ax2]:
ax.locator_params(nbins=4)
# annotate
ax3 = plt.axes([0.5, 0.5, 0.001, 0.001], frameon=False)
ax3.xaxis.set_visible(False)
ax3.yaxis.set_visible(False)
ax3.annotate("Location of two axes are adjusted\n"
"so that they have equal heights\n"
"while maintaining their aspect ratios", (0.5, 0.5),
xycoords="axes fraction", va="center", ha="center",
bbox=dict(boxstyle="round, pad=1", fc="w"))
plt.show()
| mit |
asurve/incubator-systemml | src/main/python/tests/test_mllearn_numpy.py | 12 | 8831 | #!/usr/bin/python
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# To run:
# - Python 2: `PYSPARK_PYTHON=python2 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_numpy.py`
# - Python 3: `PYSPARK_PYTHON=python3 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_numpy.py`
# Make the `systemml` package importable
import os
import sys
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")
sys.path.insert(0, path)
import unittest
import numpy as np
from pyspark.ml import Pipeline
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.sql import SparkSession
from sklearn import datasets, metrics, neighbors
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, r2_score
from systemml.mllearn import LinearRegression, LogisticRegression, NaiveBayes, SVM
from sklearn import linear_model
sparkSession = SparkSession.builder.getOrCreate()
def writeColVector(X, fileName):
fileName = os.path.join(os.getcwd(), fileName)
X.tofile(fileName, sep='\n')
metaDataFileContent = '{ "data_type": "matrix", "value_type": "double", "rows":' + str(len(X)) + ', "cols": 1, "nnz": -1, "format": "csv", "author": "systemml-tests", "created": "0000-00-00 00:00:00 PST" }'
with open(fileName+'.mtd', 'w') as text_file:
text_file.write(metaDataFileContent)
def deleteIfExists(fileName):
try:
os.remove(fileName)
except OSError:
pass
# Currently not integrated with JUnit test
# ~/spark-1.6.1-scala-2.11/bin/spark-submit --master local[*] --driver-class-path SystemML.jar test.py
class TestMLLearn(unittest.TestCase):
def test_logistic(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
logistic = LogisticRegression(sparkSession)
logistic.fit(X_train, y_train)
mllearn_predicted = logistic.predict(X_test)
sklearn_logistic = linear_model.LogisticRegression()
sklearn_logistic.fit(X_train, y_train)
self.failUnless(accuracy_score(sklearn_logistic.predict(X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_logistic_mlpipeline(self):
training = sparkSession.createDataFrame([
("a b c d e spark", 1.0),
("b d", 2.0),
("spark f g h", 1.0),
("hadoop mapreduce", 2.0),
("b spark who", 1.0),
("g d a y", 2.0),
("spark fly", 1.0),
("was mapreduce", 2.0),
("e spark program", 1.0),
("a e c l", 2.0),
("spark compile", 1.0),
("hadoop software", 2.0)
], ["text", "label"])
tokenizer = Tokenizer(inputCol="text", outputCol="words")
hashingTF = HashingTF(inputCol="words", outputCol="features", numFeatures=20)
lr = LogisticRegression(sparkSession)
pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])
model = pipeline.fit(training)
test = sparkSession.createDataFrame([
("spark i j k", 1.0),
("l m n", 2.0),
("mapreduce spark", 1.0),
("apache hadoop", 2.0)], ["text", "label"])
result = model.transform(test)
predictionAndLabels = result.select("prediction", "label")
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
evaluator = MulticlassClassificationEvaluator()
score = evaluator.evaluate(predictionAndLabels)
self.failUnless(score == 1.0)
def test_linear_regression(self):
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
regr = LinearRegression(sparkSession, solver='direct-solve')
regr.fit(diabetes_X_train, diabetes_y_train)
mllearn_predicted = regr.predict(diabetes_X_test)
sklearn_regr = linear_model.LinearRegression()
sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_linear_regression_cg(self):
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
regr = LinearRegression(sparkSession, solver='newton-cg')
regr.fit(diabetes_X_train, diabetes_y_train)
mllearn_predicted = regr.predict(diabetes_X_test)
sklearn_regr = linear_model.LinearRegression()
sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_svm(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
svm = SVM(sparkSession, is_multi_class=True, tol=0.0001)
mllearn_predicted = svm.fit(X_train, y_train).predict(X_test)
from sklearn import linear_model, svm
clf = svm.LinearSVC()
sklearn_predicted = clf.fit(X_train, y_train).predict(X_test)
accuracy = accuracy_score(sklearn_predicted, mllearn_predicted)
evaluation = 'test_svm accuracy_score(sklearn_predicted, mllearn_predicted) was {}'.format(accuracy)
self.failUnless(accuracy > 0.95, evaluation)
def test_naive_bayes(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
nb = NaiveBayes(sparkSession)
mllearn_predicted = nb.fit(X_train, y_train).predict(X_test)
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB()
sklearn_predicted = clf.fit(X_train, y_train).predict(X_test)
self.failUnless(accuracy_score(sklearn_predicted, mllearn_predicted) > 0.95 )
def test_naive_bayes1(self):
categories = ['alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space']
newsgroups_train = fetch_20newsgroups(subset='train', categories=categories)
newsgroups_test = fetch_20newsgroups(subset='test', categories=categories)
vectorizer = TfidfVectorizer()
# Both vectors and vectors_test are SciPy CSR matrix
vectors = vectorizer.fit_transform(newsgroups_train.data)
vectors_test = vectorizer.transform(newsgroups_test.data)
nb = NaiveBayes(sparkSession)
mllearn_predicted = nb.fit(vectors, newsgroups_train.target).predict(vectors_test)
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB()
sklearn_predicted = clf.fit(vectors, newsgroups_train.target).predict(vectors_test)
self.failUnless(accuracy_score(sklearn_predicted, mllearn_predicted) > 0.95 )
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
LennonLab/simplex | tools/SADfits/SADfits.py | 9 | 3639 | from __future__ import division
import matplotlib.pyplot as plt
import sys
import os
from random import shuffle
import numpy as np
########### PATHS ##############################################################
mydir = os.path.expanduser("~/GitHub/residence-time")
tools = os.path.expanduser(mydir + "/tools")
sys.path.append(tools + "/DiversityTools/macroeco_distributions")
import macroeco_distributions as md
sys.path.append(tools + "/DiversityTools/distributions")
import distributions as dist
sys.path.append(tools + "/DiversityTools/macroecotools")
import macroecotools as mct
sys.path.append(tools + "/metrics")
import metrics
sys.path.append(tools + "/DiversityTools/mete")
import mete
#sys.path.append(tools + "/pln")
#import pln
from scipy.stats.kde import gaussian_kde
from macroeco_distributions import pln, pln_solver
from numpy import empty
def get_kdens_choose_kernel(_list,kernel):
""" Finds the kernel density function across a sample of SADs """
density = gaussian_kde(_list)
n = len(_list)
xs = np.linspace(min(_list),max(_list),n)
#xs = np.linspace(0.0,1.0,n)
density.covariance_factor = lambda : kernel
density._compute_covariance()
D = [xs,density(xs)]
return D
def get_rad_pln(S, mu, sigma, lower_trunc = True):
"""Obtain the predicted RAD from a Poisson lognormal distribution"""
abundance = list(empty([S]))
rank = range(1, int(S) + 1)
cdf_obs = [(rank[i]-0.5) / S for i in range(0, int(S))]
j = 0
cdf_cum = 0
i = 1
while j < S:
cdf_cum += pln.pmf(i, mu, sigma, lower_trunc)
while cdf_cum >= cdf_obs[j]:
abundance[j] = i
j += 1
if j == S:
abundance.reverse()
return abundance
i += 1
def get_rad_from_obs(ab, dist):
mu, sigma = pln_solver(ab)
pred_rad = get_rad_pln(len(ab), mu, sigma)
return pred_rad
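# Illustrative helper: fit the Poisson lognormal to a small, made-up abundance
# vector and score the fit the same way the loop below does for simulated RADs.
def example_pln_fit():
    obs = [120, 60, 30, 18, 9, 6, 4, 2, 1, 1]
    pred = get_rad_from_obs(obs, 'pln')
    return mct.obs_pred_rsquare(np.array(obs), np.array(pred))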
data = mydir + '/results/simulated_data/protected/RAD-Data.csv'
RADs = []
with open(data) as f:
for d in f:
d = list(eval(d))
sim = d.pop(0)
ct = d.pop(0)
if len(d) >= 10:
d = sorted(d, reverse=True)
RADs.append(d)
print 'Number of RADs:', len(RADs)
mete_r2s = []
zipf_r2s = []
pln_r2s = []
shuffle(RADs)
for i, obs in enumerate(RADs):
N = int(sum(obs))
S = int(len(obs))
print i, N, S, len(pln_r2s)
if S >= 10 and N > 50:
if N < 10000:
result = mete.get_mete_rad(S, N)
predRAD = result[0]
mete_r2 = mct.obs_pred_rsquare(np.array(obs), np.array(predRAD))
mete_r2s.append(mete_r2)
#zipf_pred = dist.zipf(obs)
#predRAD = zipf_pred.from_cdf()
#zipf_r2 = mct.obs_pred_rsquare(np.array(obs), np.array(predRAD))
#zipf_r2s.append(zipf_r2)
predRAD = get_rad_from_obs(obs, 'pln')
pln_r2 = mct.obs_pred_rsquare(np.array(obs), np.array(predRAD))
pln_r2s.append(pln_r2)
if len(pln_r2s) > 200: break
fig = plt.figure(111)
kernel = 0.5
D = get_kdens_choose_kernel(mete_r2s, kernel)
plt.plot(D[0],D[1],color = '0.3', lw=3, alpha = 0.99,label= 'METE')
#D = get_kdens_choose_kernel(zipf_r2s, kernel)
#plt.plot(D[0],D[1],color = 'c', lw=3, alpha = 0.99,label= 'Zipf')
D = get_kdens_choose_kernel(pln_r2s, kernel)
plt.plot(D[0],D[1],color = 'm', lw=3, alpha = 0.99, label= 'PLN')
plt.xlim(0.0, 1)
plt.legend(loc=2, fontsize=16)
plt.xlabel('$r$'+r'$^{2}$', fontsize=22)
plt.ylabel('$density$', fontsize=22)
plt.savefig(mydir + '/results/figures/SADfits.png', dpi=600, bbox_inches = "tight")
plt.close()
| mit |
pnedunuri/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
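# Worked example (illustrative): grid_to_graph(3, 3) yields a 9x9 sparse
# adjacency matrix in which pixel 0 is linked to pixels 1 and 3 -- its right
# and bottom neighbours on the 3x3 grid -- in addition to the unit diagonal.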
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
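# Worked example (illustrative; extract_patches is a private helper):
#
#   >>> arr = np.arange(16).reshape(4, 4)
#   >>> patches = extract_patches(arr, patch_shape=2, extraction_step=2)
#   >>> patches.shape
#   (2, 2, 2, 2)
#   >>> patches[1, 1]
#   array([[10, 11],
#          [14, 15]])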
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
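# ----------------------------------------------------------------------
# Editor's sketch (not part of scikit-learn): a small, self-contained check
# that ties the helpers above together.  The toy image and patch sizes are
# arbitrary choices made only for illustration; call the function manually
# to exercise the round trip.
def _demo_patch_roundtrip():
    demo_image = np.arange(36, dtype=np.float64).reshape(6, 6)
    # all overlapping 3x3 patches of the 6x6 image: (6 - 3 + 1) ** 2 = 16
    demo_patches = extract_patches_2d(demo_image, (3, 3))
    assert demo_patches.shape == (16, 3, 3)
    # averaging the overlapping patches reproduces the original exactly,
    # because every patch carries the true pixel values
    rebuilt = reconstruct_from_patches_2d(demo_patches, (6, 6))
    assert np.allclose(rebuilt, demo_image)
    # PatchExtractor applies the same sampling to a whole batch of images
    batch = np.stack([demo_image, demo_image + 1.0])
    extractor = PatchExtractor(patch_size=(3, 3), max_patches=4, random_state=0)
    return extractor.transform(batch).shape  # (8, 3, 3) with these settings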
| bsd-3-clause |
hammerlab/mhcflurry | downloads-generation/models_class1_kim_benchmark/curate.py | 1 | 9601 | """
Filter and combine various peptide/MHC datasets to derive a composite training set,
optionally including eluted peptides identified by mass-spec.
"""
import sys
import argparse
import pandas
from mhcflurry.common import normalize_allele_name
def normalize_allele_name_or_return_unknown(s):
    result = normalize_allele_name(
        s, raise_on_error=False, default_value="UNKNOWN")
    return result
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
"--data-kim2014",
action="append",
default=[],
help="Path to Kim 2014-style affinity data")
parser.add_argument(
"--data-iedb",
action="append",
default=[],
help="Path to IEDB-style affinity data (e.g. mhc_ligand_full.csv)")
parser.add_argument(
"--data-systemhc-atlas",
action="append",
default=[],
help="Path to systemhc-atlas-style mass-spec data")
parser.add_argument(
"--data-abelin-mass-spec",
action="append",
default=[],
help="Path to Abelin Immunity 2017 mass-spec hits")
parser.add_argument(
"--include-iedb-mass-spec",
action="store_true",
default=False,
help="Include mass-spec observations in IEDB")
parser.add_argument(
"--out-csv",
required=True,
help="Result file")
QUALITATIVE_TO_AFFINITY_AND_INEQUALITY = {
"Negative": (5000.0, ">"),
"Positive": (500.0, "<"), # used for mass-spec hits
"Positive-High": (100.0, "<"),
"Positive-Intermediate": (1000.0, "<"),
"Positive-Low": (5000.0, "<"),
}
QUALITATIVE_TO_AFFINITY = dict(
(key, value[0]) for (key, value)
in QUALITATIVE_TO_AFFINITY_AND_INEQUALITY.items())
QUALITATIVE_TO_INEQUALITY = dict(
(key, value[1]) for (key, value)
in QUALITATIVE_TO_AFFINITY_AND_INEQUALITY.items())
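# Editor's note (illustration): via the two derived dicts above, an IEDB row
# labelled "Positive-High" ends up downstream with measurement_value=100.0 and
# measurement_inequality="<", while a "Negative" row becomes 5000.0 with ">".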
EXCLUDE_IEDB_ALLELES = [
"HLA class I",
"HLA class II",
]
def load_data_kim2014(filename):
df = pandas.read_table(filename)
print("Loaded kim2014 data: %s" % str(df.shape))
df["measurement_source"] = "kim2014"
df["measurement_value"] = df.meas
df["measurement_type"] = (df.inequality == "=").map({
True: "quantitative",
False: "qualitative",
})
df["measurement_inequality"] = df.inequality
df["original_allele"] = df.mhc
df["peptide"] = df.sequence
df["allele"] = df.mhc.map(normalize_allele_name_or_return_unknown)
print("Dropping un-parseable alleles: %s" % ", ".join(
df.ix[df.allele == "UNKNOWN"]["mhc"].unique()))
df = df.ix[df.allele != "UNKNOWN"]
print("Loaded kim2014 data: %s" % str(df.shape))
return df
def load_data_systemhc_atlas(filename, min_probability=0.99):
df = pandas.read_csv(filename)
print("Loaded systemhc atlas data: %s" % str(df.shape))
df["measurement_source"] = "systemhc-atlas"
df["measurement_value"] = QUALITATIVE_TO_AFFINITY["Positive"]
df["measurement_inequality"] = "<"
df["measurement_type"] = "qualitative"
df["original_allele"] = df.top_allele
df["peptide"] = df.search_hit
df["allele"] = df.top_allele.map(normalize_allele_name_or_return_unknown)
print("Dropping un-parseable alleles: %s" % ", ".join(
str(x) for x in df.ix[df.allele == "UNKNOWN"]["top_allele"].unique()))
df = df.loc[df.allele != "UNKNOWN"]
print("Systemhc atlas data now: %s" % str(df.shape))
print("Dropping data points with probability < %f" % min_probability)
df = df.loc[df.prob >= min_probability]
print("Systemhc atlas data now: %s" % str(df.shape))
print("Removing duplicates")
df = df.drop_duplicates(["allele", "peptide"])
print("Systemhc atlas data now: %s" % str(df.shape))
return df
def load_data_abelin_mass_spec(filename):
df = pandas.read_csv(filename)
print("Loaded Abelin mass-spec data: %s" % str(df.shape))
df["measurement_source"] = "abelin-mass-spec"
df["measurement_value"] = QUALITATIVE_TO_AFFINITY["Positive"]
df["measurement_inequality"] = "<"
df["measurement_type"] = "qualitative"
df["original_allele"] = df.allele
df["allele"] = df.original_allele.map(normalize_allele_name_or_return_unknown)
print("Dropping un-parseable alleles: %s" % ", ".join(
str(x) for x in df.ix[df.allele == "UNKNOWN"]["allele"].unique()))
df = df.loc[df.allele != "UNKNOWN"]
print("Abelin mass-spec data now: %s" % str(df.shape))
print("Removing duplicates")
df = df.drop_duplicates(["allele", "peptide"])
print("Abelin mass-spec data now: %s" % str(df.shape))
return df
def load_data_iedb(iedb_csv, include_qualitative=True, include_mass_spec=False):
iedb_df = pandas.read_csv(iedb_csv, skiprows=1, low_memory=False)
print("Loaded iedb data: %s" % str(iedb_df.shape))
print("Selecting only class I")
iedb_df = iedb_df.ix[
iedb_df["MHC allele class"].str.strip().str.upper() == "I"
]
print("New shape: %s" % str(iedb_df.shape))
print("Dropping known unusuable alleles")
iedb_df = iedb_df.ix[
~iedb_df["Allele Name"].isin(EXCLUDE_IEDB_ALLELES)
]
iedb_df = iedb_df.ix[
(~iedb_df["Allele Name"].str.contains("mutant")) &
(~iedb_df["Allele Name"].str.contains("CD1"))
]
iedb_df["allele"] = iedb_df["Allele Name"].map(normalize_allele_name_or_return_unknown)
print("Dropping un-parseable alleles: %s" % ", ".join(
iedb_df.ix[iedb_df.allele == "UNKNOWN"]["Allele Name"].unique()))
iedb_df = iedb_df.ix[iedb_df.allele != "UNKNOWN"]
print("IEDB measurements per allele:\n%s" % iedb_df.allele.value_counts())
quantitative = iedb_df.ix[iedb_df["Units"] == "nM"].copy()
quantitative["measurement_type"] = "quantitative"
quantitative["measurement_inequality"] = "="
print("Quantitative measurements: %d" % len(quantitative))
qualitative = iedb_df.ix[iedb_df["Units"] != "nM"].copy()
qualitative["measurement_type"] = "qualitative"
print("Qualitative measurements: %d" % len(qualitative))
if not include_mass_spec:
qualitative = qualitative.ix[
(~qualitative["Method/Technique"].str.contains("mass spec"))
].copy()
qualitative["Quantitative measurement"] = (
qualitative["Qualitative Measure"].map(QUALITATIVE_TO_AFFINITY))
qualitative["measurement_inequality"] = (
qualitative["Qualitative Measure"].map(QUALITATIVE_TO_INEQUALITY))
print("Qualitative measurements (possibly after dropping MS): %d" % (
len(qualitative)))
iedb_df = pandas.concat(
(
([quantitative]) +
([qualitative] if include_qualitative else [])),
ignore_index=True)
print("IEDB measurements per allele:\n%s" % iedb_df.allele.value_counts())
print("Subselecting to valid peptides. Starting with: %d" % len(iedb_df))
iedb_df["Description"] = iedb_df.Description.str.strip()
iedb_df = iedb_df.ix[
iedb_df.Description.str.match("^[ACDEFGHIKLMNPQRSTVWY]+$")
]
print("Now: %d" % len(iedb_df))
print("Annotating last author and category")
iedb_df["last_author"] = iedb_df.Authors.map(
lambda x: (
x.split(";")[-1]
.split(",")[-1]
.split(" ")[-1]
.strip()
.replace("*", ""))).values
iedb_df["category"] = (
iedb_df["last_author"] + " - " + iedb_df["Method/Technique"]).values
train_data = pandas.DataFrame()
train_data["peptide"] = iedb_df.Description.values
train_data["measurement_value"] = iedb_df[
"Quantitative measurement"
].values
train_data["measurement_source"] = iedb_df.category.values
train_data["measurement_inequality"] = iedb_df.measurement_inequality.values
train_data["allele"] = iedb_df["allele"].values
train_data["original_allele"] = iedb_df["Allele Name"].values
train_data["measurement_type"] = iedb_df["measurement_type"].values
train_data = train_data.drop_duplicates().reset_index(drop=True)
return train_data
def run():
args = parser.parse_args(sys.argv[1:])
dfs = []
for filename in args.data_iedb:
df = load_data_iedb(filename, include_mass_spec=args.include_iedb_mass_spec)
dfs.append(df)
for filename in args.data_kim2014:
df = load_data_kim2014(filename)
df["allele_peptide"] = df.allele + "_" + df.peptide
# Give precedence to IEDB data.
if dfs:
iedb_df = dfs[0]
iedb_df["allele_peptide"] = iedb_df.allele + "_" + iedb_df.peptide
print("Dropping kim2014 data present in IEDB.")
df = df.ix[
~df.allele_peptide.isin(iedb_df.allele_peptide)
]
print("Kim2014 data now: %s" % str(df.shape))
dfs.append(df)
for filename in args.data_systemhc_atlas:
df = load_data_systemhc_atlas(filename)
dfs.append(df)
for filename in args.data_abelin_mass_spec:
df = load_data_abelin_mass_spec(filename)
dfs.append(df)
df = pandas.concat(dfs, ignore_index=True)
print("Combined df: %s" % (str(df.shape)))
print("Removing combined duplicates")
df = df.drop_duplicates(["allele", "peptide", "measurement_value"])
print("New combined df: %s" % (str(df.shape)))
df = df[[
"allele",
"peptide",
"measurement_value",
"measurement_inequality",
"measurement_type",
"measurement_source",
"original_allele",
]].sort_values(["allele", "peptide"]).dropna()
print("Final combined df: %s" % (str(df.shape)))
df.to_csv(args.out_csv, index=False)
print("Wrote: %s" % args.out_csv)
if __name__ == '__main__':
run()
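# Editor's note: a typical invocation of this script, built from the argparse
# flags defined above (file paths are placeholders, not real data):
#
#     python curate.py \
#         --data-iedb mhc_ligand_full.csv \
#         --data-kim2014 kim2014_affinity.tsv \
#         --include-iedb-mass-spec \
#         --out-csv curated_training_data.csv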
| apache-2.0 |
russellgeoff/blog | DMPs/dmp.py | 4 | 7299 | '''
Copyright (C) 2013 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
from cs import CanonicalSystem
class DMPs(object):
"""Implementation of Dynamic Motor Primitives,
as described in Dr. Stefan Schaal's (2002) paper."""
def __init__(self, dmps, bfs, dt=.01,
y0=0, goal=1, w=None,
ay=None, by=None, **kwargs):
"""
dmps int: number of dynamic motor primitives
bfs int: number of basis functions per DMP
dt float: timestep for simulation
y0 list: initial state of DMPs
goal list: goal state of DMPs
w list: tunable parameters, control amplitude of basis functions
ay int: gain on attractor term y dynamics
by int: gain on attractor term y dynamics
"""
self.dmps = dmps
self.bfs = bfs
self.dt = dt
if isinstance(y0, (int, float)):
y0 = np.ones(self.dmps)*y0
self.y0 = y0
if isinstance(goal, (int, float)):
goal = np.ones(self.dmps)*goal
self.goal = goal
if w is None:
# default is f = 0
w = np.zeros((self.dmps, self.bfs))
self.w = w
if ay is None: ay = np.ones(dmps)*25 # Schaal 2012
self.ay = ay
if by is None: by = self.ay.copy() / 4 # Schaal 2012
self.by = by
# set up the CS
self.cs = CanonicalSystem(dt=self.dt, **kwargs)
self.timesteps = int(self.cs.run_time / self.dt)
# set up the DMP system
self.reset_state()
def check_offset(self):
"""Check to see if initial position and goal are the same
if they are, offset slightly so that the forcing term is not 0"""
for d in range(self.dmps):
if (self.y0[d] == self.goal[d]):
self.goal[d] += 1e-4
def gen_front_term(self, x, dmp_num): raise NotImplementedError()
def gen_goal(self, y_des): raise NotImplementedError()
def gen_psi(self): raise NotImplementedError()
def gen_weights(self, f_target): raise NotImplementedError()
def imitate_path(self, y_des):
"""Takes in a desired trajectory and generates the set of
system parameters that best realize this path.
y_des list/array: the desired trajectories of each DMP
should be shaped [dmps, run_time]
"""
# set initial state and goal
if y_des.ndim == 1:
y_des = y_des.reshape(1,len(y_des))
self.y0 = y_des[:,0].copy()
self.y_des = y_des.copy()
self.goal = self.gen_goal(y_des)
self.check_offset()
# generate function to interpolate the desired trajectory
import scipy.interpolate
path = np.zeros((self.dmps, self.timesteps))
x = np.linspace(0, self.cs.run_time, y_des.shape[1])
for d in range(self.dmps):
path_gen = scipy.interpolate.interp1d(x, y_des[d])
for t in range(self.timesteps):
path[d, t] = path_gen(t * self.dt)
y_des = path
# calculate velocity of y_des
dy_des = np.diff(y_des) / self.dt
# add zero to the beginning of every row
dy_des = np.hstack((np.zeros((self.dmps, 1)), dy_des))
# calculate acceleration of y_des
ddy_des = np.diff(dy_des) / self.dt
# add zero to the beginning of every row
ddy_des = np.hstack((np.zeros((self.dmps, 1)), ddy_des))
f_target = np.zeros((y_des.shape[1], self.dmps))
# find the force required to move along this trajectory
for d in range(self.dmps):
f_target[:,d] = ddy_des[d] - self.ay[d] * \
(self.by[d] * (self.goal[d] - y_des[d]) - \
dy_des[d])
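        # (editor's note: this inverts the transformation-system dynamics used
        #  in step() below, ddy = ay * (by * (goal - y) - dy) + f, solving for
        #  the forcing term f that reproduces the demonstrated accelerations)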
# efficiently generate weights to realize f_target
self.gen_weights(f_target)
'''# plot the basis function activations
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(211)
plt.plot(psi_track)
plt.title('psi_track')
# plot the desired forcing function vs approx
plt.subplot(212)
plt.plot(f_target[:,0])
plt.plot(np.sum(psi_track * self.w[0], axis=1))
plt.legend(['f_target', 'w*psi'])
plt.tight_layout()
plt.show()'''
self.reset_state()
return y_des
def rollout(self, timesteps=None, **kwargs):
"""Generate a system trial, no feedback is incorporated."""
self.reset_state()
if timesteps is None:
            if 'tau' in kwargs:
timesteps = int(self.timesteps / kwargs['tau'])
else:
timesteps = self.timesteps
# set up tracking vectors
y_track = np.zeros((timesteps, self.dmps))
dy_track = np.zeros((timesteps, self.dmps))
ddy_track = np.zeros((timesteps, self.dmps))
for t in range(timesteps):
y, dy, ddy = self.step(**kwargs)
# record timestep
y_track[t] = y
dy_track[t] = dy
ddy_track[t] = ddy
return y_track, dy_track, ddy_track
def reset_state(self):
"""Reset the system state"""
self.y = self.y0.copy()
self.dy = np.zeros(self.dmps)
self.ddy = np.zeros(self.dmps)
self.cs.reset_state()
def step(self, tau=1.0, state_fb=None):
"""Run the DMP system for a single timestep.
tau float: scales the timestep
increase tau to make the system execute faster
state_fb np.array: optional system feedback
"""
# run canonical system
cs_args = {'tau':tau,
'error_coupling':1.0}
if state_fb is not None:
# take the 2 norm of the overall error
state_fb = state_fb.reshape(1,self.dmps)
dist = np.sqrt(np.sum((state_fb - self.y)**2))
cs_args['error_coupling'] = 1.0 / (1.0 + 10*dist)
x = self.cs.step(**cs_args)
# generate basis function activation
psi = self.gen_psi(x)
for d in range(self.dmps):
# generate the forcing term
f = self.gen_front_term(x, d) * \
(np.dot(psi, self.w[d])) / np.sum(psi)
# DMP acceleration
self.ddy[d] = (self.ay[d] *
(self.by[d] * (self.goal[d] - self.y[d]) - \
self.dy[d]/tau) + f) * tau**2
self.dy[d] += self.ddy[d] * self.dt * cs_args['error_coupling']
self.y[d] += self.dy[d] * self.dt * cs_args['error_coupling']
return self.y, self.dy, self.ddy
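# ----------------------------------------------------------------------
# Editor's sketch: a minimal concrete subclass, added for illustration only.
# It is NOT the discrete DMP implementation that accompanies this file in the
# original repository; the Gaussian centre/width choices and the weight fit
# below are assumptions, and it presumes CanonicalSystem exposes its decay
# gain as `cs.ax` and that `cs.step()` accepts its default arguments.
class GaussianDMPs(DMPs):
    """Discrete DMPs with Gaussian basis functions (hedged example)."""
    def __init__(self, **kwargs):
        super(GaussianDMPs, self).__init__(**kwargs)
        # place the basis function centres along the canonical trajectory
        self.c = np.exp(-self.cs.ax *
                        np.linspace(0, self.cs.run_time, self.bfs))
        self.h = np.ones(self.bfs) * self.bfs**1.5 / self.c
        self.check_offset()
    def gen_front_term(self, x, dmp_num):
        # scale the forcing term by the movement amplitude
        return x * (self.goal[dmp_num] - self.y0[dmp_num])
    def gen_goal(self, y_des):
        # for a discrete movement the goal is simply the final state
        return np.copy(y_des[:, -1])
    def gen_psi(self, x):
        # Gaussian basis activations, for a scalar x (during step()) or an
        # array of x values (during weight generation)
        if np.ndim(x) == 0:
            return np.exp(-self.h * (x - self.c)**2)
        return np.exp(-self.h * (np.asarray(x)[:, None] - self.c)**2)
    def gen_weights(self, f_target):
        # roll the canonical system forward to get x at every timestep
        self.cs.reset_state()
        x_track = np.array([self.cs.step() for _ in range(self.timesteps)])
        self.cs.reset_state()
        psi_track = self.gen_psi(x_track)
        # weighted regression for each basis function's weight
        for d in range(self.dmps):
            k = self.goal[d] - self.y0[d]
            for b in range(self.bfs):
                numer = np.sum(x_track * psi_track[:, b] * f_target[:, d])
                denom = np.sum(x_track**2 * psi_track[:, b])
                self.w[d, b] = numer / (k * denom)
# usage sketch (editor's note, not executed here):
#   dmp = GaussianDMPs(dmps=1, bfs=50)
#   dmp.imitate_path(y_des=np.sin(np.linspace(0, np.pi, 100)))
#   y_track, dy_track, ddy_track = dmp.rollout()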
| gpl-3.0 |
plotly/python-api | packages/python/plotly/plotly/graph_objs/_scattercarpet.py | 1 | 78536 | from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Scattercarpet(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "scattercarpet"
_valid_props = {
"a",
"asrc",
"b",
"bsrc",
"carpet",
"connectgaps",
"customdata",
"customdatasrc",
"fill",
"fillcolor",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hoveron",
"hovertemplate",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"legendgroup",
"line",
"marker",
"meta",
"metasrc",
"mode",
"name",
"opacity",
"selected",
"selectedpoints",
"showlegend",
"stream",
"text",
"textfont",
"textposition",
"textpositionsrc",
"textsrc",
"texttemplate",
"texttemplatesrc",
"type",
"uid",
"uirevision",
"unselected",
"visible",
"xaxis",
"yaxis",
}
# a
# -
@property
def a(self):
"""
Sets the a-axis coordinates.
The 'a' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["a"]
@a.setter
def a(self, val):
self["a"] = val
# asrc
# ----
@property
def asrc(self):
"""
Sets the source reference on Chart Studio Cloud for a .
The 'asrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["asrc"]
@asrc.setter
def asrc(self, val):
self["asrc"] = val
# b
# -
@property
def b(self):
"""
Sets the b-axis coordinates.
The 'b' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["b"]
@b.setter
def b(self, val):
self["b"] = val
# bsrc
# ----
@property
def bsrc(self):
"""
Sets the source reference on Chart Studio Cloud for b .
The 'bsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bsrc"]
@bsrc.setter
def bsrc(self, val):
self["bsrc"] = val
# carpet
# ------
@property
def carpet(self):
"""
An identifier for this carpet, so that `scattercarpet` and
`contourcarpet` traces can specify a carpet plot on which they
lie
The 'carpet' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["carpet"]
@carpet.setter
def carpet(self, val):
self["carpet"] = val
# connectgaps
# -----------
@property
def connectgaps(self):
"""
Determines whether or not gaps (i.e. {nan} or missing values)
in the provided data arrays are connected.
The 'connectgaps' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["connectgaps"]
@connectgaps.setter
def connectgaps(self, val):
self["connectgaps"] = val
# customdata
# ----------
@property
def customdata(self):
"""
        Assigns extra data to each datum. This may be useful when
        listening to hover, click and selection events. Note that
        "scatter" traces also append customdata items to the markers'
        DOM elements.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# fill
# ----
@property
def fill(self):
"""
Sets the area to fill with a solid color. Use with `fillcolor`
if not "none". scatterternary has a subset of the options
available to scatter. "toself" connects the endpoints of the
trace (or each segment of the trace if it has gaps) into a
closed shape. "tonext" fills the space between two traces if
one completely encloses the other (eg consecutive contour
lines), and behaves like "toself" if there is no trace before
it. "tonext" should not be used if one trace does not enclose
the other.
The 'fill' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'toself', 'tonext']
Returns
-------
Any
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
# fillcolor
# ---------
@property
def fillcolor(self):
"""
Sets the fill color. Defaults to a half-transparent variant of
the line color, marker color, or marker line color, whichever
is available.
The 'fillcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["fillcolor"]
@fillcolor.setter
def fillcolor(self, val):
self["fillcolor"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['a', 'b', 'text', 'name'] joined with '+' characters
(e.g. 'a+b')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for hoverinfo
.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
                    only if the hover label text spans two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.scattercarpet.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hoveron
# -------
@property
def hoveron(self):
"""
Do the hover effects highlight individual points (markers or
line points) or do they highlight filled regions? If the fill
is "toself" or "tonext" and there are no markers or text, then
the default is "fills", otherwise it is "points".
The 'hoveron' property is a flaglist and may be specified
as a string containing:
- Any combination of ['points', 'fills'] joined with '+' characters
(e.g. 'points+fills')
Returns
-------
Any
"""
return self["hoveron"]
@hoveron.setter
def hoveron(self, val):
self["hoveron"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for details on
the date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example "<extra>{fullData.name}</extra>". To
hide the secondary box completely, use an empty tag
`<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Sets hover text elements associated with each (a,b) point. If a
single string, the same string appears over all the data
points. If an array of strings, the items are mapped in order
        to the data points in (a,b). To be seen, trace `hoverinfo`
must contain a "text" flag.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for hovertext
.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
        Assigns id labels to each datum. These ids provide object
        constancy of data points during animation. Should be an array
        of strings, not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash
type string ("solid", "dot", "dash",
"longdash", "dashdot", or "longdashdot") or a
dash length list in px (eg "5px,10px,2px,2px").
shape
Determines the line shape. With "spline" the
lines are drawn using spline interpolation. The
other available values correspond to step-wise
line shapes.
smoothing
Has an effect only if `shape` is set to
"spline" Sets the amount of smoothing. 0
corresponds to no smoothing (equivalent to a
"linear" shape).
width
Sets the line width (in px).
Returns
-------
plotly.graph_objs.scattercarpet.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if in `marker.color`is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color`is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
only if in `marker.color`is set to a numerical
array. Value should have the same units as in
`marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
                    Sets the marker color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.scattercarpet.mark
er.ColorBar` instance or dict with compatible
properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use`marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may
be a palette name string of the following list:
Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
gradient
:class:`plotly.graph_objects.scattercarpet.mark
er.Gradient` instance or dict with compatible
properties
line
:class:`plotly.graph_objects.scattercarpet.mark
er.Line` instance or dict with compatible
properties
maxdisplayed
Sets a maximum number of points to be drawn on
the graph. 0 corresponds to no limit.
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud
for opacity .
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.color`is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
in `marker.color`is set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px)
of the rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the
data in `size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points.
Use with `sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
symbol
Sets the marker symbol type. Adding 100 is
equivalent to appending "-open" to a symbol
name. Adding 200 is equivalent to appending
"-dot" to a symbol name. Adding 300 is
equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud
for symbol .
Returns
-------
plotly.graph_objs.scattercarpet.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
        `text`, `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
        `%{data[n].meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for meta .
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# mode
# ----
@property
def mode(self):
"""
Determines the drawing mode for this scatter trace. If the
provided `mode` includes "text" then the `text` elements appear
at the coordinates. Otherwise, the `text` elements appear on
hover. If there are less than 20 points and the trace is not
stacked then the default is "lines+markers". Otherwise,
"lines".
The 'mode' property is a flaglist and may be specified
as a string containing:
- Any combination of ['lines', 'markers', 'text'] joined with '+' characters
(e.g. 'lines+markers')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["mode"]
@mode.setter
def mode(self, val):
self["mode"] = val
# name
# ----
@property
def name(self):
"""
Sets the trace name. The trace name appear as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# selected
# --------
@property
def selected(self):
"""
The 'selected' property is an instance of Selected
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.Selected`
- A dict of string/value properties that will be passed
to the Selected constructor
Supported dict properties:
marker
:class:`plotly.graph_objects.scattercarpet.sele
cted.Marker` instance or dict with compatible
properties
textfont
:class:`plotly.graph_objects.scattercarpet.sele
cted.Textfont` instance or dict with compatible
properties
Returns
-------
plotly.graph_objs.scattercarpet.Selected
"""
return self["selected"]
@selected.setter
def selected(self, val):
self["selected"] = val
# selectedpoints
# --------------
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
        turned on for all points, whereas any other non-array value
        means no selection at all, in which case the `selected` and
        `unselected` styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self["selectedpoints"]
@selectedpoints.setter
def selectedpoints(self, val):
self["selectedpoints"] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.scattercarpet.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# text
# ----
@property
def text(self):
"""
Sets text elements associated with each (a,b) point. If a
single string, the same string appears over all the data
points. If an array of strings, the items are mapped in order
        to the data points in (a,b). If trace `hoverinfo` contains
a "text" flag and "hovertext" is not set, these elements will
be seen in the hover labels.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# textfont
# --------
@property
def textfont(self):
"""
Sets the text font.
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
Returns
-------
plotly.graph_objs.scattercarpet.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
# textposition
# ------------
@property
def textposition(self):
"""
Sets the positions of the `text` elements with respects to the
(x,y) coordinates.
The 'textposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top left', 'top center', 'top right', 'middle left',
'middle center', 'middle right', 'bottom left', 'bottom
center', 'bottom right']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textposition"]
@textposition.setter
def textposition(self, val):
self["textposition"] = val
# textpositionsrc
# ---------------
@property
def textpositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
textposition .
The 'textpositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textpositionsrc"]
@textpositionsrc.setter
def textpositionsrc(self, val):
self["textpositionsrc"] = val
# textsrc
# -------
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for text .
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
# texttemplate
# ------------
@property
def texttemplate(self):
"""
Template string used for rendering the information text that
appear on points. Note that this will override `textinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for details on
        the date formatting syntax. All attributes that can be
        specified per-point (the ones that are `arrayOk: true`) are
        available, as are the variables `a`, `b` and `text`.
The 'texttemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["texttemplate"]
@texttemplate.setter
def texttemplate(self, val):
self["texttemplate"] = val
# texttemplatesrc
# ---------------
@property
def texttemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
texttemplate .
The 'texttemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["texttemplatesrc"]
@texttemplatesrc.setter
def texttemplatesrc(self, val):
self["texttemplatesrc"] = val
# uid
# ---
@property
def uid(self):
"""
        Assign an id to this trace. Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# unselected
# ----------
@property
def unselected(self):
"""
The 'unselected' property is an instance of Unselected
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.Unselected`
- A dict of string/value properties that will be passed
to the Unselected constructor
Supported dict properties:
marker
:class:`plotly.graph_objects.scattercarpet.unse
lected.Marker` instance or dict with compatible
properties
textfont
:class:`plotly.graph_objects.scattercarpet.unse
lected.Textfont` instance or dict with
compatible properties
Returns
-------
plotly.graph_objs.scattercarpet.Unselected
"""
return self["unselected"]
@unselected.setter
def unselected(self, val):
self["unselected"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# xaxis
# -----
@property
def xaxis(self):
"""
Sets a reference between this trace's x coordinates and a 2D
cartesian x axis. If "x" (the default value), the x coordinates
refer to `layout.xaxis`. If "x2", the x coordinates refer to
`layout.xaxis2`, and so on.
The 'xaxis' property is an identifier of a particular
subplot, of type 'x', that may be specified as the string 'x'
optionally followed by an integer >= 1
(e.g. 'x', 'x1', 'x2', 'x3', etc.)
Returns
-------
str
"""
return self["xaxis"]
@xaxis.setter
def xaxis(self, val):
self["xaxis"] = val
# yaxis
# -----
@property
def yaxis(self):
"""
Sets a reference between this trace's y coordinates and a 2D
cartesian y axis. If "y" (the default value), the y coordinates
refer to `layout.yaxis`. If "y2", the y coordinates refer to
`layout.yaxis2`, and so on.
The 'yaxis' property is an identifier of a particular
subplot, of type 'y', that may be specified as the string 'y'
optionally followed by an integer >= 1
(e.g. 'y', 'y1', 'y2', 'y3', etc.)
Returns
-------
str
"""
return self["yaxis"]
@yaxis.setter
def yaxis(self, val):
self["yaxis"] = val
# type
# ----
@property
def type(self):
return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
a
Sets the a-axis coordinates.
asrc
Sets the source reference on Chart Studio Cloud for a
.
b
Sets the b-axis coordinates.
bsrc
Sets the source reference on Chart Studio Cloud for b
.
carpet
An identifier for this carpet, so that `scattercarpet`
and `contourcarpet` traces can specify a carpet plot on
which they lie
connectgaps
Determines whether or not gaps (i.e. {nan} or missing
values) in the provided data arrays are connected.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
fill
Sets the area to fill with a solid color. Use with
`fillcolor` if not "none". scatterternary has a subset
of the options available to scatter. "toself" connects
the endpoints of the trace (or each segment of the
trace if it has gaps) into a closed shape. "tonext"
fills the space between two traces if one completely
encloses the other (eg consecutive contour lines), and
behaves like "toself" if there is no trace before it.
"tonext" should not be used if one trace does not
enclose the other.
fillcolor
Sets the fill color. Defaults to a half-transparent
variant of the line color, marker color, or marker line
color, whichever is available.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.scattercarpet.Hoverlabel`
instance or dict with compatible properties
hoveron
Do the hover effects highlight individual points
(markers or line points) or do they highlight filled
regions? If the fill is "toself" or "tonext" and there
are no markers or text, then the default is "fills",
otherwise it is "points".
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, all attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each (a,b)
point. If a single string, the same string appears over
all the data points. If an array of strings, the items
            are mapped in order to the data points in (a,b). To
be seen, trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
            Assigns id labels to each datum. These ids provide object
            constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
line
:class:`plotly.graph_objects.scattercarpet.Line`
instance or dict with compatible properties
marker
:class:`plotly.graph_objects.scattercarpet.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
mode
Determines the drawing mode for this scatter trace. If
the provided `mode` includes "text" then the `text`
elements appear at the coordinates. Otherwise, the
`text` elements appear on hover. If there are less than
20 points and the trace is not stacked then the default
is "lines+markers". Otherwise, "lines".
name
Sets the trace name. The trace name appear as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
:class:`plotly.graph_objects.scattercarpet.Selected`
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas
            any other non-array value means no selection at all, in
            which case the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.scattercarpet.Stream`
instance or dict with compatible properties
text
Sets text elements associated with each (a,b) point. If
a single string, the same string appears over all the
data points. If an array of strings, the items are
            mapped in order to the data points in (a,b). If
trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textfont
Sets the text font.
textposition
            Sets the positions of the `text` elements with respect
to the (x,y) coordinates.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
textposition .
textsrc
Sets the source reference on Chart Studio Cloud for
text .
texttemplate
Template string used for rendering the information text
            that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for
            details on the date formatting syntax. Every attribute
            that can be specified per-point (the ones that are
            `arrayOk: true`) is available. Finally, the template
            string has access to the variables `a`, `b` and `text`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
texttemplate .
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.scattercarpet.Unselected`
instance or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
"""
def __init__(
self,
arg=None,
a=None,
asrc=None,
b=None,
bsrc=None,
carpet=None,
connectgaps=None,
customdata=None,
customdatasrc=None,
fill=None,
fillcolor=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hoveron=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
legendgroup=None,
line=None,
marker=None,
meta=None,
metasrc=None,
mode=None,
name=None,
opacity=None,
selected=None,
selectedpoints=None,
showlegend=None,
stream=None,
text=None,
textfont=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
texttemplate=None,
texttemplatesrc=None,
uid=None,
uirevision=None,
unselected=None,
visible=None,
xaxis=None,
yaxis=None,
**kwargs
):
"""
Construct a new Scattercarpet object
Plots a scatter trace on either the first carpet axis or the
carpet axis with a matching `carpet` attribute.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Scattercarpet`
a
Sets the a-axis coordinates.
asrc
Sets the source reference on Chart Studio Cloud for a
.
b
Sets the b-axis coordinates.
bsrc
Sets the source reference on Chart Studio Cloud for b
.
carpet
An identifier for this carpet, so that `scattercarpet`
and `contourcarpet` traces can specify a carpet plot on
which they lie
connectgaps
Determines whether or not gaps (i.e. {nan} or missing
values) in the provided data arrays are connected.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
fill
Sets the area to fill with a solid color. Use with
            `fillcolor` if not "none". scattercarpet has a subset
of the options available to scatter. "toself" connects
the endpoints of the trace (or each segment of the
trace if it has gaps) into a closed shape. "tonext"
fills the space between two traces if one completely
encloses the other (eg consecutive contour lines), and
behaves like "toself" if there is no trace before it.
"tonext" should not be used if one trace does not
enclose the other.
fillcolor
Sets the fill color. Defaults to a half-transparent
variant of the line color, marker color, or marker line
color, whichever is available.
hoverinfo
            Determines which trace information appears on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.scattercarpet.Hoverlabel`
instance or dict with compatible properties
hoveron
Do the hover effects highlight individual points
(markers or line points) or do they highlight filled
regions? If the fill is "toself" or "tonext" and there
are no markers or text, then the default is "fills",
otherwise it is "points".
hovertemplate
Template string used for rendering the information that
            appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each (a,b)
point. If a single string, the same string appears over
all the data points. If an array of strings, the items
            are mapped in order to the data points in (a,b). To
be seen, trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
line
:class:`plotly.graph_objects.scattercarpet.Line`
instance or dict with compatible properties
marker
:class:`plotly.graph_objects.scattercarpet.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
mode
Determines the drawing mode for this scatter trace. If
the provided `mode` includes "text" then the `text`
elements appear at the coordinates. Otherwise, the
            `text` elements appear on hover. If there are fewer than
20 points and the trace is not stacked then the default
is "lines+markers". Otherwise, "lines".
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
:class:`plotly.graph_objects.scattercarpet.Selected`
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
            the `unselected` styles are turned on for all points, whereas
            any other non-array value means no selection at all, where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.scattercarpet.Stream`
instance or dict with compatible properties
text
Sets text elements associated with each (a,b) point. If
a single string, the same string appears over all the
data points. If an array of strings, the items are
            mapped in order to the data points in (a,b). If
trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textfont
Sets the text font.
textposition
            Sets the positions of the `text` elements with respect
to the (x,y) coordinates.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
textposition .
textsrc
Sets the source reference on Chart Studio Cloud for
text .
texttemplate
Template string used for rendering the information text
            that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for
            details on the date formatting syntax. Every attribute
            that can be specified per-point (the ones that are
            `arrayOk: true`) is available. Finally, the template
            string has access to the variables `a`, `b` and `text`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
texttemplate .
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.scattercarpet.Unselected`
instance or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
Returns
-------
Scattercarpet
"""
super(Scattercarpet, self).__init__("scattercarpet")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Scattercarpet
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Scattercarpet`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("a", None)
_v = a if a is not None else _v
if _v is not None:
self["a"] = _v
_v = arg.pop("asrc", None)
_v = asrc if asrc is not None else _v
if _v is not None:
self["asrc"] = _v
_v = arg.pop("b", None)
_v = b if b is not None else _v
if _v is not None:
self["b"] = _v
_v = arg.pop("bsrc", None)
_v = bsrc if bsrc is not None else _v
if _v is not None:
self["bsrc"] = _v
_v = arg.pop("carpet", None)
_v = carpet if carpet is not None else _v
if _v is not None:
self["carpet"] = _v
_v = arg.pop("connectgaps", None)
_v = connectgaps if connectgaps is not None else _v
if _v is not None:
self["connectgaps"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("fill", None)
_v = fill if fill is not None else _v
if _v is not None:
self["fill"] = _v
_v = arg.pop("fillcolor", None)
_v = fillcolor if fillcolor is not None else _v
if _v is not None:
self["fillcolor"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hoveron", None)
_v = hoveron if hoveron is not None else _v
if _v is not None:
self["hoveron"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("mode", None)
_v = mode if mode is not None else _v
if _v is not None:
self["mode"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("selected", None)
_v = selected if selected is not None else _v
if _v is not None:
self["selected"] = _v
_v = arg.pop("selectedpoints", None)
_v = selectedpoints if selectedpoints is not None else _v
if _v is not None:
self["selectedpoints"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textfont", None)
_v = textfont if textfont is not None else _v
if _v is not None:
self["textfont"] = _v
_v = arg.pop("textposition", None)
_v = textposition if textposition is not None else _v
if _v is not None:
self["textposition"] = _v
_v = arg.pop("textpositionsrc", None)
_v = textpositionsrc if textpositionsrc is not None else _v
if _v is not None:
self["textpositionsrc"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("texttemplate", None)
_v = texttemplate if texttemplate is not None else _v
if _v is not None:
self["texttemplate"] = _v
_v = arg.pop("texttemplatesrc", None)
_v = texttemplatesrc if texttemplatesrc is not None else _v
if _v is not None:
self["texttemplatesrc"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("unselected", None)
_v = unselected if unselected is not None else _v
if _v is not None:
self["unselected"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("xaxis", None)
_v = xaxis if xaxis is not None else _v
if _v is not None:
self["xaxis"] = _v
_v = arg.pop("yaxis", None)
_v = yaxis if yaxis is not None else _v
if _v is not None:
self["yaxis"] = _v
# Read-only literals
# ------------------
self._props["type"] = "scattercarpet"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
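# ---------------------------------------------------------------------------
# Editor's note: the block below is a usage sketch added for illustration; it
# is not part of the original module. It only exercises the hovertemplate /
# texttemplate syntax documented above, and every literal value in it is
# invented.
if __name__ == "__main__":
    _sketch = Scattercarpet(
        a=[4.0, 4.5, 5.0],
        b=[1.0, 1.5, 2.0],
        mode="markers+text",
        name="example",
        # %{a} / %{b} are usable because `a` and `b` are per-point (arrayOk)
        # attributes; the <extra> tag fills the secondary hover box.
        hovertemplate="a=%{a:.2f}, b=%{b:.2f}<extra>%{fullData.name}</extra>",
        # texttemplate uses the same d3-format syntax plus the a/b/text variables.
        texttemplate="%{a:.1f}",
    )
    print(_sketch.to_plotly_json())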
| mit |
kaiserroll14/301finalproject | main/pandas/computation/scope.py | 24 | 9002 | """Module for scope operations
"""
import sys
import struct
import inspect
import datetime
import itertools
import pprint
import numpy as np
import pandas as pd
from pandas.compat import DeepChainMap, map, StringIO
from pandas.core.base import StringMixin
import pandas.computation as compu
def _ensure_scope(level, global_dict=None, local_dict=None, resolvers=(),
target=None, **kwargs):
"""Ensure that we are grabbing the correct scope."""
return Scope(level + 1, global_dict=global_dict, local_dict=local_dict,
resolvers=resolvers, target=target)
def _replacer(x):
"""Replace a number with its hexadecimal representation. Used to tag
temporary variables with their calling scope's id.
"""
# get the hex repr of the binary char and remove 0x and pad by pad_size
# zeros
try:
hexin = ord(x)
except TypeError:
# bytes literals masquerade as ints when iterating in py3
hexin = x
return hex(hexin)
def _raw_hex_id(obj):
"""Return the padded hexadecimal id of ``obj``."""
    # interpret as a pointer since that's really what id returns
packed = struct.pack('@P', id(obj))
return ''.join(map(_replacer, packed))
_DEFAULT_GLOBALS = {
'Timestamp': pd.lib.Timestamp,
'datetime': datetime.datetime,
'True': True,
'False': False,
'list': list,
'tuple': tuple,
'inf': np.inf,
'Inf': np.inf,
}
def _get_pretty_string(obj):
"""Return a prettier version of obj
Parameters
----------
obj : object
Object to pretty print
Returns
-------
s : str
Pretty print object repr
"""
sio = StringIO()
pprint.pprint(obj, stream=sio)
return sio.getvalue()
class Scope(StringMixin):
"""Object to hold scope, with a few bells to deal with some custom syntax
and contexts added by pandas.
Parameters
----------
level : int
global_dict : dict or None, optional, default None
local_dict : dict or Scope or None, optional, default None
resolvers : list-like or None, optional, default None
target : object
Attributes
----------
level : int
scope : DeepChainMap
target : object
temps : dict
"""
__slots__ = 'level', 'scope', 'target', 'temps'
def __init__(self, level, global_dict=None, local_dict=None, resolvers=(),
target=None):
self.level = level + 1
# shallow copy because we don't want to keep filling this up with what
# was there before if there are multiple calls to Scope/_ensure_scope
self.scope = DeepChainMap(_DEFAULT_GLOBALS.copy())
self.target = target
if isinstance(local_dict, Scope):
self.scope.update(local_dict.scope)
if local_dict.target is not None:
self.target = local_dict.target
self.update(local_dict.level)
frame = sys._getframe(self.level)
try:
# shallow copy here because we don't want to replace what's in
# scope when we align terms (alignment accesses the underlying
# numpy array of pandas objects)
self.scope = self.scope.new_child((global_dict or
frame.f_globals).copy())
if not isinstance(local_dict, Scope):
self.scope = self.scope.new_child((local_dict or
frame.f_locals).copy())
finally:
del frame
# assumes that resolvers are going from outermost scope to inner
if isinstance(local_dict, Scope):
resolvers += tuple(local_dict.resolvers.maps)
self.resolvers = DeepChainMap(*resolvers)
self.temps = {}
def __unicode__(self):
scope_keys = _get_pretty_string(list(self.scope.keys()))
res_keys = _get_pretty_string(list(self.resolvers.keys()))
return '%s(scope=%s, resolvers=%s)' % (type(self).__name__, scope_keys,
res_keys)
@property
def has_resolvers(self):
"""Return whether we have any extra scope.
        For example, DataFrames pass their columns as resolvers during calls to
``DataFrame.eval()`` and ``DataFrame.query()``.
Returns
-------
hr : bool
"""
return bool(len(self.resolvers))
def resolve(self, key, is_local):
"""Resolve a variable name in a possibly local context
Parameters
----------
key : text_type
A variable name
is_local : bool
Flag indicating whether the variable is local or not (prefixed with
the '@' symbol)
Returns
-------
value : object
The value of a particular variable
"""
try:
# only look for locals in outer scope
if is_local:
return self.scope[key]
# not a local variable so check in resolvers if we have them
if self.has_resolvers:
return self.resolvers[key]
# if we're here that means that we have no locals and we also have
# no resolvers
assert not is_local and not self.has_resolvers
return self.scope[key]
except KeyError:
try:
# last ditch effort we look in temporaries
# these are created when parsing indexing expressions
# e.g., df[df > 0]
return self.temps[key]
except KeyError:
raise compu.ops.UndefinedVariableError(key, is_local)
def swapkey(self, old_key, new_key, new_value=None):
"""Replace a variable name, with a potentially new value.
Parameters
----------
old_key : str
Current variable name to replace
new_key : str
New variable name to replace `old_key` with
new_value : object
Value to be replaced along with the possible renaming
"""
if self.has_resolvers:
maps = self.resolvers.maps + self.scope.maps
else:
maps = self.scope.maps
maps.append(self.temps)
for mapping in maps:
if old_key in mapping:
mapping[new_key] = new_value
return
def _get_vars(self, stack, scopes):
"""Get specifically scoped variables from a list of stack frames.
Parameters
----------
stack : list
A list of stack frames as returned by ``inspect.stack()``
scopes : sequence of strings
A sequence containing valid stack frame attribute names that
evaluate to a dictionary. For example, ('locals', 'globals')
"""
variables = itertools.product(scopes, stack)
for scope, (frame, _, _, _, _, _) in variables:
try:
d = getattr(frame, 'f_' + scope)
self.scope = self.scope.new_child(d)
finally:
# won't remove it, but DECREF it
# in Py3 this probably isn't necessary since frame won't be
# scope after the loop
del frame
def update(self, level):
"""Update the current scope by going back `level` levels.
Parameters
----------
level : int or None, optional, default None
"""
sl = level + 1
# add sl frames to the scope starting with the
# most distant and overwriting with more current
# makes sure that we can capture variable scope
stack = inspect.stack()
try:
self._get_vars(stack[:sl], scopes=['locals'])
finally:
del stack[:], stack
def add_tmp(self, value):
"""Add a temporary variable to the scope.
Parameters
----------
value : object
An arbitrary object to be assigned to a temporary variable.
Returns
-------
name : basestring
The name of the temporary variable created.
"""
name = '{0}_{1}_{2}'.format(type(value).__name__, self.ntemps,
_raw_hex_id(self))
# add to inner most scope
assert name not in self.temps
self.temps[name] = value
assert name in self.temps
        # ``ntemps`` is derived from ``len(self.temps)``, so adding the key
        # above is all that is needed for the count to include this variable
return name
@property
def ntemps(self):
"""The number of temporary variables in this scope"""
return len(self.temps)
@property
def full_scope(self):
"""Return the full scope for use with passing to engines transparently
as a mapping.
Returns
-------
vars : DeepChainMap
All variables in this scope.
"""
maps = [self.temps] + self.resolvers.maps + self.scope.maps
return DeepChainMap(*maps)
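# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the original module. It
# shows the '@' local-variable resolution that Scope provides, going through
# the public DataFrame.query entry point; the frame and values are invented.
if __name__ == "__main__":
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    threshold = 1                        # picked up from the calling frame
    # '@threshold' is resolved via Scope.resolve(..., is_local=True)
    print(df.query('a > @threshold'))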
| gpl-3.0 |
ast0815/remu | docs/examples/02/fit_models.py | 2 | 3824 | from six import print_
import numpy as np
from remu import binning
from remu import plotting
from remu import likelihood
from multiprocess import Pool
pool = Pool(8)
likelihood.mapper = pool.map
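# (Editor's comment, added for clarity.) The pool's map function is handed to
# the remu likelihood module so that likelihood evaluations can presumably be
# spread over the 8 worker processes created above.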
response_matrix = "../01/response_matrix.npz"
with open("../01/reco-binning.yml", 'rt') as f:
reco_binning = binning.yaml.full_load(f)
with open("../01/optimised-truth-binning.yml", 'rt') as f:
truth_binning = binning.yaml.full_load(f)
reco_binning.fill_from_csv_file("../00/real_data.txt")
data = reco_binning.get_entries_as_ndarray()
data_model = likelihood.PoissonData(data)
matrix_predictor = likelihood.ResponseMatrixPredictor(response_matrix)
calc = likelihood.LikelihoodCalculator(data_model, matrix_predictor)
test = likelihood.HypothesisTester(calc)
truth_binning.fill_from_csv_file("../00/modelA_truth.txt")
modelA = truth_binning.get_values_as_ndarray()
modelA /= np.sum(modelA)
truth_binning.reset()
truth_binning.fill_from_csv_file("../00/modelB_truth.txt")
modelB = truth_binning.get_values_as_ndarray()
modelB /= np.sum(modelB)
with open("simple_hypotheses.txt", 'w') as f:
print_(calc(modelA*1000), file=f)
print_(test.likelihood_p_value(modelA*1000), file=f)
print_(calc(modelB*1000), file=f)
print_(test.likelihood_p_value(modelB*1000), file=f)
modelA_shape = likelihood.TemplatePredictor([modelA])
modelA_reco_shape = matrix_predictor.compose(modelA_shape)
calcA = likelihood.LikelihoodCalculator(data_model, modelA_reco_shape)
maxi = likelihood.BasinHoppingMaximizer()
retA = maxi(calcA)
with open("modelA_fit.txt", 'w') as f:
print_(retA, file=f)
modelB_shape = likelihood.TemplatePredictor([modelB])
calcB = calc.compose(modelB_shape)
retB = maxi(calcB)
with open("modelB_fit.txt", 'w') as f:
print_(retB, file=f)
testA = likelihood.HypothesisTester(calcA, maximizer=maxi)
testB = likelihood.HypothesisTester(calcB, maximizer=maxi)
with open("fit_p-values.txt", 'w') as f:
print_(testA.max_likelihood_p_value(), file=f)
print_(testB.max_likelihood_p_value(), file=f)
pltr = plotting.get_plotter(reco_binning)
pltr.plot_entries(label='data', hatch=None)
modelA_reco, modelA_weights = modelA_reco_shape(retA.x)
modelA_logL = calcA(retA.x)
modelA_p = testA.likelihood_p_value(retA.x)
modelB_reco, modelB_weights = calcB.predictor(retB.x)
modelB_logL = calcB(retB.x)
modelB_p = testB.likelihood_p_value(retB.x)
pltr.plot_array(modelA_reco, label=r'model A: $\log L=%.1f$, $p=%.3f$' % (modelA_logL, modelA_p), hatch=None, linestyle='dashed')
pltr.plot_array(modelB_reco, label=r'model B: $\log L=%.1f$, $p=%.3f$' % (modelB_logL, modelB_p), hatch=None, linestyle='dotted')
pltr.legend(loc='lower center')
pltr.savefig("reco-comparison.png")
mix_model = likelihood.TemplatePredictor([modelA, modelB])
calc_mix = calc.compose(mix_model)
ret = maxi.maximize_log_likelihood(calc_mix)
with open("mix_model_fit.txt", 'w') as f:
print_(ret, file=f)
test = likelihood.HypothesisTester(calc_mix)
with open("mix_model_p_value.txt", 'w') as f:
print_(test.max_likelihood_p_value(), file=f)
p_values = []
A_values = np.linspace(0, 1000, 11)
for A in A_values:
p = test.max_likelihood_ratio_p_value((A,None))
print_(A, p)
p_values.append(p)
wilks_p_values = []
fine_A_values = np.linspace(0, 1000, 100)
for A in fine_A_values:
p = test.wilks_max_likelihood_ratio_p_value((A,None))
print_(A, p)
wilks_p_values.append(p)
from matplotlib import pyplot as plt
fig, ax = plt.subplots()
ax.set_xlabel("Model A weight")
ax.set_ylabel("p-value")
ax.plot(A_values, p_values, label="Profile plug-in")
ax.plot(fine_A_values, wilks_p_values, label="Wilks")
ax.axvline(ret.x[0], color='k', linestyle='solid')
ax.axhline(0.32, color='k', linestyle='dashed')
ax.axhline(0.05, color='k', linestyle='dashed')
ax.legend(loc='best')
fig.savefig("p-values.png")
| mit |
IndraVikas/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 251 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
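# ---------------------------------------------------------------------------
# Editor's note: a small sketch added for illustration, not part of the
# original example. It materialises the "pseudo feature" (Vandermonde) matrix
# described in the docstring for two invented sample points.
x_demo = np.array([[2.0], [3.0]])
print(PolynomialFeatures(degree=3).fit_transform(x_demo))
# -> [[ 1.  2.  4.  8.]
#     [ 1.  3.  9. 27.]]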
| bsd-3-clause |
vortex-ape/scikit-learn | sklearn/metrics/tests/test_regression.py | 21 | 8513 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises, assert_raises_regex
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_squared_log_error(y_true, y_pred),
mean_squared_error(np.log(1 + y_true),
np.log(1 + y_pred)))
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = mean_squared_log_error(y_true, y_pred)
assert_almost_equal(error, 0.200, decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_squared_log_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
"used when targets contain negative values.",
mean_squared_log_error, [-1.], [-1.])
assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
"used when targets contain negative values.",
mean_squared_log_error, [1., 2., 3.], [1., -2., 3.])
assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
"used when targets contain negative values.",
mean_squared_log_error, [1., -2., 3.], [1., 2., 3.])
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test__check_reg_targets_exception():
invalid_multioutput = 'this_value_is_not_valid'
expected_message = ("Allowed 'multioutput' string values are.+"
"You provided multioutput={!r}".format(
invalid_multioutput))
assert_raises_regex(ValueError, expected_message,
_check_reg_targets,
[1, 2, 3],
[[1], [2], [3]],
invalid_multioutput)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
# Handling msle separately as it does not accept negative inputs.
y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
msle = mean_squared_log_error(y_true, y_pred, multioutput='raw_values')
msle2 = mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred),
multioutput='raw_values')
assert_array_almost_equal(msle, msle2, decimal=2)
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
# Handling msle separately as it does not accept negative inputs.
y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
msle = mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
msle2 = mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred),
multioutput=[0.3, 0.7])
assert_almost_equal(msle, msle2, decimal=2)
| bsd-3-clause |
raghavrv/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 78 | 2702 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set, the higher, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
def true_fun(X):
return np.cos(1.5 * np.pi * X)
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="neg_mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, edgecolor='b', s=20, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
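# ---------------------------------------------------------------------------
# Editor's note: a small sketch added for illustration, not part of the
# original example. It prints the cross-validated MSE per degree so the
# under-/overfitting trend described in the docstring can also be read off
# numerically.
for degree in degrees:
    pipe = make_pipeline(PolynomialFeatures(degree=degree, include_bias=False),
                         LinearRegression())
    cv_mse = -cross_val_score(pipe, X[:, np.newaxis], y,
                              scoring="neg_mean_squared_error", cv=10).mean()
    print("degree %2d: mean CV MSE = %.3g" % (degree, cv_mse))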
| bsd-3-clause |
PythonProgramming/Support-Vector-Machines---Basics-and-Fundamental-Investing-Project | p22.py | 2 | 5141 | import pandas as pd
import os
import time
from datetime import datetime
import re
from time import mktime
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import style
style.use("dark_background")
def Forward(
gather=[
"Total Debt/Equity",
'Trailing P/E',
'Price/Sales',
'Price/Book',
'Profit Margin',
'Operating Margin',
'Return on Assets',
'Return on Equity',
'Revenue Per Share',
'Market Cap',
'Enterprise Value',
'Forward P/E',
'PEG Ratio',
'Enterprise Value/Revenue',
'Enterprise Value/EBITDA',
'Revenue',
'Gross Profit',
'EBITDA',
'Net Income Avl to Common ',
'Diluted EPS',
'Earnings Growth',
'Revenue Growth',
'Total Cash',
'Total Cash Per Share',
'Total Debt',
'Current Ratio',
'Book Value Per Share',
'Cash Flow',
'Beta',
'Held by Insiders',
'Held by Institutions',
'Shares Short (as of',
'Short Ratio',
'Short % of Float',
'Shares Short (prior '
]
):
df = pd.DataFrame(
columns = [
'Date',
'Unix',
'Ticker',
'Price',
'stock_p_change',
'SP500',
'sp500_p_change',
'Difference',
##############
'DE Ratio',
'Trailing P/E',
'Price/Sales',
'Price/Book',
'Profit Margin',
'Operating Margin',
'Return on Assets',
'Return on Equity',
'Revenue Per Share',
'Market Cap',
'Enterprise Value',
'Forward P/E',
'PEG Ratio',
'Enterprise Value/Revenue',
'Enterprise Value/EBITDA',
'Revenue',
'Gross Profit',
'EBITDA',
'Net Income Avl to Common ',
'Diluted EPS',
'Earnings Growth',
'Revenue Growth',
'Total Cash',
'Total Cash Per Share',
'Total Debt',
'Current Ratio',
'Book Value Per Share',
'Cash Flow',
'Beta',
'Held by Insiders',
'Held by Institutions',
'Shares Short (as of',
'Short Ratio',
'Short % of Float',
'Shares Short (prior ',
##############
'Status'
]
)
file_list = os.listdir("forward")
for each_file in file_list[1:]:
ticker = each_file.split(".html")[0]
full_file_path = "forward/"+each_file
source = open(full_file_path,'r').read()
try:
value_list = []
for each_data in gather:
try:
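                    # (Editor's comment, added for clarity.) Pull the numeric
                    # value that follows this feature name in the saved Yahoo
                    # Finance HTML; "B"/"M" suffixes are expanded to absolute
                    # amounts just below.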
regex = re.escape(each_data) + r'.*?(\d{1,8}\.\d{1,8}M?B?|N/A)%?</td>'
value = re.search(regex, source)
value = (value.group(1))
if "B" in value:
value = float(value.replace("B",''))*1000000000
elif "M" in value:
value = float(value.replace("M",''))*1000000
value_list.append(value)
except Exception as e:
value = "N/A"
value_list.append(value)
# if value_list.count("N/A") > 0:
if value_list.count("N/A") > 15:
pass
else:
df = df.append(
{
'Date':"N/A",
'Unix':"N/A",
'Ticker':ticker,
'Price':"N/A",
'stock_p_change':"N/A",
'SP500':"N/A",
'sp500_p_change':"N/A",
'Difference':"N/A",
'DE Ratio':value_list[0],
#'Market Cap':value_list[1],
'Trailing P/E':value_list[1],
'Price/Sales':value_list[2],
'Price/Book':value_list[3],
'Profit Margin':value_list[4],
'Operating Margin':value_list[5],
'Return on Assets':value_list[6],
'Return on Equity':value_list[7],
'Revenue Per Share':value_list[8],
'Market Cap':value_list[9],
'Enterprise Value':value_list[10],
'Forward P/E':value_list[11],
'PEG Ratio':value_list[12],
'Enterprise Value/Revenue':value_list[13],
'Enterprise Value/EBITDA':value_list[14],
'Revenue':value_list[15],
'Gross Profit':value_list[16],
'EBITDA':value_list[17],
'Net Income Avl to Common ':value_list[18],
'Diluted EPS':value_list[19],
'Earnings Growth':value_list[20],
'Revenue Growth':value_list[21],
'Total Cash':value_list[22],
'Total Cash Per Share':value_list[23],
'Total Debt':value_list[24],
'Current Ratio':value_list[25],
'Book Value Per Share':value_list[26],
'Cash Flow':value_list[27],
'Beta':value_list[28],
'Held by Insiders':value_list[29],
'Held by Institutions':value_list[30],
'Shares Short (as of':value_list[31],
'Short Ratio':value_list[32],
'Short % of Float':value_list[33],
'Shares Short (prior ':value_list[34],
'Status':"N/A"
},
ignore_index=True)
except Exception as e:
pass
# df.to_csv("forward_sample_NO_NA.csv")
df.to_csv("forward_sample_WITH_NA.csv")
Forward()
| mit |
sarahgrogan/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 248 | 6359 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P.J.Rousseuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am.
    Stat. Assoc., 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
# cast to int: the outlier counts are used below as array sizes and slice bounds
range_n_outliers = np.concatenate(
    (np.linspace(0, n_samples / 8, 5),
     np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(int)
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
             err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
             yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
             label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
         err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)], color='green',
         ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
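# ---------------------------------------------------------------------------
# Editor's note: a standalone sketch added for illustration, not part of the
# original example. The MCD "pure subset" described in the docstring is
# exposed through ``support_``; the toy data below is invented.
rng_demo = np.random.RandomState(42)
X_demo = rng_demo.randn(100, 2)
X_demo[:10] += 10.                        # ten obvious outliers
mcd_demo = MinCovDet(random_state=42).fit(X_demo)
print("robust location estimate:", mcd_demo.location_)
print("observations kept in the pure subset:", mcd_demo.support_.sum())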
| bsd-3-clause |
NunoEdgarGub1/scikit-learn | sklearn/neighbors/approximate.py | 128 | 22351 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Joel Nothman <joel.nothman@gmail.com>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
        Lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
        A value ranging from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
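        # For a prefix length k, left_mask[k] keeps only the k leading bits of a
        # hash and right_mask[k] sets the remaining trailing bits to one, so the
        # pair brackets the block of sorted hashes sharing a given k-bit prefix.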
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
        # If too few candidates were found, the candidate set is filled up
        # uniformly from the indices that were not selected.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[i], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[i], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
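if __name__ == '__main__':
    # Minimal usage sketch (illustration only; the sizes and radius below are
    # arbitrary): index random points, run a radius query, then grow the index
    # incrementally with ``partial_fit``.
    rng = np.random.RandomState(42)
    lshf = LSHForest(n_estimators=10, random_state=42).fit(rng.rand(100, 16))
    dists, ids = lshf.radius_neighbors(rng.rand(3, 16), radius=0.4)
    lshf.partial_fit(rng.rand(10, 16))  # append new points to the fitted index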
| bsd-3-clause |
SCgeeker/OpenSesame | setup-win32.py | 2 | 15674 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
----
## About
This script will create a binary Windows package of OpenSesame, using py2exe.
If possible, dependencies are simply copied into a subfolder and compiled to
`.pyc` format. If not possible, they are included in the `library.zip` file,
which is the default py2exe way of including dependencies.
## Usage
To compile all source files to `.pyc` (default), call the script as follows:
python setup-win32.py py2exe
To compile all source files to `.pyo`, call the script as follows:
python -O setup-win32.py py2exe
or
python -OO setup-win32.py py2exe
More options can be tweaked by changing the variables below.
## Output
The build will be stored in the `dist` subfolder.
## Python modules
The following Python modules should be installed:
bidi
- This is installed as an egg and therefore not packaged properly. For
packaging, simply place the `bidi` source folder directly in the
Python site-packages.
cairo
- Unofficial Windows builds can be downloaded from
<http://www.lfd.uci.edu/~gohlke/pythonlibs/#pycairo>
expyriment
matplotlib
Subdependencies:
- python-dateutil
- Installed as .egg, so requires manual copying. If it fails to
install due to decoding errors, the package can be copied
manually to site-packages.
- pytz
- Installed as .egg, so requires manual copying.
- six
- Installed as .egg, so requires manual copying.
opencv2
- Download and extract the regular OpenCV2 package
- Copy cv2.pyd from build/python/2.7 to the Python site-packages
folder
pil
psychopy
pyflakes
- This is installed as an egg and therefore not packaged properly. For
packaging, simply place the `pyflakes` source folder directly in the
Python site-packages.
py2exe
pyaudio
pygame
pyglet
    - The installer doesn't work. Need to install from `.zip`.
pyopengl
pyparallel
- Needs to be installed through the `.zip` package
- Manually place `simpleio.dll`, which is included with the pyparallel
source, in the main Python folder.
pyparsing
- Required by matplotlib. The installer doesn't work, so needs to be
installed from .zip.
pyqt4
- The Python 3 modules will break on compilation, and have to be
      manually removed or renamed. These are in `uic/port_v3`.
pyserial
numpy
scipy
vlc
- Required only for media_player_vlc
- Place vlc.py in the media_player_vlc folder. See below for folder
structure.
- Choose the version for VLC 2.0
- Available from <http://liris.cnrs.fr/advene/download/python-ctypes/>
## Folder structure
If media_player, media_player_vlc, boks, or pygaze are included, they are
assumed to be one folder up. So the folder structure should be as follows:
    /[parent folder]
/opensesame
/pygaze
/boks
/media_player
/media_player_vlc
---
"""
from distutils.core import setup
import distutils.sysconfig as sysconfig
import compileall
import py_compile
import glob
import py2exe
import os
import sys
import shutil
import libqtopensesame.qtopensesame
import libopensesame.misc
import psychopy
import urllib
from setup_shared import included_plugins, included_extensions
# Set this to False to build a 'light' version without the Qt4 gui. This
# option currently breaks opensesamerun as well, so don't set it to False.
include_gui = True
# Miscellaneous settings
include_plugins = True
include_extensions = True
include_media_player = False
include_media_player_vlc = True
include_boks = True
include_pygaze = True
include_examples = True
include_sounds = True
include_faenza = True
include_inpout32 = True
include_simpleio = True
include_pyqt4_plugins = True
release_zip = True
release_build = 1
python_folder = os.path.dirname(sys.executable)
python_version = "%d.%d" % (sys.version_info[0], sys.version_info[1])
print(u'Python folder: %s' % python_folder)
print(u'Python version: %s' % python_version)
# Determine which files we're going to keep
if '-OO' in sys.argv:
strip_ext = '.py', '.pyc'
keep_ext = '.pyo'
optimize = 2
print(u'Compiling to .pyo')
elif '-O' in sys.argv:
strip_ext = '.py', '.pyc'
keep_ext = '.pyo'
optimize = 1
print(u'Compiling to .pyo')
else:
strip_ext = '.py', '.pyo'
keep_ext = '.pyc'
optimize = 0
print(u'Compiling to .pyc')
print(u'Optimize = %d' % optimize)
# Packages that are to be copied from the site-packages folder, rather than
# included by py2exe in the library .zip file. Copying packages is in general
# preferred, as some packages (e.g. expyriment) do not work well when included
# in the library.zip file that py2exe uses to store packages. Note eggs are not
# copied properly.
copy_packages = [
'QProgEdit',
'libopensesame',
'openexp',
'expyriment',
'psychopy',
'pyflakes',
'scipy',
'numpy',
'serial',
'parallel',
'OpenGL',
'PIL',
'pygame',
'pyglet',
'libqtopensesame',
'markdown',
'matplotlib',
'bidi',
'yaml',
'pygaze',
'pytz',
'pyparsing',
'dateutil',
'six',
'wx'
]
# A list of packages that shouldn't be stripped from .py files, because it
# breaks them.
no_strip = [
'expyriment'
]
# Packages that are part of the standard Python packages, but should not be
# included
exclude_packages = [
'idlelib',
'antigravity', # Kind of funny, importing this opens: http://xkcd.com/353/
'test', # Avoid automated tests, because they take ages
]
# Packages that are not part of the standard Python packages (or not detected
# as such), but should nevertheless be included
include_packages = [
'pyaudio',
'cairo',
'Image',
'cv2',
'sip',
'PyQt4.QtCore',
'PyQt4.QtGui',
'PyQt4.Qsci',
'PyQt4.QtWebKit',
'PyQt4.QtNetwork',
'PyQt4.uic',
]
exclude_dll = [
"MSVCP90.DLL",
"libzmq.dll",
]
# Create empty destination folders
if os.path.exists("dist"):
shutil.rmtree("dist")
os.mkdir("dist")
os.mkdir(os.path.join("dist", "plugins"))
os.mkdir(os.path.join("dist", "extensions"))
# Copy PyQt4 plugins
if include_pyqt4_plugins:
shutil.copytree('%s\Lib\site-packages\PyQt4\plugins' % python_folder,
'dist\PyQt4_plugins')
# A filter to ignore non-relevant package files
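# (shutil.copytree calls this filter with (directory, names) and skips every
# name it returns, so stale byte-code files are left out and re-compiled in
# the `dist` copy.)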
def ignore_package_files(folder, files):
l = []
for f in files:
if os.path.splitext(f)[1] in [".pyo", ".pyc"]:
l.append(f)
return l
# A function to strip non-compiled scripts and backup files
def strip_py(folder):
for path in os.listdir(folder):
path = os.path.join(folder, path)
if os.path.isdir(path):
strip_py(path)
continue
base, ext = os.path.splitext(path)
if (ext in strip_ext and os.path.exists(base+keep_ext)) or \
path[-1] == '~':
print('stripping %s' % path)
os.remove(path)
# Copy packages
for pkg in copy_packages:
print('copying packages %s ... ' % pkg)
exec('import %s as _pkg' % pkg)
pkg_folder = os.path.dirname(_pkg.__file__)
pkg_target = os.path.join("dist", pkg)
# For modules that are .py files in site-packages
if pkg_folder.endswith('site-packages'):
print('\tmodule %s' % _pkg.__file__)
shutil.copy(os.path.join(pkg_folder, '%s.py' % pkg), 'dist')
compileall.compile_file(r'dist/%s.py' % pkg)
os.remove(r'dist/%s.py' % pkg)
# For packages that are subfolder of site-packages
else:
print('\tfrom %s' % pkg_folder)
shutil.copytree(pkg_folder, pkg_target, symlinks=True, \
ignore=ignore_package_files)
compileall.compile_dir(pkg_target, force=True)
# Expyriment assumes that certain source files are available, see
# http://code.google.com/p/expyriment/issues/detail?id=16
    if pkg not in no_strip:
strip_py(pkg_target)
# Create a list of standard packages that should be included
# http://stackoverflow.com/questions/6463918/how-can-i-get-a-list-of-all-the-python-standard-library-modules
print('detecting standard Python packages and modules ... ')
std_pkg = []
std_lib = sysconfig.get_python_lib(standard_lib=True)
for top, dirs, files in os.walk(std_lib):
for nm in files:
prefix = top[len(std_lib)+1:]
if prefix[:13] == 'site-packages':
continue
if nm == '__init__.py':
pkg = top[len(std_lib)+1:].replace(os.path.sep,'.')
elif nm[-3:] == '.py':
pkg = os.path.join(prefix, nm)[:-3].replace(os.path.sep,'.')
elif nm[-3:] == '.so' and top[-11:] == 'lib-dynload':
pkg = nm[0:-3]
if pkg[0] == '_':
continue
exclude = False
for _pkg in exclude_packages:
if pkg.find(_pkg) == 0:
exclude = True
break
if exclude:
continue
try:
exec('import %s' % pkg)
except:
continue
print(pkg)
std_pkg.append(pkg)
for pkg in sys.builtin_module_names:
print(pkg)
std_pkg.append(pkg)
windows_opensesame = {
"script" : "opensesame",
"icon_resources": [(0, os.path.join("resources", "opensesame.ico"))],
}
windows_opensesamerun = {
"script" : "opensesamerun",
"icon_resources": [(0, os.path.join("resources", "opensesamerun.ico"))],
}
windows = [windows_opensesamerun]
if include_gui:
windows += [windows_opensesame]
# Setup options
setup(
# Use 'console' to have the programs run in a terminal and
# 'windows' to run them normally.
windows = windows,
options = {
"py2exe" : {
"compressed" : True,
"optimize": optimize,
"bundle_files": 3,
"excludes": copy_packages,
"includes" : std_pkg + include_packages,
"dll_excludes" : exclude_dll,
}
}
)
# A filter to ignore non-relevant resource files
def ignore_resources(folder, files):
l = []
print("... %s" % folder)
for f in files:
if os.path.splitext(f)[1] in [".csv", ".pyc"] and f not in \
["icon_map.csv"]:
l.append(f)
if 'Faenza' in folder and f == 'scalable':
l.append(f)
if f == "Faenza" and (not include_faenza or not include_gui):
l.append(f)
if not include_gui and f in ('theme', 'locale', 'templates', 'ts', \
'ui'):
l.append(f)
return l
# Compiling opensesame to opensesame.pyc for the multiprocessing functionality
print("compiling opensesame")
py_compile.compile("opensesame", os.path.join("dist", "opensesame.pyc"))
# Copy resource files
print("copying resources")
shutil.copytree("resources", os.path.join("dist", "resources"), symlinks=True, \
ignore=ignore_resources)
print("copying README info etc.")
shutil.copyfile("readme.md", os.path.join("dist", "readme.md"))
shutil.copyfile("debian/copyright", os.path.join("dist", "copyright"))
shutil.copyfile("COPYING", os.path.join("dist", "COPYING"))
if include_gui:
shutil.copytree("help", os.path.join("dist", "help"))
print("copying PyGame/ SDLL dll's")
shutil.copyfile(r"%s\Lib\site-packages\pygame\SDL_ttf.dll" \
% python_folder, r"dist\SDL_ttf.dll")
shutil.copyfile(r"%s\Lib\site-packages\pygame\libfreetype-6.dll" \
% python_folder, r"dist\libfreetype-6.dll")
shutil.copyfile(r"%s\Lib\site-packages\pygame\libogg-0.dll" \
% python_folder, r"dist\libogg-0.dll")
print("copying PIL.pth")
shutil.copyfile(r"%s\Lib\site-packages\PIL.pth" \
% python_folder, r"dist\PIL.pth")
if include_simpleio:
print("copying simpleio.dll")
shutil.copyfile(r"%s\simpleio.dll" \
% python_folder, r"dist\simpleio.dll")
if include_inpout32:
print("copying inpout32.dll")
urllib.urlretrieve ("http://files.cogsci.nl/misc/inpout32.dll", \
r"dist\inpout32.dll")
# Include plug-ins
if include_plugins:
print("copying plugins" )
for plugin in included_plugins:
print("copying plugin %s" % plugin)
shutil.copytree(os.path.join("plugins", plugin), os.path.join("dist", \
"plugins", plugin))
for path in os.listdir(os.path.join("plugins", plugin)):
if path[-1] == "~" or os.path.splitext(path)[1] in [".pyc"]:
print("removing file %s" % path)
os.remove(os.path.join("dist", "plugins", plugin, path))
# Include extensions
if include_extensions:
print("copying extensions" )
for extension in included_extensions:
print("copying extension %s" % extension)
shutil.copytree(os.path.join("extensions", extension),
os.path.join("dist", "extensions", extension))
for path in os.listdir(os.path.join("extensions", extension)):
if path[-1] == "~" or os.path.splitext(path)[1] in [".pyc"]:
print("removing file %s" % path)
os.remove(os.path.join("dist", "extensions", extension, path))
# Include old media_player
if include_media_player:
print("copying media_player")
os.mkdir("dist\plugins\media_player")
shutil.copyfile(r"..\media_player\media_player.py", \
r"dist\plugins\media_player\media_player.py""")
shutil.copyfile(r"..\media_player\media_player.html", \
r"dist\plugins\media_player\media_player.html")
shutil.copyfile(r"..\media_player\media_player.png", \
r"dist\plugins\media_player\media_player.png")
shutil.copyfile(r"..\media_player\media_player_large.png", \
r"dist\plugins\media_player\media_player_large.png")
shutil.copyfile(r"..\media_player\info.txt", \
r"dist\plugins\media_player\info.txt")
# Include new vlc-based media player
if include_media_player_vlc:
print("copying media_player_vlc")
os.mkdir("dist\plugins\media_player_vlc")
shutil.copyfile(r"..\media_player_vlc\vlc.py", \
r"dist\plugins\media_player_vlc\vlc.py")
shutil.copyfile( \
r"..\media_player_vlc\media_player_vlc.py", \
r"dist\plugins\media_player_vlc\media_player_vlc.py")
shutil.copyfile( \
r"..\media_player_vlc\media_player_vlc.md", \
r"dist\plugins\media_player_vlc\media_player_vlc.md")
shutil.copyfile( \
r"..\media_player_vlc\media_player_vlc.png", \
r"dist\plugins\media_player_vlc\media_player_vlc.png")
shutil.copyfile( \
r"..\media_player_vlc\media_player_vlc_large.png", \
r"dist\plugins\media_player_vlc\media_player_vlc_large.png")
shutil.copyfile(r"..\media_player_vlc\info.json", \
r"dist\plugins\media_player_vlc\info.json")
# Include Boks plug-in
if include_boks:
print("copying boks")
shutil.copytree(r"..\boks\opensesame\boks",
r"dist\plugins\boks",
ignore=shutil.ignore_patterns('*.pyc', '.*', '.pyo'))
# Include PyGaze plug-ins
if include_pygaze:
print("copying pygaze")
for plugin in ['init', 'log', 'drift_correct', 'start_recording',
'stop_recording', 'wait']:
shutil.copytree(r"..\pygaze\opensesame_plugins\pygaze_%s" % plugin,
r"dist\plugins\pygaze_%s" % plugin,
ignore=shutil.ignore_patterns('*.pyc', '.*', '.pyo'))
# Include examples
if include_examples:
print("copying examples")
shutil.copytree("examples", os.path.join("dist", "examples"))
for path in os.listdir(os.path.join("dist", "examples")):
if path[-1] == "~" or os.path.splitext(path)[1] not in [".opensesame", \
".gz"]:
print("removing file %s" % path)
os.remove(os.path.join("dist", "examples", path))
# Include sounds
if include_sounds:
print("copying sounds")
shutil.copytree("sounds", os.path.join("dist", "sounds"))
# Create a zip release of the folder
if release_zip:
import zipfile
target_folder = 'opensesame_%s-win32-%s' % (libopensesame.misc.version, \
release_build)
target_zip = target_folder + '.zip'
shutil.move('dist', target_folder)
zipf = zipfile.ZipFile(target_zip, 'w', zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(target_folder):
for file in files:
path = os.path.join(root, file)
print('Adding %s' % path)
zipf.write(path)
zipf.close()
shutil.move(target_folder, 'dist')
print('zipfile: %s' % target_zip)
| gpl-3.0 |
ominux/scikit-learn | benchmarks/bench_sgd_covertype.py | 2 | 6189 | """
================================
Covertype dataset with dense SGD
================================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes on
the forest covertype dataset of Blackard, Jock, and Dean [1]. The dataset
comprises 581,012 samples. It is low-dimensional with 54 features and a
sparsity of approx. 23%. Here, we consider the task of predicting class 1
(spruce/fir). The classification performance of SGD is competitive with
Liblinear while being two orders of magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
Liblinear 9.4471s 0.0184s 0.2305
GaussianNB 2.5426s 0.1725s 0.3633
SGD 0.2137s 0.0047s 0.2300
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
To run this example use your favorite python shell::
% ipython examples/sgd/covertype_dense_sgd.py
"""
from __future__ import division
print __doc__
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
# License: BSD Style.
# $Id$
from time import time
import os
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
######################################################################
## Download the data, if not already on disk
if not os.path.exists('covtype.data.gz'):
# Download the data
import urllib
print "Downloading data, Please Wait (11MB)..."
opener = urllib.urlopen(
'http://archive.ics.uci.edu/ml/'
'machine-learning-databases/covtype/covtype.data.gz')
open('covtype.data.gz', 'wb').write(opener.read())
######################################################################
## Load dataset
print("Loading dataset...")
import gzip
f = gzip.open('covtype.data.gz')
X = np.fromstring(f.read().replace(",", " "), dtype=np.float64, sep=" ",
count=-1)
X = X.reshape((581012, 55))
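# 55 columns = 54 input features plus the cover type label in the last column.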
f.close()
# class 1 vs. all others.
y = np.ones(X.shape[0]) * -1
y[np.where(X[:, -1] == 1)] = 1
X = X[:, :-1]
######################################################################
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
train_idx = idx[:522911]
test_idx = idx[522911:]
X_train = X[train_idx]
y_train = y[train_idx]
X_test = X[test_idx]
y_test = y[test_idx]
# free memory
del X
del y
######################################################################
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train-mean) / std
X_test = (X_test-mean) / std
######################################################################
## Print dataset statistics
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25),
X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25),
np.unique(y_train).shape[0]))
print("%s %d (%d, %d)" % ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train==1),
np.sum(y_train==-1)))
print("%s %d (%d, %d)" % ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test==1),
np.sum(y_test==-1)))
print("")
print("Training classifiers...")
print("")
######################################################################
## Benchmark classifiers
def benchmark(clf):
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
err = metrics.zero_one(y_test, pred) / float(pred.shape[0])
return err, train_time, test_time
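# The helper above works for any estimator exposing fit/predict, e.g.
# (hypothetical settings): benchmark(SGDClassifier(alpha=1e-4, n_iter=5))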
######################################################################
## Train Liblinear model
liblinear_parameters = {
'loss': 'l2',
'penalty': 'l2',
'C': 1000,
'dual': False,
'tol': 1e-3,
}
liblinear_res = benchmark(LinearSVC(**liblinear_parameters))
liblinear_err, liblinear_train_time, liblinear_test_time = liblinear_res
######################################################################
## Train GaussianNB model
gnb_err, gnb_train_time, gnb_test_time = benchmark(GaussianNB())
######################################################################
## Train SGD model
sgd_parameters = {
'alpha': 0.001,
'n_iter': 2,
}
sgd_err, sgd_train_time, sgd_test_time = benchmark(SGDClassifier(
**sgd_parameters))
######################################################################
## Print classification performance
print("")
print("Classification performance:")
print("===========================")
print("")
def print_row(clf_type, train_time, test_time, err):
print("%s %s %s %s" % (clf_type.ljust(12),
("%.4fs" % train_time).center(10),
("%.4fs" % test_time).center(10),
("%.4f" % err).center(10)))
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"error-rate"))
print("-" * 44)
print_row("Liblinear", liblinear_train_time, liblinear_test_time,
liblinear_err)
print_row("GaussianNB", gnb_train_time, gnb_test_time, gnb_err)
print_row("SGD", sgd_train_time, sgd_test_time, sgd_err)
print("")
print("")
| bsd-3-clause |
tomlof/scikit-learn | sklearn/tests/test_dummy.py | 186 | 17778 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategey_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
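    # Weighted class counts: class 0 gets 0.1 + 0.1 = 0.2, class 1 gets 1.0,
    # so the expected prior is [0.2 / 1.2, 1.0 / 1.2].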
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
seanrivera/ASEN5090-Project | orbit.py | 1 | 10606 | import math
import matplotlib.pyplot as plt
from numpy import cross, dot, divide, arccos, arcsin, array, matmul, isnan, concatenate, transpose
from numpy.linalg import norm
from scipy.integrate import ode
from constants import *
from coordinate_system import eci2ecef, ecef2lla
from models import two_body_model
class Orbit:
    DIFF = 1e-8  # convergence tolerance for the eccentric-anomaly iteration
MAX_ITER = 100
# noinspection PyPep8Naming
def __init__(self, a=float('nan'), e=float('nan'), i=float('nan'), Omega=float('nan'), omega=float('nan'),
mo=float('nan'), tp=float('nan'), mu=mu_earth, name=''):
self.mu = mu
self.a = a # Semi-major Axis
if not math.isnan(a) and not math.isnan(mu):
self.period = 2 * math.pi * math.sqrt((a ** 3) / mu)
self.n = 2 * math.pi / self.period
else:
self.period = float('nan')
self.n = float('nan')
self.periapsis = float('nan')
self.apoapsis = float('nan')
self.E = float('nan') # Specific Energy
self.phi = float('nan') # flight path angle
self.h_hat = [float('nan'), float('nan'), float('nan')] # Angular momentum
self.tp = tp # time since periapsis
self.e = e # Eccentricity
self.i = i # Inclination
self.Omega = Omega # Longitude of the ascending node
self.omega = omega # Argument of periapsis
self.nu = float('nan') # True anomaly -Radians
self.ecc_anomaly = float('nan') # Eccentric anomaly -Radians
if math.isnan(mo):
self.find_mean_anomaly()
else:
self.mo = mo # Mean anomaly at epoch -Radians
self.find_eccentric_anomaly()
self.find_true_anomaly()
self.find_time_periapsis()
self.find_periapsis_apoapsis()
self.name = name
def xyz_vel_to_kelper(self, rvec, vvec, mu=mu_earth):
self.mu = mu
r = norm(rvec) # Radius
v = norm(vvec) # Velocity
self.h_hat = cross(rvec, vvec) # Specific angular momentum
h = norm(self.h_hat)
self.phi = arcsin(dot(rvec, vvec) / (r * v)) # flight path angle
n_hat = cross([0, 0, 1], self.h_hat)
        period = norm(n_hat)  # magnitude of the node vector (not the orbital period)
e_hat = divide(cross(vvec, self.h_hat), mu) - divide(rvec, r)
self.E = (v ** 2 / 2) - (mu / r)
self.e = norm(e_hat)
self.i = arccos(divide(dot([0, 0, 1], self.h_hat), h))
self.Omega = arccos(n_hat[0] / period)
if n_hat[1] < 0:
self.Omega = 2 * math.pi - self.Omega
self.omega = arccos(dot(n_hat, e_hat) / (period * self.e))
if e_hat[2] < 0:
self.omega = 2 * math.pi - self.omega
self.nu = arccos(dot(e_hat, rvec) / (self.e * r))
if dot(rvec, vvec) < 0:
self.nu = 2 * math.pi - self.nu
self.a = -mu / (2 * self.E)
self.period = 2 * math.pi * math.sqrt((self.a ** 3) / mu)
self.n = 2 * math.pi / self.period
self.find_eccentric_anomaly()
self.find_mean_anomaly()
def kelper_to_xvz_vel(self):
p = self.a * (1 - self.e ** 2) # semiparameter
rvec_pqw = [p * math.cos(self.nu) / (1 + self.e * math.cos(self.nu)),
p * math.sin(self.nu) / (1 + self.e * math.cos(self.nu)),
0] # R in the pqw frame
vvec_pqw = [-math.sqrt(self.mu / p) * math.sin(self.nu), math.sqrt(self.mu / p) * (self.e + math.cos(self.nu)),
0] # V vector in the pqw frame
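        # Standard perifocal (PQW) to inertial (IJK) rotation, i.e. the 3-1-3
        # Euler sequence R3(-Omega) * R1(-i) * R3(-omega) written out element-wise.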
transformation__matrix = array([
[math.cos(self.Omega) *
math.cos(self.omega) - math.sin(self.Omega) * math.sin(self.omega) *
math.cos(self.i), -math.cos(self.Omega) * math.sin(self.omega) -
math.sin(self.Omega) * math.cos(self.omega) * math.cos(self.i),
math.sin(self.Omega) * math.sin(self.i)],
[math.sin(self.Omega) *
math.cos(self.omega) + math.cos(self.Omega) * math.sin(self.omega) *
math.cos(self.i),
-math.sin(self.Omega) * math.sin(
self.omega) +
math.cos(self.Omega) * math.cos(
self.omega) * math.cos(self.i),
-math.cos(self.Omega) * math.sin(
self.i)],
[math.sin(self.omega) *
math.sin(self.i),
math.cos(
self.omega) * math.sin(
self.i),
math.cos(self.i)]])
rvec_ijk = matmul(transformation__matrix, rvec_pqw)
vvec_ijk = matmul(transformation__matrix, vvec_pqw)
return rvec_ijk, vvec_ijk
def periapsis_apoapsis_set(self, periapsis, apoapsis, mu=mu_earth):
self.a = (apoapsis + periapsis) / 2
self.e = (apoapsis - periapsis) / (apoapsis + periapsis)
self.mu = mu
if not math.isnan(self.a) and not math.isnan(mu):
self.period = 2 * math.pi * math.sqrt((self.a ** 3) / mu)
self.n = 2 * math.pi / self.period
else:
self.period = float('nan')
self.n = float('nan')
def find_periapsis_apoapsis(self):
if not math.isnan(self.a) and not math.isnan(self.e):
self.periapsis = self.a * (1 - self.e)
self.apoapsis = self.a * (1 + self.e)
else:
self.periapsis = float('nan')
self.apoapsis = float('nan')
def find_eccentric_anomaly(self):
if math.isnan(self.e):
self.ecc_anomaly = float('nan')
return
if not math.isnan(self.mo):
cur_diff = float('inf')
guess_ecc = self.mo
cur_iter = 0
while cur_diff > self.DIFF and cur_iter < self.MAX_ITER:
self.ecc_anomaly = guess_ecc - (
(guess_ecc - self.e * math.sin(guess_ecc) - self.mo) / (1 - self.e * math.cos(guess_ecc)))
cur_diff = math.fabs(guess_ecc - self.ecc_anomaly)
guess_ecc = self.ecc_anomaly
cur_iter += 1
elif not math.isnan(self.nu) and not math.isnan(self.a):
cos_e = (self.e + math.cos(self.nu)) / (1 + self.e * math.cos(self.nu))
sin_e = (math.sqrt(1 - self.e ** 2) * math.sin(self.nu)) / (1 + self.e * math.cos(self.nu))
self.ecc_anomaly = math.atan2(sin_e, cos_e)
else:
self.ecc_anomaly = float('nan')
def find_true_anomaly(self):
if math.isnan(self.ecc_anomaly) or math.isnan(self.e):
self.nu = float('nan')
else:
self.nu = 2 * math.atan2(math.tan(self.ecc_anomaly / 2),
math.sqrt((1 - self.e) / (1 + self.e)))
def find_mean_anomaly(self):
if not math.isnan(self.ecc_anomaly) and not math.isnan(self.e): # Try the eccentric anomaly
self.mo = self.ecc_anomaly - self.e * math.sin(self.ecc_anomaly)
elif not math.isnan(self.tp) and not math.isnan(self.n): # Try the time since periapsis
self.mo = self.n * self.tp
else:
self.mo = float('nan')
if self.mo < 0:
self.mo += 2 * math.pi
def find_time_periapsis(self):
if math.isnan(self.mo) or math.isnan(self.n):
self.tp = float('nan')
else:
self.tp = self.mo / self.n
def vis_viva(self, r):
v = math.sqrt(self.mu * ((2 / r) - (1 / self.a)))
return v
def plot_ground_track(self, filename: str, duration: float, time_step: float, theta_gst: float,
rot: float = 7.2921158553 * 10 ** -5, out_name: str = "ground_track.png"):
plt.figure()
with open(filename, 'r') as f:
lat_lon_str = f.read()
lat = []
lon = []
sat_lat = []
sat_lon = []
for pos in lat_lon_str.split('\n'):
lls = pos.split()
if lls:
lon.append(float(lls[0]))
lat.append(float(lls[1]))
plt.plot(lon, lat)
time = 0
while time < duration:
(pos_eci, _) = self.kelper_to_xvz_vel()
pos_ecef = eci2ecef(pos_eci=pos_eci, theta_gst=theta_gst)
(tmp_lat, tmp_lon, _) = ecef2lla(pos_ecef=pos_ecef)
sat_lat.append(math.degrees(tmp_lat))
sat_lon.append(math.degrees(tmp_lon))
self.propagate(time_step=time_step)
time += time_step
theta_gst += rot * time_step
plt.plot(sat_lon, sat_lat, 'gd')
plt.xlabel("Latitude (deg)")
plt.ylabel("Longitude (deg)")
plt.title("Ground track plot")
plt.xlim([-180, 180])
plt.ylim([-90, 90])
plt.savefig(out_name)
def propagate(self, time_step: float):
if not isnan(self.mo):
self.mo += self.n * time_step
if self.mo > 2 * math.pi:
self.mo -= 2 * math.pi
self.find_eccentric_anomaly()
self.find_true_anomaly()
if not isnan(self.tp):
self.tp += time_step
if self.tp > self.period:
self.tp -= self.period
def ode_propagate(self, time_step: float, end_time: float, tol: float):
t = 0
dt = time_step
(X, V) = self.kelper_to_xvz_vel()
y0 = array(concatenate((X, transpose(V))))
r = ode(two_body_model).set_integrator('dopri5', atol=tol, rtol=tol)
r.set_initial_value(y=y0, t=t).set_f_params(self.mu)
while r.successful() and r.t < end_time:
r.integrate(r.t + dt)
pos_eci = r.y[0:3]
vel_eci = r.y[3:6]
self.xyz_vel_to_kelper(rvec=pos_eci, vvec=vel_eci, mu=self.mu)
def j2_nodal_regression(self, radius: float = rad_earth, j2: float = j2_earth):
p = self.a * (1 - self.e ** 2)
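        # Secular node regression due to J2: dOmega/dt = -(3/2) * n * J2 * (R/p)**2 * cos(i)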
Omega_dot = -3 * self.n * radius ** 2 * j2 / (2 * p ** 2) * math.cos(self.i)
return Omega_dot
def polar_plot(self, time_step: float, fig=None, label: str = ""):
if not fig:
fig = plt.figure().add_subplot(111, 'polar')
self.find_eccentric_anomaly()
self.find_true_anomaly()
r = []
theta = []
r_init = (self.a * (1 - self.e * math.cos(self.ecc_anomaly))) / 1000
theta_init = self.nu
fig.plot(theta_init, r_init, '*')
counter = 0
while counter < self.period:
self.propagate(time_step=time_step)
r.append(self.a * (1 - self.e * math.cos(self.ecc_anomaly)) / 1000)
theta.append(self.nu)
counter += time_step
fig.plot(theta, r, label=label)
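if __name__ == "__main__":
    # Minimal usage sketch (illustration only; the orbital elements below are
    # arbitrary): build an orbit from Keplerian elements, step it forward one
    # minute, and convert it to an inertial position/velocity pair.
    demo = Orbit(a=7000e3, e=0.01, i=math.radians(51.6), Omega=0.0, omega=0.0,
                 mo=0.0, name="demo")
    demo.propagate(time_step=60.0)
    r_ijk, v_ijk = demo.kelper_to_xvz_vel()
    print(r_ijk, v_ijk)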
| gpl-3.0 |
vivekmishra1991/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
        'filenames', the files holding the raw data to learn from, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
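# A minimal usage sketch; the dataset name below ('20news-18828') is an assumption
# and must already be downloaded and unzipped under MLCOMP_DATASETS_HOME for this
# to run.
if __name__ == '__main__':
    news_train = load_mlcomp('20news-18828', 'train')
    print("%d training documents loaded" % len(news_train.filenames))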
| bsd-3-clause |
jay-tyler/data-petting-zoo | DATA_PETTING_ZOO/data_petting_zoo/engine.py | 1 | 10941 | from re import match
from random import sample
from string import capwords
import pandas as pd
import numpy as np
from pandas import DataFrame
import fiona
from shapely.geometry import Point, asShape
from shapely.prepared import prep
from shapely import speedups
from rules import name_rules, geofeat_rules, wiki_codes
DATA_ROOT = "" # TODO: wire this up
# Warn if a value is being assigned to a copy
pd.set_option('mode.chained_assignment', 'warn')
# Imports of needed data
try:
GB = pd.read_pickle("../data/pickles/gb_finalfinal.pk1")
except IOError:
# TODO: In this case should do a bunch of stuff to get gb into namespace
pass
try:
NAMEFAM = pd.read_table("../data/namefam.tab")
except IOError:
    # TODO: In this case should do a bunch of stuff to get NAMEFAM into namespace
    print "Warning: could not read ../data/namefam.tab; namefam lookups will be unavailable"
################################
# Setup Data Functions
################################
def set_gb(file_path):
"""Read the GB.txt file from geonames and return an appropriately
filtered DataFrame"""
gb = pd.read_csv(file_path)
# Trim away an initial index column
gb = gb.ix[:, 1:20]
column_names = [
'geoid', 'name', 'asciiname', 'altname', 'lat', 'long',
'feature_class', 'feature_code', 'country_code', 'cc2', 'adm1', 'adm2',
'adm3', 'adm4', 'pop', 'elev', 'delev', 'timezone', 'moddate'
]
gb.columns = column_names
# Removing rows that correspond to Cyprus, N. Ireland, cruft etc.
remove_these_adm1 = ['05', '00', '01', 'NIR', '03']
for i in remove_these_adm1:
gb = gb[gb.adm1 != i]
# Get rid of adm1 with null values
gb = gb[~gb.adm1.isnull()]
# Setting a list of alternate names from altname string field
gb['ls_altname'] = gb['altname'].dropna().apply(lambda x: x.split(','))
# Pick only feature codes that correspond to towns and cities, see
# http://www.geonames.org/export/codes.html
gb = gb[gb.feature_code.isin(geofeat_rules)]
# Clean up index
gb.index = range(len(gb))
return gb
def set_fam(dfin):
"""In-place setup name families for df dataframe.
Note: this only acts on the 'name' field. Use another function to setup
altname families
Column changes: will add a ls_namefam column
TODO: This is a one run deal; not idempotent. Fix.
"""
df = dfin.copy()
df["ls_namefam"] = np.nan
# Iterate over rows in dataframe
for index, row in df.iterrows():
# For each row, check each name_rule
for namekey, ls_regex in name_rules.iteritems():
result = patinstr(row.loc['name'], ls_regex)
if result:
cur = df.loc[index, 'ls_namefam']
if not isinstance(cur, list):
df.loc[index, 'ls_namefam'] = list([namekey])
else:
df.loc[index, 'ls_namefam'].append(namekey)
return df
def set_alt(df, column_names=None):
"""DataFrame should only be the dataframe that
comes after setgb if columns is left undefined; column_names is a list of
column names that the resulting dataframe should contain. This list should
include 'parent.'"""
df_alt = df.copy()
df_alt.drop(['altname'], inplace=True, axis=1)
df_alt.drop(['ls_altname'], inplace=True, axis=1)
df_alt['parent'] = np.nan
if column_names is None:
column_names = [
'geoid', 'name', 'parent', 'asciiname', 'lat', 'long',
'feature_class', 'feature_code', 'country_code', 'cc2',
'adm1', 'adm2', 'adm3', 'adm4', 'pop', 'elev', 'delev',
'timezone', 'moddate'
]
df_alt = df_alt[column_names]
i = len(df)
for index, row in df.iterrows():
parent_name = row['name']
try:
a_names = iter(row['ls_altname'])
except TypeError:
pass
else:
for a_name in a_names:
row['name'] = a_name
row['parent'] = parent_name
df_alt.ix[i] = row[column_names]
i += 1
    # Drop alternate names that contain '/'
    df_alt = df_alt[~df_alt['name'].str.contains('/')]
return df_alt
def set_namefam_table(file_path):
"""Read the namfam csv file and load to a table"""
return pd.read_csv(file_path)
def append_nuts3_region(dfin, shapefile_path):
""" Take a pandas dataframe with lat and long columns and a shapefile.
Convert coordinates to Shapely points to do a point-in-polygon check
for each point. Append name of nuts3 region corresponding to
point-in-polygon to the dataframe.
This is extremely slow. Needs to be optimized with Shapely boundary box.
"""
df = dfin.copy()
    df['nuts3name'] = np.nan
    df['nuts3id'] = np.nan
fc = fiona.open(shapefile_path)
speedups.enable()
for feature in fc:
prepared_shape = prep(asShape(feature['geometry']))
for index, row in df.iterrows():
point = Point(row.loc['long'], row.loc['lat'])
if prepared_shape.contains(point):
df.loc[index, 'nuts3name'] = feature['properties']['NUTS315NM']
df.loc[index, 'nuts3id'] = feature['properties']['NUTS315CD']
return df
def append_2013_gva(dfin, csv_file_path):
df = dfin.copy()
gva = pd.read_csv(csv_file_path)
gvasub = DataFrame(columns=['nuts3id', 'gva2013'])
gvasub['nuts3id'], gvasub['gva2013'] = gva['nutsid'], gva['2013']
df_gva = pd.merge(
left=df,
right=gvasub.dropna(),
how='left',
left_on='nuts3id',
right_on='nuts3id')
return df_gva
################################
# Helper Query Functions
################################
def patinls(slist, patlist):
"""Search each string in slist for a substring instance defined by patlist;
return re.match if found, None if not found"""
found = None
try:
strings = iter(slist)
except TypeError:
return
for string in strings:
if not found:
for pat in patlist:
found = match(pat, string)
if found:
break
else:
break
return found
def patinstr(string, patlist):
"""Search string for a substring instance defined by patlist; return
re.match if found, None if not found"""
found = None
if not isinstance(string, str):
return None
for pat in patlist:
found = match(pat, string)
if found:
break
return found
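# For example (the pattern below is illustrative, not an entry from rules.py):
# patinstr("Manchester", [r'.*chester']) returns a match object, while
# patinstr("Leeds", [r'.*chester']) returns None; patinls(["Leeds", "Colchester"],
# [r'.*chester']) also returns a match because one element of the list matches.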
################################
# DataFrame Query Functions
################################
def get_fam(df, namekey):
"""Return a (sub-dataframe, namekey, placename) corresponding to a
particular namefamily. Will return None for placename from that
namefamily"""
def _droidfind(listin):
result = None
try:
result = namekey in listin
except TypeError:
pass
return result if result is True else None
mask = df['ls_namefam'].map(lambda x: _droidfind(x))
return df[~mask.isnull()], namekey, None
def query_random(df):
"""Return a (sub-dataframe, namekey, placename) corresponding to a
particular namefamily. Also return a sample placename from that
namefamily"""
df = df.copy()
namekeys = name_rules.keys()
namekey = sample(namekeys, 1)[0]
subdf = get_fam(df, namekey)[0]
placename = subdf['name'].sample().values[0]
return subdf, namekey, placename
def query_name(df, placestring):
"""Return a sub-DataFrame containing boolean matches to place name.
This is intended as a Helper function. Will attempt a more literal
match, and if this fails, will try a loose match.
"""
# First try to query assuming that the user formatted the placename
# correctly, then try a looser search
place = capwords(placestring).strip()
query = df['name'].str.contains('.*' + place + '.*')
if not query.any():
place = placestring.lower().strip()
query = df['name'].str.contains('.*' + place + '.*')
return query
def query_placename(df, placestring):
"""Attempt to match placestring to a city with a family pattern;
return the matching sub-DataFrame, namekey, and full placename if a match
is made, else None
If there are multiple associated name families, then one will be
picked at random"""
df = df.copy()
query = query_name(df, placestring)
# Return a dataframe row with the placename contained;
# assure that this row actually has a namefam associated
try:
namefam_row = df[query & (~df['ls_namefam'].isnull())].sample()
except ValueError:
# In this case, there is no namefam_row match
return None
namekey = namefam_row['ls_namefam'].map(lambda x: sample(x, 1))
# An ugly way to get the string out
namekey = namekey.values[0][0]
placename = namefam_row['name']
return get_fam(df, namekey)[0], namekey, placename.values[0]
def query_name_or_fam(df, placestring):
"""Attempt to match placestring to a city with a family pattern; else
attempt to find a place of the same name. Return None otherwise.
If namefamily is found: return the matching sub-DataFrame, namekey,
    and full placename if a match is made.
If namefamily is not found, but placename is: return the singular placename
row as a DataFrame, None for namekey, and full placename
Otherwise return None."""
df = df.copy()
result = query_placename(df, placestring)
if result is None:
# Case of place with no namefam
query = query_name(df, placestring)
try:
row = df[query].sample()
result = row, None, row['name'].values[0]
except ValueError:
result = None
return result
def query_pop_slice(df, popthresh):
"""Return a sub-DataFrame from DataFrame df that excludes all population
values below the threshold"""
return df[df['pop'] >= popthresh]
def query_namefam_table(namekey):
"""Return a dictionary containing strings of all of the elements of
a namefam table in human readable string format"""
row = NAMEFAM[NAMEFAM['namekey'] == namekey]
if row.shape[0] == 0:
# Case of asking for a namekey that doesn't exist
return None
toreturn = dict()
# Turning wiki_code characters into a human readable string
wiki_str = ""
for code in row['wiki_codes'].values[0].split():
frag = wiki_codes.get(code.rstrip(',').lstrip())
frag = frag if frag is not None else ""
wiki_str += "{}, ".format(frag)
wiki_str = wiki_str.rstrip(", ")
toreturn['wiki_codes'] = wiki_str
for colname in ['namekey', 'humandef', 'lan_notes', 'human_namekey']:
val = row[colname].values[0]
if pd.isnull(val):
val = None
toreturn[colname] = val
return toreturn
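# A sketch of the intended query flow; "Kingston" below is only an illustrative
# placename and may or may not match a name family in the loaded data:
#     subdf, namekey, placename = query_name_or_fam(GB, "Kingston")
#     details = query_namefam_table(namekey) if namekey else None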
if __name__ == "__main__":
gbf = 'data/pristine/GB.txt'
gb = set_gb(gbf)
gb2 = set_fam(gb)
print gb2['ls_namefam'].dropna()
| mit |
tkaitchuck/nupic | external/darwin64/lib/python2.6/site-packages/matplotlib/table.py | 69 | 16757 | """
Place a table below the x-axis at location loc.
The table consists of a grid of cells.
The grid need not be rectangular and can have holes.
Cells are added by specifying their row and column.
For the purposes of positioning the cell at (0, 0) is
assumed to be at the top left and the cell at (max_row, max_col)
is assumed to be at bottom right.
You can add additional cells outside this range to have convenient
ways of positioning more interesting grids.
Author : John Gill <jng@europe.renre.com>
Copyright : 2004 John Gill and John Hunter
License : matplotlib license
"""
from __future__ import division
import warnings
import artist
from artist import Artist
from patches import Rectangle
from cbook import is_string_like
from text import Text
from transforms import Bbox
class Cell(Rectangle):
"""
A cell is a Rectangle with some associated text.
"""
PAD = 0.1 # padding between text and rectangle
def __init__(self, xy, width, height,
edgecolor='k', facecolor='w',
fill=True,
text='',
loc=None,
fontproperties=None
):
# Call base
Rectangle.__init__(self, xy, width=width, height=height,
edgecolor=edgecolor, facecolor=facecolor,
)
self.set_clip_on(False)
# Create text object
if loc is None: loc = 'right'
self._loc = loc
self._text = Text(x=xy[0], y=xy[1], text=text,
fontproperties=fontproperties)
self._text.set_clip_on(False)
def set_transform(self, trans):
Rectangle.set_transform(self, trans)
# the text does not get the transform!
def set_figure(self, fig):
Rectangle.set_figure(self, fig)
self._text.set_figure(fig)
def get_text(self):
        'Return the cell Text instance'
return self._text
def set_fontsize(self, size):
self._text.set_fontsize(size)
def get_fontsize(self):
'Return the cell fontsize'
return self._text.get_fontsize()
def auto_set_font_size(self, renderer):
""" Shrink font size until text fits. """
fontsize = self.get_fontsize()
required = self.get_required_width(renderer)
while fontsize > 1 and required > self.get_width():
fontsize -= 1
self.set_fontsize(fontsize)
required = self.get_required_width(renderer)
return fontsize
def draw(self, renderer):
if not self.get_visible(): return
# draw the rectangle
Rectangle.draw(self, renderer)
# position the text
self._set_text_position(renderer)
self._text.draw(renderer)
def _set_text_position(self, renderer):
""" Set text up so it draws in the right place.
        Currently supports 'left', 'center' and 'right'
"""
bbox = self.get_window_extent(renderer)
l, b, w, h = bbox.bounds
# draw in center vertically
self._text.set_verticalalignment('center')
y = b + (h / 2.0)
# now position horizontally
if self._loc == 'center':
self._text.set_horizontalalignment('center')
x = l + (w / 2.0)
elif self._loc == 'left':
self._text.set_horizontalalignment('left')
x = l + (w * self.PAD)
else:
self._text.set_horizontalalignment('right')
x = l + (w * (1.0 - self.PAD))
self._text.set_position((x, y))
def get_text_bounds(self, renderer):
""" Get text bounds in axes co-ordinates. """
bbox = self._text.get_window_extent(renderer)
bboxa = bbox.inverse_transformed(self.get_data_transform())
return bboxa.bounds
def get_required_width(self, renderer):
""" Get width required for this cell. """
l,b,w,h = self.get_text_bounds(renderer)
return w * (1.0 + (2.0 * self.PAD))
def set_text_props(self, **kwargs):
'update the text properties with kwargs'
self._text.update(kwargs)
class Table(Artist):
"""
Create a table of cells.
Table can have (optional) row and column headers.
Each entry in the table can be either text or patches.
    Column widths and row heights for the table can be specified.
Return value is a sequence of text, line and patch instances that make
up the table
"""
codes = {'best' : 0,
'upper right' : 1, # default
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'center left' : 5,
'center right' : 6,
'lower center' : 7,
'upper center' : 8,
'center' : 9,
'top right' : 10,
'top left' : 11,
'bottom left' : 12,
'bottom right' : 13,
'right' : 14,
'left' : 15,
'top' : 16,
'bottom' : 17,
}
FONTSIZE = 10
AXESPAD = 0.02 # the border between the axes and table edge
def __init__(self, ax, loc=None, bbox=None):
Artist.__init__(self)
if is_string_like(loc) and loc not in self.codes:
warnings.warn('Unrecognized location %s. Falling back on bottom; valid locations are\n%s\t' %(loc, '\n\t'.join(self.codes.keys())))
loc = 'bottom'
if is_string_like(loc): loc = self.codes.get(loc, 1)
self.set_figure(ax.figure)
self._axes = ax
self._loc = loc
self._bbox = bbox
# use axes coords
self.set_transform(ax.transAxes)
self._texts = []
self._cells = {}
self._autoRows = []
self._autoColumns = []
self._autoFontsize = True
self._cachedRenderer = None
def add_cell(self, row, col, *args, **kwargs):
""" Add a cell to the table. """
xy = (0,0)
cell = Cell(xy, *args, **kwargs)
cell.set_figure(self.figure)
cell.set_transform(self.get_transform())
cell.set_clip_on(False)
self._cells[(row, col)] = cell
def _approx_text_height(self):
return self.FONTSIZE/72.0*self.figure.dpi/self._axes.bbox.height * 1.2
def draw(self, renderer):
# Need a renderer to do hit tests on mouseevent; assume the last one will do
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
self._cachedRenderer = renderer
if not self.get_visible(): return
renderer.open_group('table')
self._update_positions(renderer)
keys = self._cells.keys()
keys.sort()
for key in keys:
self._cells[key].draw(renderer)
#for c in self._cells.itervalues():
# c.draw(renderer)
renderer.close_group('table')
def _get_grid_bbox(self, renderer):
"""Get a bbox, in axes co-ordinates for the cells.
Only include those in the range (0,0) to (maxRow, maxCol)"""
boxes = [self._cells[pos].get_window_extent(renderer)
for pos in self._cells.keys()
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.inverse_transformed(self.get_transform())
def contains(self,mouseevent):
"""Test whether the mouse event occurred in the table.
Returns T/F, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
# TODO: Return index of the cell containing the cursor so that the user
# doesn't have to bind to each one individually.
if self._cachedRenderer is not None:
boxes = [self._cells[pos].get_window_extent(self._cachedRenderer)
for pos in self._cells.keys()
if pos[0] >= 0 and pos[1] >= 0]
            bbox = Bbox.union(boxes)
return bbox.contains(mouseevent.x,mouseevent.y),{}
else:
return False,{}
def get_children(self):
'Return the Artists contained by the table'
return self._cells.values()
get_child_artists = get_children # backward compatibility
def get_window_extent(self, renderer):
'Return the bounding box of the table in window coords'
        boxes = [c.get_window_extent(renderer)
                 for c in self._cells.itervalues()]
        return Bbox.union(boxes)
def _do_cell_alignment(self):
""" Calculate row heights and column widths.
Position cells accordingly.
"""
# Calculate row/column widths
widths = {}
heights = {}
for (row, col), cell in self._cells.iteritems():
height = heights.setdefault(row, 0.0)
heights[row] = max(height, cell.get_height())
width = widths.setdefault(col, 0.0)
widths[col] = max(width, cell.get_width())
# work out left position for each column
xpos = 0
lefts = {}
cols = widths.keys()
cols.sort()
for col in cols:
lefts[col] = xpos
xpos += widths[col]
ypos = 0
bottoms = {}
rows = heights.keys()
rows.sort()
rows.reverse()
for row in rows:
bottoms[row] = ypos
ypos += heights[row]
# set cell positions
for (row, col), cell in self._cells.iteritems():
cell.set_x(lefts[col])
cell.set_y(bottoms[row])
def auto_set_column_width(self, col):
self._autoColumns.append(col)
def _auto_set_column_width(self, col, renderer):
""" Automagically set width for column.
"""
cells = [key for key in self._cells if key[1] == col]
# find max width
width = 0
for cell in cells:
c = self._cells[cell]
width = max(c.get_required_width(renderer), width)
# Now set the widths
for cell in cells:
self._cells[cell].set_width(width)
def auto_set_font_size(self, value=True):
""" Automatically set font size. """
self._autoFontsize = value
def _auto_set_font_size(self, renderer):
if len(self._cells) == 0:
return
fontsize = self._cells.values()[0].get_fontsize()
cells = []
for key, cell in self._cells.iteritems():
# ignore auto-sized columns
if key[1] in self._autoColumns: continue
size = cell.auto_set_font_size(renderer)
fontsize = min(fontsize, size)
cells.append(cell)
# now set all fontsizes equal
for cell in self._cells.itervalues():
cell.set_fontsize(fontsize)
def scale(self, xscale, yscale):
""" Scale column widths by xscale and row heights by yscale. """
for c in self._cells.itervalues():
c.set_width(c.get_width() * xscale)
c.set_height(c.get_height() * yscale)
def set_fontsize(self, size):
"""
Set the fontsize of the cell text
ACCEPTS: a float in points
"""
for cell in self._cells.itervalues():
cell.set_fontsize(size)
def _offset(self, ox, oy):
'Move all the artists by ox,oy (axes coords)'
for c in self._cells.itervalues():
x, y = c.get_x(), c.get_y()
c.set_x(x+ox)
c.set_y(y+oy)
def _update_positions(self, renderer):
# called from renderer to allow more precise estimates of
# widths and heights with get_window_extent
# Do any auto width setting
for col in self._autoColumns:
self._auto_set_column_width(col, renderer)
if self._autoFontsize:
self._auto_set_font_size(renderer)
# Align all the cells
self._do_cell_alignment()
bbox = self._get_grid_bbox(renderer)
l,b,w,h = bbox.bounds
if self._bbox is not None:
# Position according to bbox
rl, rb, rw, rh = self._bbox
self.scale(rw/w, rh/h)
ox = rl - l
oy = rb - b
self._do_cell_alignment()
else:
# Position using loc
(BEST, UR, UL, LL, LR, CL, CR, LC, UC, C,
TR, TL, BL, BR, R, L, T, B) = range(len(self.codes))
# defaults for center
ox = (0.5-w/2)-l
oy = (0.5-h/2)-b
if self._loc in (UL, LL, CL): # left
ox = self.AXESPAD - l
if self._loc in (BEST, UR, LR, R, CR): # right
ox = 1 - (l + w + self.AXESPAD)
if self._loc in (BEST, UR, UL, UC): # upper
oy = 1 - (b + h + self.AXESPAD)
if self._loc in (LL, LR, LC): # lower
oy = self.AXESPAD - b
if self._loc in (LC, UC, C): # center x
ox = (0.5-w/2)-l
if self._loc in (CL, CR, C): # center y
oy = (0.5-h/2)-b
if self._loc in (TL, BL, L): # out left
ox = - (l + w)
if self._loc in (TR, BR, R): # out right
ox = 1.0 - l
if self._loc in (TR, TL, T): # out top
oy = 1.0 - b
if self._loc in (BL, BR, B): # out bottom
oy = - (b + h)
self._offset(ox, oy)
def get_celld(self):
'return a dict of cells in the table'
return self._cells
def table(ax,
cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
"""
TABLE(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None)
Factory function to generate a Table instance.
Thanks to John Gill for providing the class and table.
"""
# Check we have some cellText
if cellText is None:
# assume just colours are needed
rows = len(cellColours)
cols = len(cellColours[0])
        cellText = [[''] * cols] * rows
rows = len(cellText)
cols = len(cellText[0])
for row in cellText:
assert len(row) == cols
if cellColours is not None:
assert len(cellColours) == rows
for row in cellColours:
assert len(row) == cols
else:
cellColours = ['w' * cols] * rows
# Set colwidths if not given
if colWidths is None:
colWidths = [1.0/cols] * cols
# Check row and column labels
rowLabelWidth = 0
if rowLabels is None:
if rowColours is not None:
            rowLabels = [''] * rows
rowLabelWidth = colWidths[0]
elif rowColours is None:
rowColours = 'w' * rows
if rowLabels is not None:
assert len(rowLabels) == rows
    # If there are column labels, shift the cell text down one row
    offset = 1
    if colLabels is None:
        if colColours is not None:
            colLabels = [''] * cols
        else:
            offset = 0
    elif colColours is None:
        colColours = 'w' * cols
    if colLabels is not None:
        assert len(colLabels) == cols
# Set up cell colours if not given
if cellColours is None:
cellColours = ['w' * cols] * rows
# Now create the table
table = Table(ax, loc, bbox)
height = table._approx_text_height()
# Add the cells
for row in xrange(rows):
for col in xrange(cols):
table.add_cell(row+offset, col,
width=colWidths[col], height=height,
text=cellText[row][col],
facecolor=cellColours[row][col],
loc=cellLoc)
# Do column labels
if colLabels is not None:
for col in xrange(cols):
table.add_cell(0, col,
width=colWidths[col], height=height,
text=colLabels[col], facecolor=colColours[col],
loc=colLoc)
# Do row labels
if rowLabels is not None:
for row in xrange(rows):
table.add_cell(row+offset, -1,
width=rowLabelWidth or 1e-15, height=height,
text=rowLabels[row], facecolor=rowColours[row],
loc=rowLoc)
if rowLabelWidth == 0:
table.auto_set_column_width(-1)
ax.add_table(table)
return table
artist.kwdocd['Table'] = artist.kwdoc(Table)
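# A minimal usage sketch of the table() factory above (the data, labels and output
# file name are illustrative assumptions): attach a 2x3 text table below an axes.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot([0, 1, 2], [1, 4, 9])
    demo_table = table(ax,
                       cellText=[['1', '2', '3'], ['4', '5', '6']],
                       rowLabels=['row A', 'row B'],
                       colLabels=['col 1', 'col 2', 'col 3'],
                       loc='bottom')
    fig.savefig('table_demo.png')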
| gpl-3.0 |