repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
bovee/Aston | aston/tracefile/__init__.py | 1 | 7942 | '''
Classes that can open chromatographic files and return
info from them or Traces/Chromatograms.
'''
import re
import struct
import numpy as np
from aston.trace import Chromatogram, Trace
from aston.tracefile.mime import get_mimetype, tfclasses
def find_offset(f, search_str, hint=None):
if hint is None:
hint = 0
f.seek(hint)
regexp = re.compile(search_str)
while True:
d = f.read(len(search_str) * 200)
srch = regexp.search(d)
if srch is not None:
foff = f.tell() - len(d) + srch.end()
break
if len(d) == len(search_str): # no data read: EOF
return None
f.seek(f.tell() - len(search_str))
return foff
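# Illustrative usage sketch (not part of the original module): scan a binary
# trace file for a marker string. The filename and marker bytes below are
# hypothetical placeholders.
#
#     with open('run01.raw', 'rb') as f:
#         off = find_offset(f, b'CRawData')
#         if off is not None:
#             f.seek(off)  # file pointer now sits just past the matched marker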
def parse_c_serialized(f):
"""
Reads in a binary file created by a C++ serializer (prob. MFC?)
and returns tuples of (header name, data following the header).
These are used by Thermo for *.CF and *.DXF files and by Agilent
for new-style *.REG files.
"""
# TODO: rewrite to use re library
f.seek(0)
try:
p_rec_type = None
while True:
rec_off = f.tell()
while True:
if f.read(2) == b'\xff\xff':
h = struct.unpack('<HH', f.read(4))
if h[1] < 64 and h[1] != 0:
rec_type = f.read(h[1])
if rec_type[0] == 67: # starts with 'C'
break
if f.read(1) == b'':
raise EOFError
f.seek(f.tell() - 2)
if p_rec_type is not None:
rec_len = f.tell() - 6 - len(rec_type) - rec_off
f.seek(rec_off)
yield p_rec_type, f.read(rec_len)
f.seek(f.tell() + 6 + len(rec_type))
# p_type = h[0]
p_rec_type = rec_type
except EOFError:
rec_len = f.tell() - 6 - len(rec_type) - rec_off
f.seek(rec_off)
yield p_rec_type, f.read(rec_len)
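# Illustrative usage sketch (the filename is a hypothetical Thermo-style file,
# not from the original repo): iterate over the serialized (header, blob) pairs.
#
#     with open('example.dxf', 'rb') as f:
#         for header, blob in parse_c_serialized(f):
#             print(header, len(blob))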
class TraceFile(object):
mime = '' # mimetype to associate file with (in tracefile.mime)
# traces is a list of possible traces:
# each item may start with a * indicating events,
# a # indicating a 2d trace or nothing indicating a single trace name
traces = []
def __init__(self, filename=None, ftype=None, data=None):
self.filename = filename
self.ftype = ''
self._data = None
# try to automatically change my class to reflect
# whatever type of file I'm pointing at if not provided
if type(self) is TraceFile:
if data is not None:
self._data = data
if filename is None:
return
with open(filename, mode='rb') as f:
magic = f.read(4)
ftype = get_mimetype(filename, magic)
if ftype is not None:
if ftype in tfclasses():
self.__class__ = tfclasses()[ftype]
self.ftype = ftype
else:
self.ftype = self.__class__.__name__
@property
def data(self):
if self._data is not None:
return self._data
else:
return Chromatogram()
def scans(self):
# TODO: decompose self.data into scans
pass
def total_trace(self, twin=None):
return self.data.trace(twin=twin)
# TODO: should this code be kept? (needs to be improved, if so)
# def plot(self, name='', ax=None):
# if ax is None:
# import matplotlib.pyplot as plt
# ax = plt.gca()
# for t in self.traces:
# if t.startswith('#'):
# #self.trace(t[1:]).plot(ax=ax)
# self.trace('').plot(ax=ax)
# elif t.startswith('*'):
# #TODO: plot events
# pass
# else:
# self.trace(t).plot(ax=ax)
# #TODO: colors?
# #TODO: plot 2d/colors
def trace(self, name='', tol=0.5, twin=None):
if isinstance(name, (int, float, np.float32, np.float64)):
name = str(name)
else:
name = name.lower()
# name of the 2d trace, if it exists
if any(t.startswith('#') for t in self.traces):
t2d = [t[1:] for t in self.traces if t.startswith('#')][0]
# clip out the starting 'MS' if present
if name.startswith(t2d):
name = name[len(t2d):]
else:
t2d = ''
# this is the only string we handle; all others handled in subclasses
if name in ['tic', 'x', '']:
return self.total_trace(twin)
elif name in self.traces:
return self._trace(name, twin)
elif t2d != '':
# this file contains 2d data; find the trace in that
return self.data.trace(name, tol, twin)
else:
return Trace()
def scan(self, t, dt=None, aggfunc=None):
"""
Returns the spectrum from a specific time or range of times.
"""
return self.data.scan(t, dt, aggfunc)
@property
def info(self):
# TODO: add creation date and short name
return {'filename': self.filename,
'filetype': self.ftype}
def events(self, name, twin=None):
# TODO: check for '*' trace in self.traces
return []
def subscan(self, name, t, mz):
"""
Returns a spectrum linked to both a time and mz, e.g.
the daughter scan in an MSMS or a MS scan from a GC-GC.
"""
pass
def subscans(self, name, twin=None):
"""
Returns a list of times with subscans and their associated mzs.
Preliminary idea:
If all points in self.data have subscans, return True.
"""
# example: [0.1], [147, 178]
return [], []
def md5hash(self):
# TODO: calculate md5hash of this file
# to be used for determining if files in db are unique
raise NotImplementedError
class ScanListFile(TraceFile):
def scans(self, twin=None):
return []
# TODO: is there a point in creating a data property here? (for heatmaps?)
# TODO: if so, then need better binning code...
def total_trace(self, twin=None):
if twin is None:
twin = (-np.inf, np.inf)
times, y = [], []
for s in self.scans(twin):
t = float(s.name)
if t < twin[0]:
continue
if t > twin[1]:
break
times.append(t)
y.append(sum(s.abn))
return Trace(y, times, name='tic')
def trace(self, name='', tol=0.5, twin=None):
if twin is None:
twin = (-np.inf, np.inf)
if name in {'tic', 'x', ''}:
return self.total_trace(twin)
times, y = [], []
for s in self.scans(twin):
t = float(s.name)
if t < twin[0]:
continue
if t > twin[1]:
break
times.append(t)
# TODO: this can be vectorized with numpy?
y.append(sum(j for i, j in zip(s.x, s.abn)
if np.abs(i - name) < tol))
return Trace(y, times, name=name)
def scan(self, t, dt=None, aggfunc=None):
# TODO: use aggfunc
prev_s = None
bin_scans = []
for s in self.scans():
if float(s.name) > t:
if t - float(prev_s.name) < float(s.name) - t:
if dt is None:
return prev_s
else:
bin_scans.append(prev_s)
elif dt is None:
return s
bin_scans.append(s)
if float(s.name) > t + dt:
break
prev_s = s
# merge bin_scans and return
# FIXME
pass
| bsd-3-clause |
rajat1994/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 114 | 25281 | # Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet notice that glmnet divides it
# by nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameters alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
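# Quick shape check (illustrative, not part of the original test suite): with
# the defaults this yields 50 samples, 200 features and a single target.
#
#     X, y, X_test, y_test = build_dataset()
#     assert X.shape == (50, 200) and y.shape == (50,)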
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
# Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprection_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
y = check_array(X, order='F', dtype='float64')
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong format,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
# With no input checking, providing X in C order should result in an
# incorrect computation
X = check_array(X, order='C', dtype='float64')
clf.fit(X, y, check_input=False)
coef_false = clf.coef_
clf.fit(X, y, check_input=True)
coef_true = clf.coef_
assert_raises(AssertionError, assert_array_almost_equal,
coef_true, coef_false)
def test_overrided_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
| bsd-3-clause |
andim/scipydirect | examples/SH.py | 1 | 1031 | #!/usr/bin/python
"""
Minimize the two-dimensional Shubert function.
"""
from __future__ import division
from scipydirect import minimize
import numpy as np
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
def obj(x):
"""Two Dimensional Shubert Function"""
j = np.arange(1, 6)
tmp1 = np.dot(j, np.cos((j+1)*x[0] + j))
tmp2 = np.dot(j, np.cos((j+1)*x[1] + j))
return tmp1 * tmp2
if __name__ == '__main__':
bounds = [(-10, 10), (-10, 10)]
res = minimize(obj, bounds)
print(res)
# Plot the results.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = res.x
X, Y = np.mgrid[x[0]-2:x[0]+2:50j, x[1]-2:x[1]+2:50j]
Z = np.zeros_like(X)
for i in range(X.size):
Z.ravel()[i] = obj([X.flatten()[i], Y.flatten()[i]])
ax.plot_wireframe(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet)
ax.scatter(x[0], x[1], res.fun, c='r', marker='o')
ax.set_title('Two Dimensional Shubert Function')
plt.show()
| mit |
samyachour/EKG_Analysis | wave.py | 1 | 19645 | import pywt
import numpy as np
import pandas as pd
import scipy.io as sio
from biosppy.signals import ecg
import scipy
from detect_peaks import detect_peaks as detect_peaks_orig
def getRPeaks(data, sampling_rate=300.):
"""
R peak detection in 1 dimensional ECG wave
Parameters
----------
data : array_like
1-dimensional array with input signal data
Returns
-------
data : array_like
1-dimensional array with the indices of each peak
"""
out = ecg.ecg(data, sampling_rate=sampling_rate, show=False)
return out[2]
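# Illustrative usage sketch (the record name and the default data path used by
# load() below are assumptions, not guaranteed to exist locally):
#
#     data = load('A00001')                       # 1-D ECG trace at 300 Hz
#     peaks = getRPeaks(data, sampling_rate=300.)
#     rr = interval(peaks)                        # RR intervals in samples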
def discardNoise(data, winSize=100):
"""
Discarding sections of the input signal that are noisy
Parameters
----------
data : array_like
1-dimensional array with input signal data
winSize : int
size of the windows to keep or discard
Returns
-------
data : array_like
1-dimensional array with cleaned up signal data
"""
left_limit = 0
right_limit = winSize
dataSize = data.size
data = data.tolist()
residuals = []
while True:
if right_limit > dataSize: window = data[left_limit:]
else: window = data[left_limit:right_limit]
w = pywt.Wavelet('sym4')
levels = pywt.dwt_max_level(len(window), w)
if levels < 1:
break
residual = calculate_residuals(np.asarray(window), levels=levels)
residuals.append(((left_limit, right_limit),residual))
left_limit += winSize
right_limit += winSize
cleanData = []
mean = np.mean([i[1] for i in residuals])
std = np.std([i[1] for i in residuals])
for i in residuals:
val = i[1]
if val < mean + std and val > mean - std:
cleanData += data[i[0][0]:i[0][1]]
#plot.plot([i[1] for i in residuals], title="Residuals", yLab="Residual Stat", xLab=str(winSize) + " sized window")
return np.asarray(cleanData)
def omit(coeffs, omissions, stationary=False):
"""
coefficient omission
Parameters
----------
coeffs : array_like
Coefficients list [cAn, {details_level_n}, ... {details_level_1}]
omissions: tuple(list, bool), optional
List of DETAIL levels to omit, if bool is true omit cA
Returns
-------
nD array of reconstructed data.
"""
for i in omissions[0]:
coeffs[-i] = {k: np.zeros_like(v) for k, v in coeffs[-i].items()}
if omissions[1]: # If we want to exclude cA
coeffs[0] = np.zeros_like(coeffs[0])
return coeffs
def decomp(cA, wavelet, levels, mode='constant', omissions=([], False)):
"""
n-dimensional discrete wavelet decompisition and reconstruction
Parameters
----------
cA : array_like
n-dimensional array with input data.
wavelet : Wavelet object or name string
Wavelet to use.
levels : int
The number of decomposition steps to perform.
mode : string, optional
The mode of signal padding, defaults to constant
omissions: tuple(list, bool), optional
List of DETAIL levels to omit, if bool is true omit cA
Returns
-------
nD array of reconstructed data.
"""
if omissions[0] and max(omissions[0]) > levels:
raise ValueError("Omission level %d is too high. Maximum allowed is %d." % (max(omissions[0]), levels))
coeffs = pywt.wavedecn(cA, wavelet, level=levels, mode=mode)
coeffs = omit(coeffs, omissions)
return pywt.waverecn(coeffs, wavelet, mode=mode)
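# Illustrative sketch (the signal below is synthetic): reconstruct a trace with
# detail level 1 omitted, which acts as a light de-noising pass.
#
#     sig = np.sin(np.linspace(0, 8 * np.pi, 1024)) + 0.1 * np.random.randn(1024)
#     denoised = decomp(sig, wavelet='sym4', levels=5, omissions=([1], False))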
def filterSignalMexh(data, sampling_rate=300.0):
"""
bandpass filter using mexican hat hardcoded values from physionet
Parameters
----------
data : array_like
1-dimensional array with input data.
Returns
-------
1D array of filtered signal data.
"""
# from physionet sample2017
b1 = np.asarray([-7.757327341237223e-05, -2.357742589814283e-04, -6.689305101192819e-04, -0.001770119249103,
-0.004364327211358, -0.010013251577232, -0.021344241245400, -0.042182820580118,
-0.077080889653194, -0.129740392318591, -0.200064921294891, -0.280328573340852,
-0.352139052257134, -0.386867664739069, -0.351974030208595, -0.223363323458050,
0, 0.286427448595213, 0.574058766243311, 0.788100265785590, 0.867325070584078,
0.788100265785590, 0.574058766243311, 0.286427448595213, 0, -0.223363323458050,
-0.351974030208595, -0.386867664739069, -0.352139052257134, -0.280328573340852,
-0.200064921294891, -0.129740392318591, -0.077080889653194, -0.042182820580118,
-0.021344241245400, -0.010013251577232, -0.004364327211358, -0.001770119249103,
-6.689305101192819e-04, -2.357742589814283e-04, -7.757327341237223e-05])
secs = b1.size/sampling_rate # Number of seconds in signal X
samps = secs*250 # Number of samples to downsample to
b1 = scipy.signal.resample(b1,int(samps))
bpfecg = scipy.signal.filtfilt(b1,1,data)
return bpfecg
def filterSignalBios(data, sampling_rate=300.0):
"""
filter signal using biosppy
Parameters
----------
data : array_like
1-dimensional array with input data.
sampling_rate : float, optional
discrete sampling rate for the signal, physionet training is 300. (hz)
Returns
-------
1D array of filtered signal data.
"""
out = ecg.ecg(data, sampling_rate=sampling_rate, show=False)
return out[1]
def detect_peaks(x, plotX=np.array([]), mph=None, mpd=1, threshold=0, edge='rising',
kpsh=False, valley=False, show=False, ax=None):
"""
Wrapper function for detect_peaks function in detect_peaks.py
Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
plotX : 1D array_like optional (default = x)
original signal you might want to plot detected peaks on, if you used wavelets or the like
mph : {None, number}, optional (default = None)
detect peaks that are greater than minimum peak height.
mpd : positive integer, optional (default = 1)
detect peaks that are at least separated by minimum peak distance (in
number of data).
threshold : positive number, optional (default = 0)
detect peaks (valleys) that are greater (smaller) than `threshold`
in relation to their immediate neighbors.
edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
for a flat peak, keep only the rising edge ('rising'), only the
falling edge ('falling'), both edges ('both'), or don't detect a
flat peak (None).
kpsh : bool, optional (default = False)
keep peaks with same height even if they are closer than `mpd`.
valley : bool, optional (default = False)
if True (1), detect valleys (local minima) instead of peaks.
show : bool, optional (default = False)
if True (1), plot data in matplotlib figure.
ax : a matplotlib.axes.Axes instance, optional (default = None).
Returns
-------
ind : 1D array_like
indices of the peaks in `x`.
Notes
-----
The detection of valleys instead of peaks is performed internally by simply
negating the data: `ind_valleys = detect_peaks(-x)`
The function can handle NaN's
See this IPython Notebook [1]_.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
"""
if plotX.size == 0:
plotX = x # couldn't do in function declaration
return detect_peaks_orig(x, plotX=plotX, mph=mph, mpd=mpd, threshold=threshold, edge=edge,
kpsh=kpsh, valley=valley, show=show, ax=ax)
def getPWaves(signal):
"""
P Wave detection
Parameters
----------
signal : Signal object
signal object from Signal class in signal.py
Returns
-------
data : array_like
1-dimensional array with the indices of each P peak
"""
maxesP = []
for i in range(0, len(signal.RPeaks) - 1):
left_limit = signal.RPeaks[i]
right_limit = signal.RPeaks[i+1]
left_limit = right_limit - (right_limit-left_limit)//3
plotData = signal.data[left_limit:right_limit]
peaks = detect_peaks(plotData, mpd=160) # super high mpd so it only gets the best peak
if peaks.size != 0:
maxesP.append(left_limit + peaks[0]) # need to convert to original signal coordinates
else:
maxesP.append(left_limit) # if we can't find a p wave peak,
# just grab the leftmost point in the window
# TODO: better default case?
return np.asarray(maxesP)
def getBaseline(signal):
"""
Baseline estimation
Parameters
----------
signal : Signal object
signal object from Signal class in signal.py
Returns
-------
Y value in mV of baseline, float
"""
baselineY = 0
trueBaselines = 0
for i in range(0, len(signal.RPeaks) - 1):
left_limit = signal.RPeaks[i]
right_limit = signal.RPeaks[i+1]
RRinterval = signal.data[left_limit:right_limit] # the indices for one rrinterval
innerPeaks = detect_peaks(RRinterval, edge='both', mpd=30) # peaks in between
for i in range(0, len(innerPeaks) - 1):
# between the first set of peaks
left_limit = innerPeaks[i]
right_limit = innerPeaks[i+1]
plotData = RRinterval[left_limit:right_limit]
mean = np.mean(plotData)
bottom_limit = mean - 0.04
top_limit = mean + 0.04
baseline = True
# if any points in the subinterval are out of the range 'mean +/- 0.04'
for i in plotData:
if i < bottom_limit or i > top_limit:
baseline = False
if baseline:
baselineY += mean
trueBaselines += 1
if trueBaselines > 0:
return baselineY/trueBaselines
else:
return np.mean(signal.data)
""" Helper functions """
def load(filename, path = '../Physionet_Challenge/training2017/'):
"""
Load signal data in .mat form
Parameters
----------
filename : String
The name of the .mat file
path : String, optional
The path to the file directory, defaults to physionet training data
Returns
-------
1D array of signal data.
"""
mat = sio.loadmat(path + filename + '.mat')
data = np.divide(mat['val'][0],1000)
return data
def getRecords(trainingLabel, _not=False, path='../Physionet_Challenge/training2017/REFERENCE.csv'): # N O A ~
"""
Get record names from a reference.csv
Parameters
----------
trainingLabel : String
The label you want to grab, N O A ~ or All
_not : Bool, optional
If you want to get everything _except_ the given training label, default False
path : String, optional
The path to the Reference.csv, default is the training2017 csv
Returns
-------
tuple of equally sized lists:
list of record names
list of record labels N O A ~
"""
reference = pd.read_csv(path, names = ["file", "answer"]) # N O A ~
if trainingLabel == 'All':
return (reference['file'].tolist(), reference['answer'].tolist())
if _not:
subset = reference.ix[reference['answer']!=trainingLabel]
return (subset['file'].tolist(), subset['answer'].tolist())
else:
subset = reference.ix[reference['answer']==trainingLabel]
return (subset['file'].tolist(), subset['answer'].tolist())
def partition(index, df):
"""
Helper function for getPartitionedRecords() function
Partitions a (subsetted) dataframe into training and testing
Parameters
----------
index : int 0-9
The partition section you want to grab for testing: 0 is the first 1/10th, 1 is the second 1/10th, etc.
df : pandas dataframe
The dataframe of records you want to partition, should have 2 columns 'File' and 'Answer'
and be all of one class, i.e. all 'Answer's should be 'N'
Returns
-------
tuple of tuples:
tuple of equally sized lists:
list of record names for 10% testing data
list of record labels N O A ~ for 10% testing data
tuple of equally sized lists:
list of record names for 90% training data
list of record labels N O A ~ for 90% training data
"""
size = df.shape[0]
tenth = int(size * 0.1) # this is a 1/10th of the rows in the dataframe of records
section = index * tenth
# Grab rows from the section index up to the section index plus one tenth
testing = (df['file'].tolist()[section:section + tenth],
df['answer'].tolist()[section:section + tenth])
# Grab everything but the section -> section + 1/10th subset
training = (df['file'].tolist()[0:section] + df['file'].tolist()[section + tenth:],
df['answer'].tolist()[0:section] + df['answer'].tolist()[section + tenth:])
return (testing, training)
def getPartitionedRecords(index, path='../Physionet_Challenge/training2017/REFERENCE.csv'): # N O A ~
"""
Partition all the training data while maintaining the ratios of each class
Parameters
----------
index : int 0-9
The partition section you want to grab for testing: 0 is the first 1/10th, 1 is the second 1/10th, etc.
path : String, optional
The path to the Reference.csv, default is the training2017 csv
Returns
-------
tuple of tuples:
tuple of equally sized lists:
list of record names for 10% testing data
list of record labels N O A ~ for 10% testing data
tuple of equally sized lists:
list of record names for 90% training data
list of record labels N O A ~ for 90% training data
"""
if index < 0 or index > 9:
raise ValueError("Index %d is not available, can only partition 10 different ways. Index must be 0-9." % (index))
reference = pd.read_csv(path, names = ["file", "answer"]) # N O A ~
n = reference.ix[reference['answer'] == 'N']
n = partition(index, n)
o = reference.ix[reference['answer'] == 'O']
o = partition(index, o)
a = reference.ix[reference['answer'] == 'A']
a = partition(index, a)
p = reference.ix[reference['answer'] == '~']
p = partition(index, p)
tempTestRec = []
tempTestLab = []
tempTrainRec = []
tempTrainLab = []
for i in [n,o,a,p]:
tempTestRec += i[0][0]
tempTestLab += i[0][1]
tempTrainRec += i[1][0]
tempTrainLab += i[1][1]
return ((tempTestRec, tempTestLab),(tempTrainRec, tempTrainLab))
def interval(data):
"""
Calculate the intervals from a list
Parameters
----------
data : array_like
1-dimensional array with input data.
Returns
-------
intervals : array_like
an array of interval lengths
"""
return np.array([data[i+1] - data[i] for i in range(0, len(data)-1)])
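# Illustrative sketch (values made up): successive differences of R-peak
# indices give the RR intervals.
#
#     interval(np.array([10, 310, 620, 915]))   # -> array([300, 310, 295])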
def calculate_residuals(original, levels=5):
# calculate residuals for a single EKG
"""
Calculate the residual between a signal and its wavelet reconstruction
Parameters
----------
original : array_like
the original signal
levels : int, optional
the number of wavelet levels you'd like to decompose to
Returns
-------
residual : float
the residual value
"""
rebuilt = decomp(original, wavelet='sym4', levels=levels, mode='symmetric', omissions=([1],False))
residual = sum(abs(original-rebuilt[:len(original)]))/len(original)
return residual
def diff_var(intervals, skip=2):
"""
This function calculates the variance of the differences between
each value and the value that is the specified number (skip)
of positions away from it, e.g. skip = 2 means the difference between
one value and the value 2 positions after it.
Parameters
----------
intervals : array_like
the array of interval lengths to analyze
skip : int, optional
the number of positions between the values being differenced
Returns
-------
the variances of the differences in the intervals
"""
diff = []
for i in range(0, len(intervals)-skip, skip):
per_diff= intervals[i]-intervals[i+skip]
diff.append(per_diff)
diff = np.array(diff)
return np.var(diff)
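# Illustrative sketch (values made up): with skip=2 only every other interval is
# compared, so [1.0, 0.8, 1.1, 0.9] yields the single difference 1.0 - 1.1 = -0.1,
# and the variance of that one-element array is 0.0.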
def interval_bin(intervals, mid_bin_range):
"""
This function calculates the percentage of intervals that fall
into certain bins
Parameters
----------
intervals : array_like
array of interval lengths
mid_bin_range: tuple, optional
edge values for middle bin, defaults to normal record edges
Returns
-------
feat_list : tuple
tuple of bin values as decimal percentages (i.e. 0.2, 0.6, 0.2)
(
percentage intervals below mid_bin_range[0],
percentage intervals between mid_bin_range[0] and mid_bin_range[1],
percentage intervals above mid_bin_range[1]
)
"""
if len(intervals)==0:
print('RR interval == 0')
return [0,0,0]
n_below = 0.0
n_in = 0.0
n_higher = 0.0
for interval in intervals:
if interval < mid_bin_range[0]:
n_below += 1
elif interval <= mid_bin_range[1]:
n_in += 1
else:
n_higher +=1
feat_list = (n_below/len(intervals), n_in/len(intervals), n_higher/len(intervals))
return feat_list
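# Illustrative sketch (values made up): with mid_bin_range=(0.5, 1.0) the
# intervals [0.4, 0.7, 0.8, 1.3] fall one below, two inside and one above the
# middle bin, so interval_bin returns (0.25, 0.5, 0.25).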
def cal_stats(data):
"""
Generate statistics for the data given
Parameters
----------
data : array_like
1-dimensional array with input data.
Returns
-------
Array of summary statistics
"""
power = np.square(data)
return np.asarray([
np.amin(data),
np.amax(data),
np.mean(data),
np.std(data),
np.var(data),
np.average(power),
np.mean(np.absolute(data))
])
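# Illustrative sketch (values made up): cal_stats(np.array([1.0, -1.0])) gives
# [min, max, mean, std, var, mean power, mean |x|] = [-1, 1, 0, 1, 1, 1, 1].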
def stats_feat(coeffs):
"""
Generate stats for wavelet coeffcients
Parameters
----------
coeffs: list
the wavelet coeffcients with the format [cA, {d:cDn},...,{d:cD1}]
usually returned from pywt.wavedecn
Returns
-------
Array of summary statistics for all coefficients
"""
#calculate the stats from the coefficients
features = np.array([])
features = np.append(features, cal_stats(coeffs[0]))
for i in range(1,len(coeffs)):
features = np.append(features, cal_stats(coeffs[i]['d']))
return features
| gpl-3.0 |
glennq/scikit-learn | sklearn/linear_model/passive_aggressive.py | 28 | 11542 | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
.. versionadded:: 0.17
parameter *class_weight* to automatically weight samples.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
.. versionadded:: 0.19
parameter *average* to use weights averaging in SGD
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None, average=False):
super(PassiveAggressiveClassifier, self).__init__(
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
average=average,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
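# Illustrative usage sketch (synthetic data, not part of the original module;
# n_iter follows the older API used in this file):
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X = rng.randn(100, 5)
#     y = (X[:, 0] > 0).astype(int)
#     clf = PassiveAggressiveClassifier(C=1.0, n_iter=5).fit(X, y)
#     clf.predict(X[:3])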
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
.. versionadded:: 0.19
parameter *average* to use weights averaging in SGD
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False,
average=False):
super(PassiveAggressiveRegressor, self).__init__(
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start,
average=average)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
| bsd-3-clause |
thomaslima/PySpice | PySpice/Probe/Plot.py | 1 | 1745 | ####################################################################################################
#
# PySpice - A Spice Package for Python
# Copyright (C) 2014 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
import matplotlib.pyplot as plt
####################################################################################################
def plot(waveform, *args, **kwargs):
"""Plot a waveform using the current Axes instance or the one specified by the *axis* key
argument. Additional parameters are passed to the Matplotlib plot function.
"""
axis = kwargs.get('axis', plt.gca())
if 'axis' in kwargs:
del kwargs['axis']
axis.plot(waveform.abscissa, waveform, *args, **kwargs)
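# Illustrative usage sketch (names are placeholders: 'analysis.out' stands for
# any waveform object carrying an ``abscissa`` attribute, as assumed above):
#
#     figure, ax = plt.subplots()
#     plot(analysis.out, axis=ax, color='blue')
#     plt.show()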
####################################################################################################
#
# End
#
####################################################################################################
| gpl-3.0 |
RobertABT/heightmap | build/matplotlib/examples/event_handling/poly_editor.py | 6 | 5377 | """
This is an example to show how to build cross-GUI applications using
matplotlib event handling to interact with objects on the canvas
"""
import numpy as np
from matplotlib.lines import Line2D
from matplotlib.artist import Artist
from matplotlib.mlab import dist_point_to_segment
class PolygonInteractor:
"""
A polygon editor.
Key-bindings
't' toggle vertex markers on and off. When vertex markers are on,
you can move them, delete them
'd' delete the vertex under point
'i' insert a vertex at point. You must be within epsilon of the
line connecting two existing vertices
"""
showverts = True
epsilon = 5 # max pixel distance to count as a vertex hit
def __init__(self, ax, poly):
if poly.figure is None:
raise RuntimeError('You must first add the polygon to a figure or canvas before defining the interactor')
self.ax = ax
canvas = poly.figure.canvas
self.poly = poly
x, y = zip(*self.poly.xy)
self.line = Line2D(x, y, marker='o', markerfacecolor='r', animated=True)
self.ax.add_line(self.line)
#self._update_line(poly)
cid = self.poly.add_callback(self.poly_changed)
self._ind = None # the active vert
canvas.mpl_connect('draw_event', self.draw_callback)
canvas.mpl_connect('button_press_event', self.button_press_callback)
canvas.mpl_connect('key_press_event', self.key_press_callback)
canvas.mpl_connect('button_release_event', self.button_release_callback)
canvas.mpl_connect('motion_notify_event', self.motion_notify_callback)
self.canvas = canvas
def draw_callback(self, event):
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.ax.draw_artist(self.poly)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
def poly_changed(self, poly):
'this method is called whenever the polygon object is changed'
# only copy the artist props to the line (except visibility)
vis = self.line.get_visible()
Artist.update_from(self.line, poly)
self.line.set_visible(vis) # don't use the poly visibility state
def get_ind_under_point(self, event):
'get the index of the vertex under point if within epsilon tolerance'
# display coords
xy = np.asarray(self.poly.xy)
xyt = self.poly.get_transform().transform(xy)
xt, yt = xyt[:, 0], xyt[:, 1]
d = np.sqrt((xt-event.x)**2 + (yt-event.y)**2)
indseq = np.nonzero(np.equal(d, np.amin(d)))[0]
ind = indseq[0]
if d[ind]>=self.epsilon:
ind = None
return ind
def button_press_callback(self, event):
'whenever a mouse button is pressed'
if not self.showverts: return
if event.inaxes is None: return
if event.button != 1: return
self._ind = self.get_ind_under_point(event)
def button_release_callback(self, event):
'whenever a mouse button is released'
if not self.showverts: return
if event.button != 1: return
self._ind = None
def key_press_callback(self, event):
'whenever a key is pressed'
if not event.inaxes: return
if event.key=='t':
self.showverts = not self.showverts
self.line.set_visible(self.showverts)
if not self.showverts: self._ind = None
elif event.key=='d':
ind = self.get_ind_under_point(event)
if ind is not None:
self.poly.xy = [tup for i,tup in enumerate(self.poly.xy) if i!=ind]
self.line.set_data(zip(*self.poly.xy))
elif event.key=='i':
xys = self.poly.get_transform().transform(self.poly.xy)
p = event.x, event.y # display coords
for i in range(len(xys)-1):
s0 = xys[i]
s1 = xys[i+1]
d = dist_point_to_segment(p, s0, s1)
if d<=self.epsilon:
self.poly.xy = np.array(
list(self.poly.xy[:i]) +
[(event.xdata, event.ydata)] +
list(self.poly.xy[i:]))
self.line.set_data(zip(*self.poly.xy))
break
self.canvas.draw()
def motion_notify_callback(self, event):
'on mouse movement'
if not self.showverts: return
if self._ind is None: return
if event.inaxes is None: return
if event.button != 1: return
x,y = event.xdata, event.ydata
self.poly.xy[self._ind] = x,y
self.line.set_data(zip(*self.poly.xy))
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.poly)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
if __name__ == '__main__':
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
theta = np.arange(0, 2*np.pi, 0.1)
r = 1.5
xs = r*np.cos(theta)
ys = r*np.sin(theta)
poly = Polygon(list(zip(xs, ys)), animated=True)
fig, ax = plt.subplots()
ax.add_patch(poly)
p = PolygonInteractor(ax, poly)
#ax.add_line(p.line)
ax.set_title('Click and drag a point to move it')
ax.set_xlim((-2,2))
ax.set_ylim((-2,2))
plt.show()
| mit |
chetan51/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/ticker.py | 69 | 37420 | """
Tick locating and formatting
============================
This module contains classes to support completely configurable tick
locating and formatting. Although the locators know nothing about
major or minor ticks, they are used by the Axis class to support major
and minor tick locating and formatting. Generic tick locators and
formatters are provided, as well as domain specific custom ones.
Tick locating
-------------
The Locator class is the base class for all tick locators. The
locators handle autoscaling of the view limits based on the data
limits, and the choosing of tick locations. A useful semi-automatic
tick locator is MultipleLocator. You initialize this with a base, eg
10, and it picks axis limits and ticks that are multiples of your
base.
The Locator subclasses defined here are
:class:`NullLocator`
No ticks
:class:`FixedLocator`
Tick locations are fixed
:class:`IndexLocator`
locator for index plots (eg. where x = range(len(y)))
:class:`LinearLocator`
evenly spaced ticks from min to max
:class:`LogLocator`
logarithmically ticks from min to max
:class:`MultipleLocator`
ticks and range are a multiple of base;
either integer or float
:class:`OldAutoLocator`
choose a MultipleLocator and dynamically reassign it for
intelligent ticking during navigation
:class:`MaxNLocator`
finds up to a max number of ticks at nice locations
:class:`AutoLocator`
:class:`MaxNLocator` with simple defaults. This is the default
tick locator for most plotting.
There are a number of locators specialized for date locations - see
the dates module
You can define your own locator by deriving from Locator. You must
override the __call__ method, which returns a sequence of locations,
and you will probably want to override the autoscale method to set the
view limits from the data limits.
If you want to override the default locator, use one of the above or a
custom locator and pass it to the x or y axis instance. The relevant
methods are::
ax.xaxis.set_major_locator( xmajorLocator )
ax.xaxis.set_minor_locator( xminorLocator )
ax.yaxis.set_major_locator( ymajorLocator )
ax.yaxis.set_minor_locator( yminorLocator )
The default minor locator is the NullLocator, eg no minor ticks on by
default.
Tick formatting
---------------
Tick formatting is controlled by classes derived from Formatter. The
formatter operates on a single tick value and returns a string to the
axis.
:class:`NullFormatter`
no labels on the ticks
:class:`FixedFormatter`
set the strings manually for the labels
:class:`FuncFormatter`
user defined function sets the labels
:class:`FormatStrFormatter`
use a sprintf format string
:class:`ScalarFormatter`
default formatter for scalars; autopick the fmt string
:class:`LogFormatter`
formatter for log axes
You can derive your own formatter from the Formatter base class by
simply overriding the ``__call__`` method. The formatter class has access
to the axis view and data limits.
To control the major and minor tick label formats, use one of the
following methods::
ax.xaxis.set_major_formatter( xmajorFormatter )
ax.xaxis.set_minor_formatter( xminorFormatter )
ax.yaxis.set_major_formatter( ymajorFormatter )
ax.yaxis.set_minor_formatter( yminorFormatter )
See :ref:`pylab_examples-major_minor_demo1` for an example of setting
major and minor ticks. See the :mod:`matplotlib.dates` module for
more information and examples of using date locators and formatters.
"""
from __future__ import division
import math
import numpy as np
from matplotlib import rcParams
from matplotlib import cbook
from matplotlib import transforms as mtransforms
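# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): wiring the locators
# and formatters described in the module docstring onto an Axes.  The ``ax``
# argument is assumed to be a regular matplotlib Axes created by the caller.
def _ticker_usage_example(ax):
    xmajor_locator = MultipleLocator(20)          # major tick every 20 data units
    xmajor_formatter = FormatStrFormatter('%d')   # plain integer labels
    xminor_locator = MultipleLocator(5)           # minor tick every 5 data units
    ax.xaxis.set_major_locator(xmajor_locator)
    ax.xaxis.set_major_formatter(xmajor_formatter)
    ax.xaxis.set_minor_locator(xminor_locator)
    ax.yaxis.set_major_locator(MaxNLocator(nbins=5))  # at most ~5 nicely placed y ticks
    return ax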
class TickHelper:
axis = None
class DummyAxis:
def __init__(self):
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
def get_view_interval(self):
return self.viewLim.intervalx
def set_view_interval(self, vmin, vmax):
self.viewLim.intervalx = vmin, vmax
def get_data_interval(self):
return self.dataLim.intervalx
def set_data_interval(self, vmin, vmax):
self.dataLim.intervalx = vmin, vmax
def set_axis(self, axis):
self.axis = axis
def create_dummy_axis(self):
if self.axis is None:
self.axis = self.DummyAxis()
def set_view_interval(self, vmin, vmax):
self.axis.set_view_interval(vmin, vmax)
def set_data_interval(self, vmin, vmax):
self.axis.set_data_interval(vmin, vmax)
def set_bounds(self, vmin, vmax):
self.set_view_interval(vmin, vmax)
self.set_data_interval(vmin, vmax)
class Formatter(TickHelper):
"""
Convert the tick location to a string
"""
# some classes want to see all the locs to help format
# individual ones
locs = []
def __call__(self, x, pos=None):
'Return the format for tick val x at position pos; pos=None indicates unspecified'
raise NotImplementedError('Derived must override')
def format_data(self,value):
return self.__call__(value)
def format_data_short(self,value):
'return a short string version'
return self.format_data(value)
def get_offset(self):
return ''
def set_locs(self, locs):
self.locs = locs
def fix_minus(self, s):
"""
some classes may want to replace a hyphen for minus with the
proper unicode symbol as described `here
<http://sourceforge.net/tracker/index.php?func=detail&aid=1962574&group_id=80706&atid=560720>`_.
The default is to do nothing
Note, if you use this method, eg in :meth:`format_data` or
__call__, you probably don't want to use it for
:meth:`format_data_short` since the toolbar uses this for
interactive coord reporting and I doubt we can expect GUIs
across platforms will handle the unicode correctly. So for
now the classes that override :meth:`fix_minus` should have an
explicit :meth:`format_data_short` method
"""
return s
class NullFormatter(Formatter):
'Always return the empty string'
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return ''
class FixedFormatter(Formatter):
'Return fixed strings for tick labels'
def __init__(self, seq):
"""
seq is a sequence of strings. For positions `i<len(seq)` return
*seq[i]* regardless of *x*. Otherwise return ''
"""
self.seq = seq
self.offset_string = ''
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
if pos is None or pos>=len(self.seq): return ''
else: return self.seq[pos]
def get_offset(self):
return self.offset_string
def set_offset_string(self, ofs):
self.offset_string = ofs
class FuncFormatter(Formatter):
"""
User defined function for formatting
"""
def __init__(self, func):
self.func = func
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.func(x, pos)
class FormatStrFormatter(Formatter):
"""
Use a format string to format the tick
"""
def __init__(self, fmt):
self.fmt = fmt
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.fmt % x
class OldScalarFormatter(Formatter):
"""
Tick location is a plain old number.
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
xmin, xmax = self.axis.get_view_interval()
d = abs(xmax - xmin)
return self.pprint_val(x,d)
def pprint_val(self, x, d):
#if the number is not too big and it's an int, format it as an
#int
if abs(x)<1e4 and x==int(x): return '%d' % x
if d < 1e-2: fmt = '%1.3e'
elif d < 1e-1: fmt = '%1.3f'
elif d > 1e5: fmt = '%1.1e'
elif d > 10 : fmt = '%1.1f'
elif d > 1 : fmt = '%1.2f'
else: fmt = '%1.3f'
s = fmt % x
#print d, x, fmt, s
tup = s.split('e')
if len(tup)==2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
s = '%se%s%s' %(mantissa, sign, exponent)
else:
s = s.rstrip('0').rstrip('.')
return s
class ScalarFormatter(Formatter):
"""
Tick location is a plain old number. If useOffset==True and the data range
is much smaller than the data average, then an offset will be determined
such that the tick labels are meaningful. Scientific notation is used for
data < 1e-3 or data >= 1e4.
"""
def __init__(self, useOffset=True, useMathText=False):
# useOffset allows plotting small data ranges with large offsets:
# for example: [1+1e-9,1+2e-9,1+3e-9]
# useMathText will render the offset and scientific notation in mathtext
self._useOffset = useOffset
self._usetex = rcParams['text.usetex']
self._useMathText = useMathText
self.offset = 0
self.orderOfMagnitude = 0
self.format = ''
self._scientific = True
self._powerlimits = rcParams['axes.formatter.limits']
def fix_minus(self, s):
'use a unicode minus rather than hyphen'
if rcParams['text.usetex'] or not rcParams['axes.unicode_minus']: return s
else: return s.replace('-', u'\u2212')
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
if len(self.locs)==0:
return ''
else:
s = self.pprint_val(x)
return self.fix_minus(s)
def set_scientific(self, b):
'''True or False to turn scientific notation on or off
see also :meth:`set_powerlimits`
'''
self._scientific = bool(b)
def set_powerlimits(self, lims):
'''
Sets size thresholds for scientific notation.
e.g. ``xaxis.set_powerlimits((-3, 4))`` sets the pre-2007 default in
which scientific notation is used for numbers less than
1e-3 or greater than 1e4.
See also :meth:`set_scientific`.
'''
assert len(lims) == 2, "argument must be a sequence of length 2"
self._powerlimits = lims
def format_data_short(self,value):
'return a short formatted string representation of a number'
return '%1.3g'%value
def format_data(self,value):
'return a formatted string representation of a number'
s = self._formatSciNotation('%1.10e'% value)
return self.fix_minus(s)
def get_offset(self):
"""Return scientific notation, plus offset"""
if len(self.locs)==0: return ''
s = ''
if self.orderOfMagnitude or self.offset:
offsetStr = ''
sciNotStr = ''
if self.offset:
offsetStr = self.format_data(self.offset)
if self.offset > 0: offsetStr = '+' + offsetStr
if self.orderOfMagnitude:
if self._usetex or self._useMathText:
sciNotStr = self.format_data(10**self.orderOfMagnitude)
else:
sciNotStr = '1e%d'% self.orderOfMagnitude
if self._useMathText:
if sciNotStr != '':
sciNotStr = r'\times\mathdefault{%s}' % sciNotStr
s = ''.join(('$',sciNotStr,r'\mathdefault{',offsetStr,'}$'))
elif self._usetex:
if sciNotStr != '':
sciNotStr = r'\times%s' % sciNotStr
s = ''.join(('$',sciNotStr,offsetStr,'$'))
else:
s = ''.join((sciNotStr,offsetStr))
return self.fix_minus(s)
def set_locs(self, locs):
'set the locations of the ticks'
self.locs = locs
if len(self.locs) > 0:
vmin, vmax = self.axis.get_view_interval()
d = abs(vmax-vmin)
if self._useOffset: self._set_offset(d)
self._set_orderOfMagnitude(d)
self._set_format()
def _set_offset(self, range):
# offset of 20,001 is 20,000, for example
locs = self.locs
if locs is None or not len(locs) or range == 0:
self.offset = 0
return
ave_loc = np.mean(locs)
if ave_loc: # dont want to take log10(0)
ave_oom = math.floor(math.log10(np.mean(np.absolute(locs))))
range_oom = math.floor(math.log10(range))
if np.absolute(ave_oom-range_oom) >= 3: # four sig-figs
if ave_loc < 0:
self.offset = math.ceil(np.max(locs)/10**range_oom)*10**range_oom
else:
self.offset = math.floor(np.min(locs)/10**(range_oom))*10**(range_oom)
else: self.offset = 0
def _set_orderOfMagnitude(self,range):
# if scientific notation is to be used, find the appropriate exponent
# if using a numerical offset, find the exponent after applying the offset
if not self._scientific:
self.orderOfMagnitude = 0
return
locs = np.absolute(self.locs)
if self.offset: oom = math.floor(math.log10(range))
else:
if locs[0] > locs[-1]: val = locs[0]
else: val = locs[-1]
if val == 0: oom = 0
else: oom = math.floor(math.log10(val))
if oom <= self._powerlimits[0]:
self.orderOfMagnitude = oom
elif oom >= self._powerlimits[1]:
self.orderOfMagnitude = oom
else:
self.orderOfMagnitude = 0
def _set_format(self):
# set the format string to format all the ticklabels
# The floating point black magic (adding 1e-15 and formatting
# to 8 digits) may warrant review and cleanup.
locs = (np.asarray(self.locs)-self.offset) / 10**self.orderOfMagnitude+1e-15
sigfigs = [len(str('%1.8f'% loc).split('.')[1].rstrip('0')) \
for loc in locs]
sigfigs.sort()
self.format = '%1.' + str(sigfigs[-1]) + 'f'
if self._usetex:
self.format = '$%s$' % self.format
elif self._useMathText:
self.format = '$\mathdefault{%s}$' % self.format
def pprint_val(self, x):
xp = (x-self.offset)/10**self.orderOfMagnitude
if np.absolute(xp) < 1e-8: xp = 0
return self.format % xp
def _formatSciNotation(self, s):
# transform 1e+004 into 1e4, for example
tup = s.split('e')
try:
significand = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
if self._useMathText or self._usetex:
if significand == '1':
# reformat 1x10^y as 10^y
significand = ''
if exponent:
exponent = '10^{%s%s}'%(sign, exponent)
if significand and exponent:
return r'%s{\times}%s'%(significand, exponent)
else:
return r'%s%s'%(significand, exponent)
else:
s = ('%se%s%s' %(significand, sign, exponent)).rstrip('e')
return s
except IndexError, msg:
return s
class LogFormatter(Formatter):
"""
Format values for log axis;
if attribute *decadeOnly* is True, only the decades will be labelled.
"""
def __init__(self, base=10.0, labelOnlyBase = True):
"""
*base* is used to locate the decade tick,
which will be the only one to be labeled if *labelOnlyBase*
is ``True``
"""
self._base = base+0.0
self.labelOnlyBase=labelOnlyBase
self.decadeOnly = True
def base(self,base):
'change the *base* for labeling - warning: should always match the base used for :class:`LogLocator`'
self._base=base
def label_minor(self,labelOnlyBase):
'switch on/off minor ticks labeling'
self.labelOnlyBase=labelOnlyBase
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
vmin, vmax = self.axis.get_view_interval()
d = abs(vmax - vmin)
b=self._base
if x == 0.0:
return '0'
sign = np.sign(x)
# only label the decades
fx = math.log(abs(x))/math.log(b)
isDecade = self.is_decade(fx)
if not isDecade and self.labelOnlyBase: s = ''
elif x>10000: s= '%1.0e'%x
elif x<1: s = '%1.0e'%x
else : s = self.pprint_val(x,d)
if sign == -1:
s = '-%s' % s
return self.fix_minus(s)
def format_data(self,value):
self.labelOnlyBase = False
value = cbook.strip_math(self.__call__(value))
self.labelOnlyBase = True
return value
def format_data_short(self,value):
'return a short formatted string representation of a number'
return '%1.3g'%value
def is_decade(self, x):
n = self.nearest_long(x)
return abs(x-n)<1e-10
def nearest_long(self, x):
if x==0: return 0L
elif x>0: return long(x+0.5)
else: return long(x-0.5)
def pprint_val(self, x, d):
#if the number is not too big and it's an int, format it as an
#int
if abs(x)<1e4 and x==int(x): return '%d' % x
if d < 1e-2: fmt = '%1.3e'
elif d < 1e-1: fmt = '%1.3f'
elif d > 1e5: fmt = '%1.1e'
elif d > 10 : fmt = '%1.1f'
elif d > 1 : fmt = '%1.2f'
else: fmt = '%1.3f'
s = fmt % x
#print d, x, fmt, s
tup = s.split('e')
if len(tup)==2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
s = '%se%s%s' %(mantissa, sign, exponent)
else:
s = s.rstrip('0').rstrip('.')
return s
class LogFormatterExponent(LogFormatter):
"""
Format values for log axis; using ``exponent = log_base(value)``
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
d = abs(vmax-vmin)
b=self._base
if x == 0:
return '0'
sign = np.sign(x)
# only label the decades
fx = math.log(abs(x))/math.log(b)
isDecade = self.is_decade(fx)
if not isDecade and self.labelOnlyBase: s = ''
#if 0: pass
elif fx>10000: s= '%1.0e'%fx
#elif x<1: s = '$10^{%d}$'%fx
#elif x<1: s = '10^%d'%fx
elif fx<1: s = '%1.0e'%fx
else : s = self.pprint_val(fx,d)
if sign == -1:
s = '-%s' % s
return self.fix_minus(s)
class LogFormatterMathtext(LogFormatter):
"""
Format values for log axis; using ``exponent = log_base(value)``
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
b = self._base
# only label the decades
if x == 0:
return '$0$'
sign = np.sign(x)
fx = math.log(abs(x))/math.log(b)
isDecade = self.is_decade(fx)
usetex = rcParams['text.usetex']
if sign == -1:
sign_string = '-'
else:
sign_string = ''
if not isDecade and self.labelOnlyBase: s = ''
elif not isDecade:
if usetex:
s = r'$%s%d^{%.2f}$'% (sign_string, b, fx)
else:
s = '$\mathdefault{%s%d^{%.2f}}$'% (sign_string, b, fx)
else:
if usetex:
s = r'$%s%d^{%d}$'% (sign_string, b, self.nearest_long(fx))
else:
s = r'$\mathdefault{%s%d^{%d}}$'% (sign_string, b, self.nearest_long(fx))
return s
class Locator(TickHelper):
"""
Determine the tick locations;
Note, you should not use the same locator between different :class:`~matplotlib.axis.Axis`
because the locator stores references to the Axis data and view
limits
"""
def __call__(self):
'Return the locations of the ticks'
raise NotImplementedError('Derived must override')
def view_limits(self, vmin, vmax):
"""
select a scale for the range from vmin to vmax
Normally this will be overridden.
"""
return mtransforms.nonsingular(vmin, vmax)
def autoscale(self):
'autoscale the view limits'
return self.view_limits(*self.axis.get_view_interval())
def pan(self, numsteps):
'Pan numticks (can be positive or negative)'
ticks = self()
numticks = len(ticks)
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
if numticks>2:
step = numsteps*abs(ticks[0]-ticks[1])
else:
d = abs(vmax-vmin)
step = numsteps*d/6.
vmin += step
vmax += step
self.axis.set_view_interval(vmin, vmax, ignore=True)
def zoom(self, direction):
"Zoom in/out on axis; if direction is >0 zoom in, else zoom out"
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
interval = abs(vmax-vmin)
step = 0.1*interval*direction
self.axis.set_view_interval(vmin + step, vmax - step, ignore=True)
def refresh(self):
'refresh internal information based on current lim'
pass
class IndexLocator(Locator):
"""
Place a tick on every multiple of some base number of points
plotted, eg on every 5th point. It is assumed that you are doing
index plotting; ie the axis is 0, len(data). This is mainly
useful for x ticks.
"""
def __init__(self, base, offset):
'place ticks on the i-th data points where (i-offset)%base==0'
self._base = base
self.offset = offset
def __call__(self):
'Return the locations of the ticks'
dmin, dmax = self.axis.get_data_interval()
return np.arange(dmin + self.offset, dmax+1, self._base)
class FixedLocator(Locator):
"""
Tick locations are fixed. If nbins is not None,
the array of possible positions will be subsampled to
keep the number of ticks <= nbins +1.
"""
def __init__(self, locs, nbins=None):
self.locs = locs
self.nbins = nbins
if self.nbins is not None:
self.nbins = max(self.nbins, 2)
def __call__(self):
'Return the locations of the ticks'
if self.nbins is None:
return self.locs
step = max(int(0.99 + len(self.locs) / float(self.nbins)), 1)
return self.locs[::step]
class NullLocator(Locator):
"""
No ticks
"""
def __call__(self):
'Return the locations of the ticks'
return []
class LinearLocator(Locator):
"""
Determine the tick locations
The first time this function is called it will try to set the
number of ticks to make a nice tick partitioning. Thereafter the
number of ticks will be fixed so that interactive navigation will
be nice
"""
def __init__(self, numticks = None, presets=None):
"""
Use presets to set locs based on limits. A dict mapping (vmin, vmax) -> locs
"""
self.numticks = numticks
if presets is None:
self.presets = {}
else:
self.presets = presets
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
if vmax<vmin:
vmin, vmax = vmax, vmin
if (vmin, vmax) in self.presets:
return self.presets[(vmin, vmax)]
if self.numticks is None:
self._set_numticks()
if self.numticks==0: return []
ticklocs = np.linspace(vmin, vmax, self.numticks)
return ticklocs
def _set_numticks(self):
self.numticks = 11 # todo; be smart here; this is just for dev
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
if vmax<vmin:
vmin, vmax = vmax, vmin
if vmin==vmax:
vmin-=1
vmax+=1
exponent, remainder = divmod(math.log10(vmax - vmin), 1)
if remainder < 0.5:
exponent -= 1
scale = 10**(-exponent)
vmin = math.floor(scale*vmin)/scale
vmax = math.ceil(scale*vmax)/scale
return mtransforms.nonsingular(vmin, vmax)
def closeto(x,y):
if abs(x-y)<1e-10: return True
else: return False
class Base:
'this solution has some hacks to deal with floating point inaccuracies'
def __init__(self, base):
assert(base>0)
self._base = base
def lt(self, x):
'return the largest multiple of base < x'
d,m = divmod(x, self._base)
if closeto(m,0) and not closeto(m/self._base,1):
return (d-1)*self._base
return d*self._base
def le(self, x):
'return the largest multiple of base <= x'
d,m = divmod(x, self._base)
if closeto(m/self._base,1): # was closeto(m, self._base)
#looks like floating point error
return (d+1)*self._base
return d*self._base
def gt(self, x):
'return the smallest multiple of base > x'
d,m = divmod(x, self._base)
if closeto(m/self._base,1):
#looks like floating point error
return (d+2)*self._base
return (d+1)*self._base
def ge(self, x):
'return the smallest multiple of base >= x'
d,m = divmod(x, self._base)
if closeto(m,0) and not closeto(m/self._base,1):
return d*self._base
return (d+1)*self._base
def get_base(self):
return self._base
class MultipleLocator(Locator):
"""
Set a tick on every integer that is a multiple of base in the
view interval
"""
def __init__(self, base=1.0):
self._base = Base(base)
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
if vmax<vmin:
vmin, vmax = vmax, vmin
vmin = self._base.ge(vmin)
base = self._base.get_base()
n = (vmax - vmin + 0.001*base)//base
locs = vmin + np.arange(n+1) * base
return locs
def view_limits(self, dmin, dmax):
"""
Set the view limits to the nearest multiples of base that
contain the data
"""
vmin = self._base.le(dmin)
vmax = self._base.ge(dmax)
if vmin==vmax:
vmin -=1
vmax +=1
return mtransforms.nonsingular(vmin, vmax)
def scale_range(vmin, vmax, n = 1, threshold=100):
dv = abs(vmax - vmin)
maxabsv = max(abs(vmin), abs(vmax))
if maxabsv == 0 or dv/maxabsv < 1e-12:
return 1.0, 0.0
meanv = 0.5*(vmax+vmin)
if abs(meanv)/dv < threshold:
offset = 0
elif meanv > 0:
ex = divmod(math.log10(meanv), 1)[0]
offset = 10**ex
else:
ex = divmod(math.log10(-meanv), 1)[0]
offset = -10**ex
ex = divmod(math.log10(dv/n), 1)[0]
scale = 10**ex
return scale, offset
class MaxNLocator(Locator):
"""
Select no more than N intervals at nice locations.
"""
def __init__(self, nbins = 10, steps = None,
trim = True,
integer=False,
symmetric=False):
self._nbins = int(nbins)
self._trim = trim
self._integer = integer
self._symmetric = symmetric
if steps is None:
self._steps = [1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10]
else:
if int(steps[-1]) != 10:
steps = list(steps)
steps.append(10)
self._steps = steps
if integer:
self._steps = [n for n in self._steps if divmod(n,1)[1] < 0.001]
def bin_boundaries(self, vmin, vmax):
nbins = self._nbins
scale, offset = scale_range(vmin, vmax, nbins)
if self._integer:
scale = max(1, scale)
vmin -= offset
vmax -= offset
raw_step = (vmax-vmin)/nbins
scaled_raw_step = raw_step/scale
for step in self._steps:
if step < scaled_raw_step:
continue
step *= scale
best_vmin = step*divmod(vmin, step)[0]
best_vmax = best_vmin + step*nbins
if (best_vmax >= vmax):
break
if self._trim:
extra_bins = int(divmod((best_vmax - vmax), step)[0])
nbins -= extra_bins
return (np.arange(nbins+1) * step + best_vmin + offset)
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
return self.bin_boundaries(vmin, vmax)
def view_limits(self, dmin, dmax):
if self._symmetric:
maxabs = max(abs(dmin), abs(dmax))
dmin = -maxabs
dmax = maxabs
dmin, dmax = mtransforms.nonsingular(dmin, dmax, expander = 0.05)
return np.take(self.bin_boundaries(dmin, dmax), [0,-1])
def decade_down(x, base=10):
'floor x to the nearest lower decade'
lx = math.floor(math.log(x)/math.log(base))
return base**lx
def decade_up(x, base=10):
'ceil x to the nearest higher decade'
lx = math.ceil(math.log(x)/math.log(base))
return base**lx
def is_decade(x,base=10):
lx = math.log(x)/math.log(base)
return lx==int(lx)
class LogLocator(Locator):
"""
Determine the tick locations for log axes
"""
def __init__(self, base=10.0, subs=[1.0]):
"""
place ticks on the location= base**i*subs[j]
"""
self.base(base)
self.subs(subs)
self.numticks = 15
def base(self,base):
"""
set the base of the log scaling (major tick every base**i, i integer)
"""
self._base=base+0.0
def subs(self,subs):
"""
set the minor ticks of the log scaling at every base**i*subs[j]
"""
if subs is None:
self._subs = None # autosub
else:
self._subs = np.asarray(subs)+0.0
def _set_numticks(self):
self.numticks = 15 # todo; be smart here; this is just for dev
def __call__(self):
'Return the locations of the ticks'
b=self._base
vmin, vmax = self.axis.get_view_interval()
if vmin <= 0.0:
vmin = self.axis.get_minpos()
if vmin <= 0.0:
raise ValueError(
"Data has no positive values, and therefore can not be log-scaled.")
vmin = math.log(vmin)/math.log(b)
vmax = math.log(vmax)/math.log(b)
if vmax<vmin:
vmin, vmax = vmax, vmin
numdec = math.floor(vmax)-math.ceil(vmin)
if self._subs is None: # autosub
if numdec>10: subs = np.array([1.0])
elif numdec>6: subs = np.arange(2.0, b, 2.0)
else: subs = np.arange(2.0, b)
else:
subs = self._subs
stride = 1
while numdec/stride+1 > self.numticks:
stride += 1
decades = np.arange(math.floor(vmin),
math.ceil(vmax)+stride, stride)
if len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0):
ticklocs = []
for decadeStart in b**decades:
ticklocs.extend( subs*decadeStart )
else:
ticklocs = b**decades
return np.array(ticklocs)
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
if vmax<vmin:
vmin, vmax = vmax, vmin
minpos = self.axis.get_minpos()
if minpos<=0:
raise ValueError(
"Data has no positive values, and therefore can not be log-scaled.")
if vmin <= minpos:
vmin = minpos
if not is_decade(vmin,self._base): vmin = decade_down(vmin,self._base)
if not is_decade(vmax,self._base): vmax = decade_up(vmax,self._base)
if vmin==vmax:
vmin = decade_down(vmin,self._base)
vmax = decade_up(vmax,self._base)
result = mtransforms.nonsingular(vmin, vmax)
return result
class SymmetricalLogLocator(Locator):
"""
Determine the tick locations for symmetric log axes
"""
def __init__(self, transform, subs=[1.0]):
"""
place ticks on the location= base**i*subs[j]
"""
self._transform = transform
self._subs = subs
self.numticks = 15
def _set_numticks(self):
self.numticks = 15 # todo; be smart here; this is just for dev
def __call__(self):
'Return the locations of the ticks'
b = self._transform.base
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = self._transform.transform((vmin, vmax))
if vmax<vmin:
vmin, vmax = vmax, vmin
numdec = math.floor(vmax)-math.ceil(vmin)
if self._subs is None:
if numdec>10: subs = np.array([1.0])
elif numdec>6: subs = np.arange(2.0, b, 2.0)
else: subs = np.arange(2.0, b)
else:
subs = np.asarray(self._subs)
stride = 1
while numdec/stride+1 > self.numticks:
stride += 1
decades = np.arange(math.floor(vmin), math.ceil(vmax)+stride, stride)
if len(subs) > 1 or subs[0] != 1.0:
ticklocs = []
for decade in decades:
ticklocs.extend(subs * (np.sign(decade) * b ** np.abs(decade)))
else:
ticklocs = np.sign(decades) * b ** np.abs(decades)
return np.array(ticklocs)
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
b = self._transform.base
if vmax<vmin:
vmin, vmax = vmax, vmin
if not is_decade(abs(vmin), b):
if vmin < 0:
vmin = -decade_up(-vmin, b)
else:
vmin = decade_down(vmin, b)
if not is_decade(abs(vmax), b):
if vmax < 0:
vmax = -decade_down(-vmax, b)
else:
vmax = decade_up(vmax, b)
if vmin == vmax:
if vmin < 0:
vmin = -decade_up(-vmin, b)
vmax = -decade_down(-vmax, b)
else:
vmin = decade_down(vmin, b)
vmax = decade_up(vmax, b)
result = mtransforms.nonsingular(vmin, vmax)
return result
class AutoLocator(MaxNLocator):
def __init__(self):
MaxNLocator.__init__(self, nbins=9, steps=[1, 2, 5, 10])
class OldAutoLocator(Locator):
"""
On autoscale this class picks the best MultipleLocator to set the
view limits and the tick locs.
"""
def __init__(self):
self._locator = LinearLocator()
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def refresh(self):
'refresh internal information based on current lim'
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = 0.05)
d = abs(vmax-vmin)
self._locator = self.get_locator(d)
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
d = abs(vmax-vmin)
self._locator = self.get_locator(d)
return self._locator.view_limits(vmin, vmax)
def get_locator(self, d):
'pick the best locator based on a distance'
d = abs(d)
if d<=0:
locator = MultipleLocator(0.2)
else:
try: ld = math.log10(d)
except OverflowError:
raise RuntimeError('AutoLocator illegal data interval range')
fld = math.floor(ld)
base = 10**fld
#if ld==fld: base = 10**(fld-1)
#else: base = 10**fld
if d >= 5*base : ticksize = base
elif d >= 2*base : ticksize = base/2.0
else : ticksize = base/5.0
locator = MultipleLocator(ticksize)
return locator
__all__ = ('TickHelper', 'Formatter', 'FixedFormatter',
'NullFormatter', 'FuncFormatter', 'FormatStrFormatter',
'ScalarFormatter', 'LogFormatter', 'LogFormatterExponent',
'LogFormatterMathtext', 'Locator', 'IndexLocator',
'FixedLocator', 'NullLocator', 'LinearLocator',
'LogLocator', 'AutoLocator', 'MultipleLocator',
'MaxNLocator', )
| gpl-3.0 |
BonexGu/Blik2D-SDK | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/examples/tutorials/input_fn/boston.py | 51 | 2709 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def input_fn(data_set):
feature_cols = {k: tf.constant(data_set[k].values) for k in FEATURES}
labels = tf.constant(data_set[LABEL].values)
return feature_cols, labels
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.contrib.layers.real_valued_column(k)
for k in FEATURES]
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols,
hidden_units=[10, 10],
model_dir="/tmp/boston_model")
# Fit
regressor.fit(input_fn=lambda: input_fn(training_set), steps=5000)
# Score accuracy
ev = regressor.evaluate(input_fn=lambda: input_fn(test_set), steps=1)
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions
y = regressor.predict(input_fn=lambda: input_fn(prediction_set))
# .predict() returns an iterator; convert to a list and print predictions
predictions = list(itertools.islice(y, 6))
print("Predictions: {}".format(str(predictions)))
if __name__ == "__main__":
tf.app.run()
| mit |
tjhunter/karps | python/karps/row.py | 1 | 8926 | """ Utilities to express rows of data with Karps.
"""
import pandas as pd
from .proto import types_pb2
from .proto import row_pb2
from .types import *
__all__ = ['CellWithType', 'as_cell', 'as_python_object', 'as_pandas_object']
class CellWithType(object):
""" A cell of data, with its type information.
This is usually constructed with one of the helper functions.
"""
def __init__(self, proto):
assert proto
self._proto = proto # type: row_pb2.CellWithType
def __repr__(self):
return repr((self.type, self._proto.cell))
def __eq__(self, other):
return self._proto == other._proto
def __ne__(self, other):
return self._proto != other._proto
@property
def type(self):
""" The data type associated to this cell.
"""
if self._proto.cell_type:
return DataType(self._proto.cell_type)
return None
def as_cell(obj, schema=None):
""" Converts a python object as a cell, potentially with the help of extra type hints.
If the type is not provided, it will be inferred
The object can be any of the following:
- None
- a primitive
- an iterable or a tuple. They are considered array types, unless a struct type is provided as a
hint
- a dictionary. It is considered a struct, with all the fields sorted in alphabetical order.
- a pandas type
- a Spark row
- a numpy row
"""
cwt_proto = _as_cell(obj, schema._proto if schema else None)
return CellWithType(cwt_proto)
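# Illustrative sketch (not part of the original module): a few conversions
# handled by ``as_cell``.  The exact proto layout is an implementation detail;
# the point is that the SQL type is inferred when no schema is supplied.
def _as_cell_examples():
    c_int = as_cell(3)                              # INT, inferred
    c_arr = as_cell([1.0, 2.0, 3.0])                # array of DOUBLE, inferred
    c_struct = as_cell({'name': "x", 'value': 2})   # struct, fields sorted by name
    return c_int, c_arr, c_struct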
def as_python_object(cwt):
""" Converts a CellWithType object to a python object (best effort).
"""
return _as_python(cwt._proto.cell, cwt._proto.cell_type)
def as_pandas_object(cwt):
"""Converts a CellWithType object to a pandas object (best effort)"""
pobj = as_python_object(cwt)
if isinstance(pobj, list):
# This is a list, try to convert it to a pandas dataframe.
return pd.DataFrame(pobj)
return pobj
def _as_python(c_proto, tpe_proto):
# The type is still required for dictionaries.
if c_proto.HasField('int_value'):
return int(c_proto.int_value)
if c_proto.HasField('string_value'):
return str(c_proto.string_value)
if c_proto.HasField('double_value'):
return float(c_proto.double_value)
if c_proto.HasField('array_value'):
return [_as_python(x, tpe_proto.array_type) for x in c_proto.array_value.values]
if c_proto.HasField('struct_value'):
fields = tpe_proto.struct_type.fields
field_names = [f.field_name for f in fields]
field_types = [f.field_type for f in fields]
values = [_as_python(x, t) for (x, t) in zip(c_proto.struct_value, field_types)]
return dict(zip(field_names, values))
def _as_cell_infer(obj):
""" Converts a python object to a proto CellWithType object, and attemps to infer the data type
at the same time.
"""
if obj is None:
# No type, empty data.
return row_pb2.CellWithType(cell=row_pb2.Cell(), cell_type=None)
if isinstance(obj, int):
# Strict int
return _as_cell(obj, types_pb2.SQLType(basic_type=types_pb2.SQLType.INT, nullable=False))
if isinstance(obj, float):
# Strict float -> double
return _as_cell(obj, types_pb2.SQLType(basic_type=types_pb2.SQLType.DOUBLE, nullable=False))
if isinstance(obj, str):
# Strict string
return _as_cell(obj, types_pb2.SQLType(basic_type=types_pb2.SQLType.STRING, nullable=False))
if isinstance(obj, list):
# Something that looks like a list.
obj = list(obj)
# Get the inner content, and check that the types are mergeable after that.
l = [_as_cell_infer(x) for x in obj]
inner_type = _merge_proto_types([cwt.cell_type for cwt in l])
cells = [cwt.cell for cwt in l]
return row_pb2.CellWithType(
cell=row_pb2.Cell(
array_value=row_pb2.ArrayCell(
values=cells)),
cell_type=types_pb2.SQLType(
array_type=inner_type))
if isinstance(obj, tuple):
# A tuple is interpreted as a dictionary with some implicit names:
field_names = ["_" + str(idx+1) for idx in range(len(obj))]
dct = dict(zip(field_names, obj))
return _as_cell_infer(dct)
if isinstance(obj, dict):
# It is a dictionary. This is easy, we just build a structure from the inner data.
cells = [_as_cell(x, None) for (_, x) in obj.items()]
keys = [k for (k, _) in obj.items()]
return _struct(cells, keys, sort=True)
raise Exception("Cannot understand object of type %s: %s" % (type(obj), obj))
_none_proto_type = types_pb2.SQLType()
def _as_cell(obj, tpe_proto):
""" Converts a python object to a proto CellWithType object.
obj: python object
tpe_proto: a proto.Type object
"""
# This is one of the most complex functions, because it tries to do the 'right' thing from
# the perspective of the user, which is a fuzzy concept.
if tpe_proto is None or tpe_proto == _none_proto_type:
return _as_cell_infer(obj)
if obj is None:
assert tpe_proto.nullable, (obj, tpe_proto)
# empty value, potentially no type either.
return row_pb2.CellWithType(cell=row_pb2.Cell(), cell_type=tpe_proto)
if isinstance(obj, int):
assert tpe_proto.basic_type == types_pb2.SQLType.INT, (type(tpe_proto), tpe_proto)
return row_pb2.CellWithType(
cell=row_pb2.Cell(int_value=int(obj)),
cell_type=tpe_proto)
if isinstance(obj, float):
assert tpe_proto.basic_type == types_pb2.SQLType.DOUBLE, (type(tpe_proto), tpe_proto)
return row_pb2.CellWithType(
cell=row_pb2.Cell(double_value=float(obj)),
cell_type=tpe_proto)
if isinstance(obj, str):
assert tpe_proto.basic_type == types_pb2.SQLType.STRING, (type(tpe_proto), tpe_proto)
return row_pb2.CellWithType(
cell=row_pb2.Cell(string_value=str(obj)),
cell_type=tpe_proto)
# Something that looks like a list
if isinstance(obj, (list, tuple)) and tpe_proto.HasField("array_type"):
obj = list(obj)
cwt_ps = [_as_cell(x, tpe_proto.array_type) for x in obj]
c_ps = [cwt_p.cell for cwt_p in cwt_ps]
# Try to merge together the types of the inner cells. We may have some surprises here.
merge_type = tpe_proto.array_type
for cwt_p in cwt_ps:
merge_type = merge_proto_types(merge_type, cwt_p.cell_type)
return row_pb2.CellWithType(
cell=row_pb2.Cell(array_value=row_pb2.ArrayCell(values=c_ps)),
cell_type=types_pb2.SQLType(
array_type=merge_type,
nullable = tpe_proto.nullable))
if isinstance(obj, dict) and tpe_proto.HasField("struct_type"):
fields = tpe_proto.struct_type.fields
assert len(obj) == len(fields), (tpe_proto, obj)
cells = []
new_fields = []
for field in fields:
assert field.field_name in obj, (field, obj)
# The inner type may be None, in which case it is inferred from the value.
f_cwt = _as_cell(obj[field.field_name], field.field_type)
cells.append(f_cwt.cell)
# The type may also have been updated if something got inferred.
f = types_pb2.StructField(
field_name=field.field_name,
field_type=f_cwt.cell_type)
new_fields.append(f)
return row_pb2.CellWithType(
cell=row_pb2.Cell(struct_value=row_pb2.Row(values=cells)),
cell_type=types_pb2.SQLType(
struct_type=types_pb2.StructType(fields=new_fields),
nullable=tpe_proto.nullable))
if isinstance(obj, (list, tuple)) and tpe_proto.HasField("struct_type"):
# Treat it as a dictionary, for which the user has not specified the name of the fields.
obj = list(obj)
fields = tpe_proto.struct_type.fields
assert len(obj) == len(fields), (tpe_proto, obj)
cells = []
new_fields = []
for field, x in zip(fields, obj):
# The inner type may be None, in which case it is inferred from the value.
f_cwt = _as_cell(x, field.field_type)
assert f_cwt, (x, field)
cells.append(f_cwt.cell)
# The type may also have been updated if something got inferred.
f = types_pb2.StructField(
field_name=field.field_name,
field_type=f_cwt.cell_type)
new_fields.append(f)
return row_pb2.CellWithType(
cell=row_pb2.Cell(struct_value=row_pb2.Row(values=cells)),
cell_type=types_pb2.SQLType(
struct_type=types_pb2.StructType(fields=new_fields),
nullable=tpe_proto.nullable))
def _merge_proto_types(l):
res = None
for t_p in l:
if res is None:
res = t_p
else:
res = merge_proto_types(res, t_p)
return res
def _struct(cwts, field_names, sort=False):
assert len(cwts) == len(field_names)
if sort:
z = sorted(zip(field_names, cwts), key=lambda k: k[0])
return _struct([c for (_, c) in z], [k for (k, _) in z])
sfields = [types_pb2.StructField(
field_name=fname,
field_type=cwt.cell_type) for (cwt, fname) in zip(cwts, field_names)]
vals = [cwt.cell for cwt in cwts]
return row_pb2.CellWithType(
cell=row_pb2.Cell(array_value=row_pb2.ArrayCell(values=vals)),
cell_type = types_pb2.SQLType(
struct_type=types_pb2.StructType(
fields=sfields)
)
)
| apache-2.0 |
timsnyder/bokeh | bokeh/models/tests/test_mappers.py | 1 | 4293 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh.models.tests.utils.property_utils import check_properties_existence
from bokeh.palettes import Spectral6
# Module under test
import bokeh.models.mappers as bmm
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_CategoricalColorMapper(object):
def test_basic(self):
mapper = bmm.CategoricalColorMapper()
check_properties_existence(mapper, [
"factors",
"palette",
"start",
"end",
"nan_color"],
)
def test_warning_with_short_palette(self, recwarn):
bmm.CategoricalColorMapper(factors=["a", "b", "c"], palette=["red", "green"])
assert len(recwarn) == 1
def test_no_warning_with_long_palette(self, recwarn):
bmm.CategoricalColorMapper(factors=["a", "b", "c"], palette=["red", "green", "orange", "blue"])
assert len(recwarn) == 0
def test_with_pandas_index(self, pd):
fruits = ['Apples', 'Pears', 'Nectarines', 'Plums', 'Grapes', 'Strawberries']
years = ['2015', '2016', '2017']
data = {'2015' : [2, 1, 4, 3, 2, 4],
'2016' : [5, 3, 3, 2, 4, 6],
'2017' : [3, 2, 4, 4, 5, 3]}
df = pd.DataFrame(data, index=fruits)
fruits = df.index
years = df.columns
m = bmm.CategoricalColorMapper(palette=Spectral6, factors=years, start=1, end=2)
assert list(m.factors) == list(years)
assert isinstance(m.factors, pd.Index)
class Test_CategoricalPatternMapper(object):
def test_basic(self):
mapper = bmm.CategoricalPatternMapper()
check_properties_existence(mapper, [
"factors",
"patterns",
"start",
"end",
"default_value"],
)
class Test_CategoricalMarkerMapper(object):
def test_basic(self):
mapper = bmm.CategoricalMarkerMapper()
check_properties_existence(mapper, [
"factors",
"markers",
"start",
"end",
"default_value"],
)
class Test_LinearColorMapper(object):
def test_basic(self):
mapper = bmm.LinearColorMapper()
check_properties_existence(mapper, [
"palette",
"low",
"high",
"low_color",
"high_color",
"nan_color"],
)
class Test_LogColorMapper(object):
def test_basic(self):
mapper = bmm.LogColorMapper()
check_properties_existence(mapper, [
"palette",
"low",
"high",
"low_color",
"high_color",
"nan_color"],
)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
cogeorg/BlackRhino | examples/firesales_simple/networkx/readwrite/tests/test_gml.py | 35 | 3099 | #!/usr/bin/env python
import io
from nose.tools import *
from nose import SkipTest
import networkx
class TestGraph(object):
@classmethod
def setupClass(cls):
global pyparsing
try:
import pyparsing
except ImportError:
try:
import matplotlib.pyparsing as pyparsing
except:
raise SkipTest('gml test: pyparsing not available.')
def setUp(self):
self.simple_data="""Creator me
graph [
comment "This is a sample graph"
directed 1
IsPlanar 1
pos [ x 0 y 1 ]
node [
id 1
label "Node 1"
pos [ x 1 y 1 ]
]
node [
id 2
pos [ x 1 y 2 ]
label "Node 2"
]
node [
id 3
label "Node 3"
pos [ x 1 y 3 ]
]
edge [
source 1
target 2
label "Edge from node 1 to node 2"
color [line "blue" thickness 3]
]
edge [
source 2
target 3
label "Edge from node 2 to node 3"
]
edge [
source 3
target 1 label
"Edge from node 3 to node 1"
]
]
"""
def test_parse_gml(self):
G=networkx.parse_gml(self.simple_data,relabel=True)
assert_equals(sorted(G.nodes()),\
['Node 1', 'Node 2', 'Node 3'])
assert_equals( [e for e in sorted(G.edges())],\
[('Node 1', 'Node 2'),
('Node 2', 'Node 3'),
('Node 3', 'Node 1')])
assert_equals( [e for e in sorted(G.edges(data=True))],\
[('Node 1', 'Node 2',
{'color': {'line': 'blue', 'thickness': 3},
'label': 'Edge from node 1 to node 2'}),
('Node 2', 'Node 3',
{'label': 'Edge from node 2 to node 3'}),
('Node 3', 'Node 1',
{'label': 'Edge from node 3 to node 1'})])
def test_read_gml(self):
import os,tempfile
(fd,fname)=tempfile.mkstemp()
fh=open(fname,'w')
fh.write(self.simple_data)
fh.close()
Gin=networkx.read_gml(fname,relabel=True)
G=networkx.parse_gml(self.simple_data,relabel=True)
assert_equals( sorted(G.nodes(data=True)), sorted(Gin.nodes(data=True)))
assert_equals( sorted(G.edges(data=True)), sorted(Gin.edges(data=True)))
os.close(fd)
os.unlink(fname)
def test_relabel_duplicate(self):
data="""
graph
[
label ""
directed 1
node
[
id 0
label "same"
]
node
[
id 1
label "same"
]
]
"""
fh = io.BytesIO(data.encode('UTF-8'))
fh.seek(0)
assert_raises(networkx.NetworkXError,networkx.read_gml,fh,relabel=True)
def test_bool(self):
G=networkx.Graph()
G.add_node(1,on=True)
G.add_edge(1,2,on=False)
data = '\n'.join(list(networkx.generate_gml(G)))
answer ="""graph [
node [
id 0
label 1
on 1
]
node [
id 1
label 2
]
edge [
source 0
target 1
on 0
]
]"""
assert_equal(data,answer)
| gpl-3.0 |
DougBurke/astropy | astropy/visualization/wcsaxes/grid_paths.py | 2 | 3885 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.lines import Path
from ...coordinates.angle_utilities import angular_separation
# Tolerance for WCS round-tripping
ROUND_TRIP_TOL = 1e-1
# Tolerance for discontinuities relative to the median
DISCONT_FACTOR = 10.
def get_lon_lat_path(lon_lat, pixel, lon_lat_check):
"""
Draw a curve, taking into account discontinuities.
Parameters
----------
lon_lat : `~numpy.ndarray`
The longitude and latitude values along the curve, given as a (n,2)
array.
pixel : `~numpy.ndarray`
The pixel coordinates corresponding to ``lon_lat``
lon_lat_check : `~numpy.ndarray`
The world coordinates derived from converting from ``pixel``, which is
used to ensure round-tripping.
"""
# In some spherical projections, some parts of the curve are 'behind' or
# 'in front of' the plane of the image, so we find those by reversing the
# transformation and finding points where the result is not consistent.
sep = angular_separation(np.radians(lon_lat[:, 0]),
np.radians(lon_lat[:, 1]),
np.radians(lon_lat_check[:, 0]),
np.radians(lon_lat_check[:, 1]))
with np.errstate(invalid='ignore'):
sep[sep > np.pi] -= 2. * np.pi
mask = np.abs(sep) > ROUND_TRIP_TOL
# Mask values with invalid pixel positions
mask = mask | np.isnan(pixel[:, 0]) | np.isnan(pixel[:, 1])
# We can now start to set up the codes for the Path.
codes = np.zeros(lon_lat.shape[0], dtype=np.uint8)
codes[:] = Path.LINETO
codes[0] = Path.MOVETO
codes[mask] = Path.MOVETO
# Also need to move to point *after* a hidden value
codes[1:][mask[:-1]] = Path.MOVETO
# We now go through and search for discontinuities in the curve that would
# be due to the curve going outside the field of view, invalid WCS values,
# or due to discontinuities in the projection.
# We start off by pre-computing the step in pixel coordinates from one
# point to the next. The idea is to look for large jumps that might indicate
# discontinuities.
step = np.sqrt((pixel[1:, 0] - pixel[:-1, 0]) ** 2 +
(pixel[1:, 1] - pixel[:-1, 1]) ** 2)
# We search for discontinuities by looking for places where the step
# is larger by more than a given factor compared to the median
# discontinuous = step > DISCONT_FACTOR * np.median(step)
discontinuous = step[1:] > DISCONT_FACTOR * step[:-1]
# Skip over discontinuities
codes[2:][discontinuous] = Path.MOVETO
# The above missed the first step, so check that too
if step[0] > DISCONT_FACTOR * step[1]:
codes[1] = Path.MOVETO
# Create the path
path = Path(pixel, codes=codes)
return path
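# Illustrative sketch (not part of the original module): calling
# ``get_lon_lat_path`` with synthetic, already consistent world/pixel arrays
# (``lon_lat_check`` equals ``lon_lat``, so no point gets masked).  Real
# callers obtain these arrays from a WCS transformation.
def _lon_lat_path_example():
    n = 50
    lon_lat = np.column_stack([np.linspace(0., 90., n), np.linspace(-45., 45., n)])
    pixel = np.column_stack([np.linspace(0., 200., n), np.linspace(0., 100., n)])
    return get_lon_lat_path(lon_lat, pixel, lon_lat_check=lon_lat.copy())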
def get_gridline_path(world, pixel):
"""
Draw a grid line
Parameters
----------
world : `~numpy.ndarray`
The longitude and latitude values along the curve, given as a (n,2)
array.
pixel : `~numpy.ndarray`
The pixel coordinates corresponding to ``lon_lat``
"""
# Mask values with invalid pixel positions
mask = np.isnan(pixel[:, 0]) | np.isnan(pixel[:, 1])
# We can now start to set up the codes for the Path.
codes = np.zeros(world.shape[0], dtype=np.uint8)
codes[:] = Path.LINETO
codes[0] = Path.MOVETO
codes[mask] = Path.MOVETO
# Also need to move to point *after* a hidden value
codes[1:][mask[:-1]] = Path.MOVETO
# We now go through and search for discontinuities in the curve that would
# be due to the curve going outside the field of view, invalid WCS values,
# or due to discontinuities in the projection.
# Create the path
path = Path(pixel, codes=codes)
return path
| bsd-3-clause |
EtienneCmb/tensorpac | tensorpac/utils.py | 1 | 29055 | """Utility functions."""
import logging
import numpy as np
from scipy.signal import periodogram
from tensorpac.methods.meth_pac import _kl_hr
from tensorpac.pac import _PacObj, _PacVisual
from tensorpac.io import set_log_level
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
logger = logging.getLogger('tensorpac')
def pac_vec(f_pha='mres', f_amp='mres'):
"""Generate cross-frequency coupling vectors.
Parameters
----------
f_pha, f_amp : list/tuple/array | 'mres'
Frequency vector for the phase and amplitude. Here you can use
several forms to define those vectors :
* Basic list/tuple (ex: [2, 4] or [8, 12]...)
* List of frequency bands (ex: [[2, 4], [5, 7]]...)
* Dynamic definition : (start, stop, width, step)
* Range definition (ex : np.arange(3) => [[0, 1], [1, 2]])
* Using a string. `f_pha` and `f_amp` can be 'lres', 'mres', 'hres'
respectively for low, middle and high resolution vectors. In that
case, it uses the definition proposed by Bahramisharif et al. 2013
:cite:`bahramisharif2013propagating` i.e
f_pha = [f - f / 4, f + f / 4] and f_amp = [f - f / 8, f + f / 8]
Returns
-------
f_pha, f_amp : array_like
Arrays containing the pairs of phase and amplitude frequencies. Each
vector have a shape of (N, 2).
"""
nb_fcy = dict(lres=10, mres=30, hres=50, demon=70, hulk=100)
if isinstance(f_pha, str):
# get where phase frequencies start / finish / number
f_pha_start, f_pha_end = 2, 20
f_pha_nb = nb_fcy[f_pha]
# f_pha = [f - f / 4, f + f / 4]
f_pha_mid = np.linspace(f_pha_start, f_pha_end, f_pha_nb)
f_pha = np.c_[f_pha_mid - f_pha_mid / 4., f_pha_mid + f_pha_mid / 4.]
if isinstance(f_amp, str):
# get where amplitude frequencies start / finish / number
f_amp_start, f_amp_end = 60, 160
f_amp_nb = nb_fcy[f_amp]
# f_amp = [f - f / 8, f + f / 8]
f_amp_mid = np.linspace(f_amp_start, f_amp_end, f_amp_nb)
f_amp = np.c_[f_amp_mid - f_amp_mid / 8., f_amp_mid + f_amp_mid / 8.]
return _check_freq(f_pha), _check_freq(f_amp)
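# Illustrative sketch (not part of the original module): the different ways of
# defining the frequency vectors accepted by :func:`pac_vec`.
def _pac_vec_examples():
    f_pha, f_amp = pac_vec('mres', 'mres')                       # predefined resolutions
    f_pha_b, _ = pac_vec(f_pha=[[2, 4], [5, 7]], f_amp='hres')   # explicit frequency bands
    f_pha_d, f_amp_d = pac_vec(f_pha=(1, 30, 4, 2), f_amp=(60, 160, 10, 5))  # (start, stop, width, step)
    return f_pha, f_amp, f_pha_b, f_pha_d, f_amp_d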
def _check_freq(f):
"""Check the frequency definition."""
f = np.atleast_2d(np.asarray(f))
#
if len(f.reshape(-1)) == 1:
raise ValueError("The length of f should at least be 2.")
elif 2 in f.shape: # f of shape (N, 2) or (2, N)
if f.shape[1] != 2:
f = f.T
elif np.squeeze(f).shape == (4,): # (f_start, f_end, f_width, f_step)
f = _pair_vectors(*tuple(np.squeeze(f)))
else: # Sequential
f = f.reshape(-1)
f.sort()
f = np.c_[f[0:-1], f[1::]]
return f
def _pair_vectors(f_start, f_end, f_width, f_step):
# Generate two array for phase and amplitude :
fdown = np.arange(f_start, f_end - f_width, f_step)
fup = np.arange(f_start + f_width, f_end, f_step)
return np.c_[fdown, fup]
def pac_trivec(f_start=60., f_end=160., f_width=10.):
"""Generate triangular vector.
By contrast with the pac_vec function, this function generate frequency
vector with an increasing frequency bandwidth.
Parameters
----------
f_start : float | 60.
Starting frequency.
f_end : float | 160.
Ending frequency.
f_width : float | 10.
Frequency bandwidth increase between each band.
Returns
-------
f : array_like
The triangular vector.
tridx : array_like
The triangular index for the reconstruction.
"""
starting = np.arange(f_start, f_end + f_width, f_width)
f, tridx = np.array([]), np.array([])
for num, k in enumerate(starting[0:-1]):
        # Length of the vector to build :
le = len(starting) - (num + 1)
# Create the frequency vector for this starting frequency :
fst = np.c_[np.full(le, k), starting[num + 1::]]
nfst = fst.shape[0]
# Create the triangular index for this vector of frequencies :
idx = np.c_[np.flipud(np.arange(nfst)), np.full(nfst, num)]
tridx = np.concatenate((tridx, idx), axis=0) if tridx.size else idx
f = np.concatenate((f, fst), axis=0) if f.size else fst
return f, tridx
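# A brief sketch (illustrative parameters) of the triangular vector: every band
# starts from the same lower edge but with an increasing bandwidth, e.g.
# [60, 70], [60, 80], ..., [90, 100].
def _demo_pac_trivec():
    f, tridx = pac_trivec(f_start=60., f_end=100., f_width=10.)
    return f, tridx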
class PSD(object):
"""Power Spectrum Density for electrophysiological brain data.
Parameters
----------
x : array_like
Array of data of shape (n_epochs, n_times)
sf : float
The sampling frequency.
"""
def __init__(self, x, sf):
"""Init."""
assert isinstance(x, np.ndarray) and (x.ndim == 2), (
"x should be a 2d array of shape (n_epochs, n_times)")
self._n_trials, self._n_times = x.shape
logger.info(f"Compute PSD over {self._n_trials} trials and "
f"{self._n_times} time points")
self._freqs, self._psd = periodogram(x, fs=sf, window=None,
nfft=self._n_times,
detrend='constant',
return_onesided=True,
scaling='density', axis=1)
def plot(self, f_min=None, f_max=None, confidence=95, interp=None,
log=False, grid=True, fz_title=18, fz_labels=15):
"""Plot the PSD.
Parameters
----------
f_min, f_max : (int, float) | None
Frequency bounds to use for plotting
confidence : (int, float) | None
Light gray confidence interval. If None, no interval will be
displayed
interp : int | None
            Line interpolation integer. For example, if interp is 10, the
            number of points is going to be multiplied by 10
log : bool | False
Use a log scale representation
grid : bool | True
Add a grid to the plot
fz_title : int | 18
Font size for the title
fz_labels : int | 15
            Font size for the x/y labels
Returns
-------
ax : Matplotlib axis
The matplotlib axis that contains the figure
"""
import matplotlib.pyplot as plt
f_types = (int, float)
# interpolation
xvec, yvec = self._freqs, self._psd
if isinstance(interp, int) and (interp > 1):
# from scipy.interpolate import make_interp_spline, BSpline
from scipy.interpolate import interp1d
xnew = np.linspace(xvec[0], xvec[-1], len(xvec) * interp)
f = interp1d(xvec, yvec, kind='quadratic', axis=1)
yvec = f(xnew)
xvec = xnew
# (f_min, f_max)
f_min = xvec[0] if not isinstance(f_min, f_types) else f_min
f_max = xvec[-1] if not isinstance(f_max, f_types) else f_max
# plot main psd
plt.plot(xvec, yvec.mean(0), color='black',
label='mean PSD over trials')
# plot confidence interval
if isinstance(confidence, (int, float)) and (0 < confidence < 100):
logger.info(f" Add {confidence}th confidence interval")
interval = (100. - confidence) / 2
kw = dict(axis=0, interpolation='nearest')
psd_min = np.percentile(yvec, interval, **kw)
psd_max = np.percentile(yvec, 100. - interval, **kw)
plt.fill_between(xvec, psd_max, psd_min, color='lightgray',
alpha=0.5,
label=f"{confidence}th confidence interval")
plt.legend(fontsize=fz_labels)
plt.xlabel("Frequencies (Hz)", fontsize=fz_labels)
plt.ylabel("Power (V**2/Hz)", fontsize=fz_labels)
plt.title(f"PSD mean over {self._n_trials} trials", fontsize=fz_title)
plt.xlim(f_min, f_max)
if log:
from matplotlib.ticker import ScalarFormatter
plt.xscale('log', basex=10)
plt.gca().xaxis.set_major_formatter(ScalarFormatter())
if grid:
plt.grid(color='grey', which='major', linestyle='-',
linewidth=1., alpha=0.5)
plt.grid(color='lightgrey', which='minor', linestyle='--',
linewidth=0.5, alpha=0.5)
return plt.gca()
def plot_st_psd(self, f_min=None, f_max=None, log=False, grid=True,
fz_title=18, fz_labels=15, fz_cblabel=15, **kw):
"""Single-trial PSD plot.
Parameters
----------
f_min, f_max : (int, float) | None
Frequency bounds to use for plotting
log : bool | False
Use a log scale representation
grid : bool | True
Add a grid to the plot
fz_title : int | 18
Font size for the title
fz_labels : int | 15
            Font size for the x/y labels
        fz_cblabel : int | 15
            Font size for the colorbar label
Returns
-------
ax : Matplotlib axis
The matplotlib axis that contains the figure
"""
# manage input variables
kw['fz_labels'] = kw.get('fz_labels', fz_labels)
kw['fz_title'] = kw.get('fz_title', fz_title)
        kw['fz_cblabel'] = kw.get('fz_cblabel', fz_cblabel)
kw['xlabel'] = kw.get('xlabel', "Frequencies (Hz)")
kw['ylabel'] = kw.get('ylabel', "Trials")
kw['title'] = kw.get('title', "Single-trial PSD")
kw['cblabel'] = kw.get('cblabel', "Power (V**2/Hz)")
# (f_min, f_max)
xvec, psd = self._freqs, self._psd
f_types = (int, float)
f_min = xvec[0] if not isinstance(f_min, f_types) else f_min
f_max = xvec[-1] if not isinstance(f_max, f_types) else f_max
# locate (f_min, f_max) indices
f_min_idx = np.abs(xvec - f_min).argmin()
f_max_idx = np.abs(xvec - f_max).argmin()
sl_freq = slice(f_min_idx, f_max_idx)
xvec = xvec[sl_freq]
psd = psd[:, sl_freq]
# make the 2D plot
_viz = _PacVisual()
trials = np.arange(self._n_trials)
_viz.pacplot(psd, xvec, trials, **kw)
if log:
from matplotlib.ticker import ScalarFormatter
plt.xscale('log', basex=10)
plt.gca().xaxis.set_major_formatter(ScalarFormatter())
if grid:
plt.grid(color='grey', which='major', linestyle='-',
linewidth=1., alpha=0.5)
plt.grid(color='lightgrey', which='minor', linestyle='--',
linewidth=0.5, alpha=0.5)
return plt.gca()
def show(self):
"""Display the PSD figure."""
import matplotlib.pyplot as plt
plt.show()
@property
def freqs(self):
"""Get the frequency vector."""
return self._freqs
@property
def psd(self):
"""Get the psd value."""
return self._psd
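# A minimal end-to-end sketch of the PSD class (synthetic random data only, to
# illustrate the expected (n_epochs, n_times) input and the plotting helpers).
def _demo_psd(sf=512.):
    rng = np.random.RandomState(0)
    x = rng.rand(10, int(2 * sf))  # 10 trials of 2 seconds
    psd = PSD(x, sf)
    ax = psd.plot(f_min=1, f_max=40, confidence=95)
    return psd.freqs, psd.psd, ax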
class BinAmplitude(_PacObj):
"""Bin the amplitude according to the phase.
Parameters
----------
x : array_like
Array of data of shape (n_epochs, n_times)
sf : float
The sampling frequency
f_pha : tuple, list | [2, 4]
List of two floats describing the frequency bounds for extracting the
phase
f_amp : tuple, list | [60, 80]
List of two floats describing the frequency bounds for extracting the
amplitude
n_bins : int | 18
Number of bins to use to binarize the phase and the amplitude
dcomplex : {'wavelet', 'hilbert'}
Method for the complex definition. Use either 'hilbert' or
'wavelet'.
cycle : tuple | (3, 6)
Control the number of cycles for filtering (only if dcomplex is
'hilbert'). Should be a tuple of integers where the first one
refers to the number of cycles for the phase and the second for the
amplitude :cite:`bahramisharif2013propagating`.
width : int | 7
        Width of the Morlet wavelet.
edges : int | None
Number of samples to discard to avoid edge effects due to filtering
"""
def __init__(self, x, sf, f_pha=[2, 4], f_amp=[60, 80], n_bins=18,
dcomplex='hilbert', cycle=(3, 6), width=7, edges=None,
n_jobs=-1):
"""Init."""
_PacObj.__init__(self, f_pha=f_pha, f_amp=f_amp, dcomplex=dcomplex,
cycle=cycle, width=width)
# check
x = np.atleast_2d(x)
assert x.ndim <= 2, ("`x` input should be an array of shape "
"(n_epochs, n_times)")
        assert isinstance(sf, (int, float)), ("`sf` input should be an integer "
"or a float")
assert all([isinstance(k, (int, float)) for k in f_pha]), (
"`f_pha` input should be a list of two integers / floats")
assert all([isinstance(k, (int, float)) for k in f_amp]), (
"`f_amp` input should be a list of two integers / floats")
assert isinstance(n_bins, int), "`n_bins` should be an integer"
logger.info(f"Binning {f_amp}Hz amplitude according to {f_pha}Hz "
"phase")
# extract phase and amplitude
kw = dict(keepfilt=False, edges=edges, n_jobs=n_jobs)
pha = self.filter(sf, x, 'phase', **kw)
amp = self.filter(sf, x, 'amplitude', **kw)
# binarize amplitude according to phase
self._amplitude = _kl_hr(pha, amp, n_bins, mean_bins=False).squeeze()
self.n_bins = n_bins
def plot(self, unit='rad', normalize=False, **kw):
"""Plot the amplitude.
Parameters
----------
unit : {'rad', 'deg'}
The unit to use for the phase. Use either 'deg' for degree or 'rad'
for radians
        normalize : bool | False
            Normalize the histogram by its maximum
kw : dict | {}
Additional inputs are passed to the matplotlib.pyplot.bar function
Returns
-------
ax : Matplotlib axis
The matplotlib axis that contains the figure
"""
import matplotlib.pyplot as plt
assert unit in ['rad', 'deg']
if unit == 'rad':
self._phase = np.linspace(-np.pi, np.pi, self.n_bins)
width = 2 * np.pi / self.n_bins
elif unit == 'deg':
self._phase = np.linspace(-180, 180, self.n_bins)
width = 360 / self.n_bins
amp_mean = self._amplitude.mean(1)
if normalize:
amp_mean /= amp_mean.max()
plt.bar(self._phase, amp_mean, width=width, **kw)
plt.xlabel(f"Frequency phase ({self.n_bins} bins)", fontsize=18)
plt.ylabel("Amplitude", fontsize=18)
plt.title("Binned amplitude")
plt.autoscale(enable=True, axis='x', tight=True)
def show(self):
"""Show the figure."""
import matplotlib.pyplot as plt
plt.show()
@property
def amplitude(self):
"""Get the amplitude value."""
return self._amplitude
@property
def phase(self):
"""Get the phase value."""
return self._phase
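# A usage sketch for BinAmplitude (synthetic data; the frequency bands are
# examples only): bin the 60-80Hz amplitude according to the 2-4Hz phase.
def _demo_bin_amplitude(sf=512.):
    rng = np.random.RandomState(0)
    x = rng.rand(20, int(4 * sf))
    b_obj = BinAmplitude(x, sf, f_pha=[2, 4], f_amp=[60, 80], n_bins=18)
    b_obj.plot(unit='deg', normalize=True)
    return b_obj.amplitude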
class ITC(_PacObj, _PacVisual):
"""Compute the Inter-Trials Coherence (ITC).
The Inter-Trials Coherence (ITC) is a measure of phase consistency over
trials for a single recording site (electrode / sensor etc.).
Parameters
----------
x : array_like
Array of data of shape (n_epochs, n_times)
sf : float
The sampling frequency
f_pha : tuple, list | [2, 4]
List of two floats describing the frequency bounds for extracting the
phase
dcomplex : {'wavelet', 'hilbert'}
Method for the complex definition. Use either 'hilbert' or
'wavelet'.
cycle : tuple | 3
Control the number of cycles for filtering the phase (only if dcomplex
is 'hilbert').
width : int | 7
        Width of the Morlet wavelet.
edges : int | None
Number of samples to discard to avoid edge effects due to filtering
"""
def __init__(self, x, sf, f_pha=[2, 4], dcomplex='hilbert', cycle=3,
width=7, edges=None, n_jobs=-1, verbose=None):
"""Init."""
set_log_level(verbose)
_PacObj.__init__(self, f_pha=f_pha, f_amp=[60, 80], dcomplex=dcomplex,
cycle=(cycle, 6), width=width)
_PacVisual.__init__(self)
# check
x = np.atleast_2d(x)
assert x.ndim <= 2, ("`x` input should be an array of shape "
"(n_epochs, n_times)")
self._n_trials = x.shape[0]
logger.info("Inter-Trials Coherence (ITC)")
logger.info(f" extracting {len(self.xvec)} phases")
# extract phase and amplitude
kw = dict(keepfilt=False, edges=edges, n_jobs=n_jobs)
pha = self.filter(sf, x, 'phase', **kw)
# compute itc
self._itc = np.abs(np.exp(1j * pha).mean(1)).squeeze()
self._sf = sf
def plot(self, times=None, **kw):
"""Plot the Inter-Trials Coherence.
Parameters
----------
times : array_like | None
Custom time vector to use
kw : dict | {}
Additional inputs are either pass to the matplotlib.pyplot.plot
function if a single phase band is used, otherwise to the
matplotlib.pyplot.pcolormesh function
Returns
-------
ax : Matplotlib axis
The matplotlib axis that contains the figure
"""
import matplotlib.pyplot as plt
n_pts = self._itc.shape[-1]
if not isinstance(times, np.ndarray):
times = np.arange(n_pts) / self._sf
times = times[self._edges]
        assert len(times) == n_pts, ("The length of the time vector should be "
                                     f"{n_pts}")
xlab = 'Time'
title = f"Inter-Trials Coherence ({self._n_trials} trials)"
if self._itc.ndim == 1:
plt.plot(times, self._itc, **kw)
elif self._itc.ndim == 2:
vmin = kw.get('vmin', np.percentile(self._itc, 1))
vmax = kw.get('vmax', np.percentile(self._itc, 99))
self.pacplot(self._itc, times, self.xvec, vmin=vmin, vmax=vmax,
ylabel="Frequency for phase (Hz)", xlabel=xlab,
title=title, **kw)
return plt.gca()
def show(self):
"""Show the figure."""
import matplotlib.pyplot as plt
plt.show()
@property
def itc(self):
"""Get the itc value."""
return self._itc
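# A usage sketch for ITC (synthetic data): phase consistency across trials for
# a single 2-4Hz band.
def _demo_itc(sf=512.):
    rng = np.random.RandomState(0)
    x = rng.rand(30, int(2 * sf))
    itc = ITC(x, sf, f_pha=[2, 4])
    itc.plot()
    return itc.itc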
class PeakLockedTF(_PacObj, _PacVisual):
"""Peak-Locked Time-frequency representation.
This class can be used in order to re-align time-frequency representations
around a time-point (cue) according to the closest phase peak. This type
of visualization can bring out a cyclic behavior of the amplitude at a
given phase, potentially indicating the presence of a phase-amplitude
coupling. Here's the detailed pipeline :
    * Filter around a single phase frequency band and across multiple
      amplitude frequencies
    * Use a `cue` which defines the time-point to use for the realignment
    * Detect in the filtered phase the closest peak to the cue. This step
      is repeated for each trial in order to get a list of length (n_epochs)
      that contains the number of samples (shift) such that, if the phase is
      shifted, the peak falls onto the cue. A positive shift indicates that
      the phase is moved forward while a negative shift indicates a backward
      move
* Apply, to each trial, this shift to the amplitude
* Plot the mean re-aligned amplitudes
Parameters
----------
x : array_like
Array of data of shape (n_epochs, n_times)
sf : float
The sampling frequency
cue : int, float
Time-point to use in order to detect the closest phase peak. This
parameter works in conjunction with the `times` input below. Use
either :
* An integer and `times` is None to indicate that you want to
realign according to a time-point in sample
        * An integer or a float, with `times` the time vector, if you want
          Tensorpac to automatically infer the sample number around which
          to align
times : array_like | None
Time vector
f_pha : tuple, list | [2, 4]
List of two floats describing the frequency bounds for extracting the
phase
f_amp : tuple, list | [60, 80]
Frequency vector for the amplitude. Here you can use several forms to
define those vectors :
* Dynamic definition : (start, stop, width, step)
* Using a string : `f_amp` can be 'lres', 'mres', 'hres'
respectively for low, middle and high resolution vectors
cycle : tuple | (3, 6)
Control the number of cycles for filtering. Should be a tuple of
integers where the first one refers to the number of cycles for the
phase and the second for the amplitude
:cite:`bahramisharif2013propagating`.
"""
def __init__(self, x, sf, cue, times=None, f_pha=[5, 7], f_amp='hres',
cycle=(3, 6), n_jobs=-1, verbose=None):
"""Init."""
set_log_level(verbose)
# initialize to retrieve filtering methods
_PacObj.__init__(self, f_pha=f_pha, f_amp=f_amp, dcomplex='hilbert',
cycle=cycle)
_PacVisual.__init__(self)
logger.info("PeakLockedTF object defined")
# inputs checking
x = np.atleast_2d(x)
assert isinstance(x, np.ndarray) and (x.ndim == 2)
assert isinstance(sf, (int, float))
assert isinstance(cue, (int, float))
assert isinstance(f_pha, (list, tuple)) and (len(f_pha) == 2)
n_epochs, n_times = x.shape
        # manage cue conversion
if times is None:
cue = int(cue)
times = np.arange(n_times)
logger.info(f" align on sample cue={cue}")
else:
assert isinstance(times, np.ndarray) and (len(times) == n_times)
cue_time = cue
cue = np.abs(times - cue).argmin() - 1
logger.info(f" align on time-point={cue_time} (sample={cue})")
self.cue, self._times = cue, times
# extract phase and amplitudes
logger.info(f" extract phase and amplitudes "
f"(n_amps={len(self.yvec)})")
kw = dict(keepfilt=False, n_jobs=n_jobs)
pha = self.filter(sf, x, 'phase', n_jobs=n_jobs, keepfilt=True)
amp = self.filter(sf, x, 'amplitude', n_jobs=n_jobs)
self._pha, self._amp = pha, amp ** 2
# peak detection
logger.info(f" running peak detection around sample={cue}")
self.shifts = self._peak_detection(self._pha.squeeze(), cue)
# realign phases and amplitudes
logger.info(f" realign the {n_epochs} phases and amplitudes")
self.amp_a = self._shift_signals(self._amp, self.shifts, fill_with=0.)
self.pha_a = self._shift_signals(self._pha, self.shifts, fill_with=0.)
@staticmethod
def _peak_detection(pha, cue):
"""Single trial closest to a cue peak detection.
Parameters
----------
pha : array_like
Array of single trial phases of shape (n_trials, n_times)
cue : int
Cue to use as a reference (in sample unit)
Returns
-------
peaks : array_like
Array of length (n_trials,) describing each delay to apply
to each trial in order to realign the phases. In detail :
                * Positive delays mean that zeros should be prepended
                * Negative delays mean that zeros should be appended
"""
n_trials, n_times = pha.shape
peaks = []
for tr in range(n_trials):
# select the single trial phase
st_pha = pha[tr, :]
# detect all peaks across time points
st_peaks = []
for t in range(n_times - 1):
if (st_pha[t - 1] < st_pha[t]) and (st_pha[t] > st_pha[t + 1]):
st_peaks += [t]
# detect the minimum peak
min_peak = st_peaks[np.abs(np.array(st_peaks) - cue).argmin()]
peaks += [cue - min_peak]
return np.array(peaks)
@staticmethod
def _shift_signals(sig, n_shifts, fill_with=0):
"""Shift an array of signals according to an array of delays.
Parameters
----------
sig : array_like
Array of signals of shape (n_freq, n_trials, n_times)
n_shifts : array_like
Array of delays to apply to each trial of shape (n_trials,)
fill_with : int
Value to prepend / append to each shifted time-series
Returns
-------
sig_shifted : array_like
Array of shifted signals with the same shape as the input
"""
# prepare the needed variables
n_freqs, n_trials, n_pts = sig.shape
sig_shifted = np.zeros_like(sig)
# shift each trial
for tr in range(n_trials):
# select the data of a specific trial
st_shift = n_shifts[tr]
st_sig = sig[:, tr, :]
fill = np.full((n_freqs, abs(st_shift)), fill_with,
dtype=st_sig.dtype)
# shift this specific trial
if st_shift > 0: # move forward = prepend zeros
sig_shifted[:, tr, :] = np.c_[fill, st_sig][:, 0:-st_shift]
elif st_shift < 0: # move backward = append zeros
sig_shifted[:, tr, :] = np.c_[st_sig, fill][:, abs(st_shift):]
return sig_shifted
def plot(self, zscore=False, baseline=None, edges=0, **kwargs):
"""Integrated Peak-Locked TF plotting function.
Parameters
----------
zscore : bool | False
Normalize the power by using a z-score normalization. This can be
            useful in order to compensate for the 1 / f effect in the power
            spectrum. If True, the mean and standard deviation are computed at the
single trial level and across all time points
baseline : tuple | None
Baseline period to use in order to apply the z-score correction.
Should be in samples.
edges : int | 0
Number of pixels to discard to compensate filtering edge effect
(`power[edges:-edges]`).
kwargs : dict | {}
Additional arguments are sent to the
:class:`tensorpac.utils.PeakLockedTF.pacplot` method
"""
# manage additional arguments
kwargs['colorbar'] = False
kwargs['ylabel'] = 'Frequency for amplitude (hz)'
kwargs['xlabel'] = ''
kwargs['fz_labels'] = kwargs.get('fz_labels', 14)
kwargs['fz_cblabel'] = kwargs.get('fz_cblabel', 14)
kwargs['fz_title'] = kwargs.get('fz_title', 16)
sl_times = slice(edges, len(self._times) - edges)
times = self._times[sl_times]
pha_n = self.pha_a[..., sl_times].squeeze()
# z-score normalization
if zscore:
if baseline is None:
bsl_idx = sl_times
else:
assert len(baseline) == 2
bsl_idx = slice(baseline[0], baseline[1])
_mean = self.amp_a[..., bsl_idx].mean(2, keepdims=True)
_std = self.amp_a[..., bsl_idx].std(2, keepdims=True)
            _std[_std == 0.] = 1.  # avoid NaNs from a zero standard deviation
amp_n = (self.amp_a[..., sl_times] - _mean) / _std
else:
amp_n = self.amp_a[..., sl_times]
# grid definition
gs = GridSpec(8, 8)
# image plot
plt.subplot(gs[slice(0, 6), 0:-1])
self.pacplot(amp_n.mean(1), times, self.yvec, **kwargs)
plt.axvline(times[self.cue], color='w', lw=2)
plt.tick_params(bottom=False, labelbottom=False)
ax_1 = plt.gca()
# external colorbar
plt.subplot(gs[slice(1, 5), -1])
cb = plt.colorbar(self._plt_im, pad=0.01, cax=plt.gca())
cb.set_label('Power (V**2/Hz)', fontsize=kwargs['fz_cblabel'])
cb.outline.set_visible(False)
# phase plot
plt.subplot(gs[slice(6, 8), 0:-1])
plt.plot(times, pha_n.T, color='lightgray', alpha=.2, lw=1.)
plt.plot(times, pha_n.mean(0), label='single trial phases', alpha=.2,
lw=1.) # legend tweaking
plt.plot(times, pha_n.mean(0), label='mean phases',
color='#1f77b4')
plt.axvline(times[self.cue], color='k', lw=2)
plt.autoscale(axis='both', tight=True, enable=True)
plt.xlabel("Times", fontsize=kwargs['fz_labels'])
plt.ylabel("V / Hz", fontsize=kwargs['fz_labels'])
# bottom legend
plt.legend(loc='center', bbox_to_anchor=(.5, -.5),
fontsize='x-large', ncol=2)
ax_2 = plt.gca()
return [ax_1, ax_2]
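# A usage sketch for PeakLockedTF (synthetic data, arbitrary cue and baseline):
# realign every trial on the 5-7Hz phase peak closest to t=1s and plot the
# re-aligned amplitudes.
def _demo_peak_locked_tf(sf=512.):
    rng = np.random.RandomState(0)
    x = rng.rand(15, int(3 * sf))
    times = np.arange(x.shape[1]) / sf
    p_obj = PeakLockedTF(x, sf, cue=1., times=times, f_pha=[5, 7], f_amp='hres')
    axes = p_obj.plot(zscore=True, baseline=(0, 256), edges=10)
    return axes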
| bsd-3-clause |
yunfeilu/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
                              err_msg="Clusters should not be centered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
mverzett/rootpy | docs/sphinxext/numpydoc/plot_directive.py | 5 | 19693 | """
A special directive for generating a matplotlib plot.
.. warning::
This is a hacked version of plot_directive.py from Matplotlib.
It's very much subject to change!
Usage
-----
Can be used like this::
.. plot:: examples/example.py
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,5,6])
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
The content is interpreted as doctest formatted if it has a line starting
with ``>>>``.
The ``plot`` directive supports the options
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. Default can be changed in conf.py
and the ``image`` directive options ``alt``, ``height``, ``width``,
``scale``, ``align``, ``class``.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which plot:: file names are relative to.
(If None or empty, file names are relative to the directoly where
the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
TODO
----
* Refactor Latex output; now it's plain images, but it would be nice
to make them appear side-by-side, or in floats.
"""
import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap, traceback
import sphinx
import warnings
warnings.warn("A plot_directive module is also available under "
"matplotlib.sphinxext; expect this numpydoc.plot_directive "
"module to be deprecated after relevant features have been "
"integrated there.",
FutureWarning, stacklevel=2)
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_config_value('plot_pre_code', '', True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_directive('plot', plot_directive, True, (0, 1, False),
**plot_directive_options)
#------------------------------------------------------------------------------
# plot:: directive
#------------------------------------------------------------------------------
from docutils.parsers.rst import directives
from docutils import nodes
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
    return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
plot_directive_options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
}
#------------------------------------------------------------------------------
# Generating output
#------------------------------------------------------------------------------
from docutils import nodes, utils
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
"""
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def run(arguments, content, options, state_machine, state, lineno):
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
options.setdefault('include-source', config.plot_include_source)
# determine input
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if arguments:
if not config.plot_basedir:
source_file_name = os.path.join(rst_dir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
code = open(source_file_name, 'r').read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if options.has_key('format'):
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = makefig(code, source_file_name, build_dir, output_base,
config)
errors = []
except PlotError, err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s: %s" % (output_base, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
opts = [':%s: %s' % (key, val) for key, val in options.items()
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
if j == 0:
src_link = source_link
else:
src_link = None
result = format_template(
TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
shutil.copyfile(fn, os.path.join(dest_dir,
os.path.basename(fn)))
# copy script (if necessary)
if source_file_name == rst_file:
target_name = os.path.join(dest_dir, output_base + source_ext)
f = open(target_name, 'w')
f.write(unescape_doctest(code))
f.close()
return errors
#------------------------------------------------------------------------------
# Run code and capture figures
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
import exceptions
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
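# A small illustration (hypothetical snippet, not used by the directive itself)
# of how split_code_at_show() breaks a script into independently-run pieces at
# each plt.show() call.
def _demo_split_code_at_show():
    sample = ("import matplotlib.pyplot as plt\n"
              "plt.plot([1, 2, 3])\nplt.show()\n"
              "plt.plot([4, 5, 6])\nplt.show()\n")
    return split_code_at_show(sample)  # -> two code pieces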
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None):
# Change the working directory to the directory of the example, so
# it can get at its data files, if any.
pwd = os.getcwd()
old_sys_path = list(sys.path)
if code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Redirect stdout
stdout = sys.stdout
sys.stdout = cStringIO.StringIO()
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
exec setup.config.plot_pre_code in ns
exec code in ns
except (Exception, SystemExit), err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
#------------------------------------------------------------------------------
# Generating figures
#------------------------------------------------------------------------------
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived)
or os.stat(derived).st_mtime < os.stat(original).st_mtime)
def makefig(code, code_path, output_dir, output_base, config):
"""
Run a pyplot script *code* and save the images under *output_dir*
with file names derived from *output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
formats = []
for fmt in config.plot_formats:
if isinstance(fmt, str):
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in range(1000):
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# -- We didn't find the files, so build them
results = []
ns = {}
for i, code_piece in enumerate(code_pieces):
# Clear between runs
plt.close('all')
# Run code
run_code(code_piece, code_path, ns)
# Collect images
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except exceptions.BaseException, err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
# Results
results.append((code_piece, images))
return results
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
try:
from os.path import relpath
except ImportError:
def relpath(target, base=os.curdir):
"""
Return a relative path to the target from either the current
dir or an optional base dir. Base can be a directory
specified either as absolute or relative to current dir.
"""
if not os.path.exists(target):
raise OSError, 'Target does not exist: '+target
if not os.path.isdir(base):
raise OSError, 'Base is not a directory or does not exist: '+base
base_list = (os.path.abspath(base)).split(os.sep)
target_list = (os.path.abspath(target)).split(os.sep)
# On the windows platform the target may be on a completely
# different drive from the base.
if os.name in ['nt','dos','os2'] and base_list[0] <> target_list[0]:
raise OSError, 'Target is on a different drive to base. Target: '+target_list[0].upper()+', base: '+base_list[0].upper()
# Starting from the filepath root, work out how much of the
# filepath is shared by base and target.
for i in range(min(len(base_list), len(target_list))):
if base_list[i] <> target_list[i]: break
else:
# If we broke out of the loop, i is pointing to the first
# differing path elements. If we didn't break out of the
# loop, i is pointing to identical path elements.
# Increment i so that in all cases it points to the first
# differing path elements.
i+=1
rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:]
return os.path.join(*rel_list)
| gpl-3.0 |
tswast/google-cloud-python | language/docs/conf.py | 2 | 11912 | # -*- coding: utf-8 -*-
#
# google-cloud-language documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
__version__ = "0.1.0"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.6.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_flags = ["members"]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Allow markdown includes (so releases.md can include CHANGELOG.md)
# http://www.sphinx-doc.org/en/master/markdown.html
source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"google-cloud-language"
copyright = u"2017, Google"
author = u"Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for Python",
"github_user": "googleapis",
"github_repo": "google-cloud-python",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-language-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-language.tex",
u"google-cloud-language Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"google-cloud-language",
u"google-cloud-language Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"google-cloud-language",
u"google-cloud-language Documentation",
author,
"google-cloud-language",
"GAPIC library for the {metadata.shortName} v1 service",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"gax": ("https://gax-python.readthedocs.org/en/latest/", None),
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
"google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None),
"grpc": ("https://grpc.io/grpc/python/", None),
"requests": ("https://requests.kennethreitz.org/en/stable/", None),
"fastavro": ("https://fastavro.readthedocs.io/en/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
}
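# Illustrative note (not part of the generated config): with the inventories
# mapped above, reStructuredText files and docstrings can cross-reference
# external objects, e.g. ``:class:`pandas.DataFrame``` or ``:mod:`requests```,
# and Sphinx will resolve those links against the external documentation sets.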
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| apache-2.0 |
qifeigit/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
toastedcornflakes/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 42 | 20925 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.model_selection import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.exceptions import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
@ignore_warnings
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = assert_warns(DeprecationWarning, estimator.decision_function, X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
# now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with additional positive
# option. However for the middle part, the comparison of coefficient values
    # for a range of alphas, we had to make an adaptation. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
| bsd-3-clause |
pierrelb/RMG-Py | rmgpy/tools/plot.py | 2 | 18806 | import matplotlib as mpl
# Force matplotlib to not use any Xwindows backend.
# This must be called before pylab, matplotlib.pyplot, or matplotlib.backends is imported
mpl.use('Agg')
import matplotlib.pyplot as plt
from rmgpy.tools.data import GenericData
def parseCSVData(csvFile):
"""
This function parses a typical csv file outputted from a simulation or
sensitivity analysis in the form of
Time (s) Header1 Header2 Header....
t0 val1_0 val2_0 val...
t1..
..
It returns the data in the form
Time, DataList
    Where Time is returned as a GenericData object, and DataList is a list of GenericData objects
"""
import csv
import numpy
import re
# Pattern for matching indices or units
indexPattern = re.compile(r'^\S+\(\d+\)$')
unitsPattern = re.compile(r'\s\(.+\)$')
rxnSensPattern = re.compile('^dln\[\S+\]\/dln\[k\d+\]:\s\S+$')
thermoSensPattern = re.compile('^dln\[\S+\]\/dG\[\S+\]$')
timeData = []; data = {}
f = csv.reader(open(csvFile, 'r'))
columns = zip(*f)
time = GenericData(label = columns[0][0],
data = numpy.array(columns[0][1:],dtype=numpy.float64),
)
# Parse the units from the Time header
if unitsPattern.search(time.label):
label, sep, units = time.label[:-1].rpartition('(')
time.label = label
time.units = units
dataList = []
for col in columns[1:]:
header = col[0]
values = numpy.array(col[1:],dtype=numpy.float64)
data = GenericData(label=header,data=values)
# Parse the index or the label from the header
if indexPattern.search(data.label):
species, sep, index = data.label[:-1].rpartition('(')
# Save the species attribute if an index was found
data.species = species
data.index = int(index)
elif unitsPattern.search(data.label):
label, sep, units = data.label[:-1].rpartition('(')
data.label = label
data.units = units
elif rxnSensPattern.search(data.label):
rxn = data.label.split()[1]
index = data.label.split()[0][:-2].rpartition('dln[k')[2]
data.reaction = rxn
data.index = index
elif thermoSensPattern.search(data.label):
species = data.label[:-1].rpartition('dG[')[2]
data.species = species
if indexPattern.search(species):
data.index = species[:-1].rpartition('(')[2]
dataList.append(data)
return time, dataList
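# Illustrative sketch (hypothetical file name): parseCSVData expects a header
# row followed by numeric rows, e.g. a csv named 'simulation_1_19.csv' with
#
#     Time (s),CO(1),H2O(2)
#     0.0,0.0,0.0
#     1.0,1e-6,2e-6
#
# would be parsed as
#
#     time, dataList = parseCSVData('simulation_1_19.csv')
#     time.units           # 's'
#     dataList[0].species  # 'CO'
#     dataList[0].index    # 1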
def findNearest(array, value):
"""
Returns the index of the closest value in a sorted array
"""
import numpy
idx = (numpy.abs(array-value)).argmin()
return idx
def linearlyInterpolatePoint(xArray, yArray, xValue):
"""
Returns the interpolated yValue for given xValue using data from the two sorted arrays:
"""
#Find the next largest point in xArray that is still smaller than xValue:
lowerIndex=None
for index, x in enumerate(xArray):
if x>xValue:
break
lowerIndex=index
#If xValue is outside the domain of xArray, we use either the min or max points for dydx
if lowerIndex is None:
lowerIndex=0
elif lowerIndex==len(xArray)-1:
lowerIndex=lowerIndex-1
higherIndex=lowerIndex+1
dydx=(yArray[higherIndex]-yArray[lowerIndex])/(xArray[higherIndex]-xArray[lowerIndex])
if xValue < xArray[lowerIndex]:
yValue=yArray[lowerIndex]-dydx*(xValue-xArray[lowerIndex])
else:
yValue=yArray[lowerIndex]+dydx*(xValue-xArray[lowerIndex])
return yValue
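# Illustrative sketch (assumed values): with xArray = [0, 1, 2] and
# yArray = [0, 10, 20], linearlyInterpolatePoint(xArray, yArray, 1.5) returns
# 15.0; for xValue = 3, which lies above the domain, the slope of the last
# segment is reused, giving 30.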
class GenericPlot(object):
"""
A generic plotting class that can be extended to plot other things.
"""
def __init__(self, xVar=None, yVar=None, title='', xlabel='', ylabel=''):
self.xVar = xVar
# Convert yVar to a list if it wasn't one already
if isinstance(yVar, GenericData):
self.yVar = [yVar]
else:
self.yVar = yVar
self.title = title
self.xlabel = xlabel
self.ylabel = ylabel
def plot(self, filename=''):
"""
Execute the actual plotting
"""
mpl.rc('font',family='sans-serif')
fig=plt.figure()
ax = fig.add_subplot(111)
xVar = self.xVar
yVar = self.yVar
if len(yVar) == 1:
y = yVar[0]
ax.plot(xVar.data, y.data)
# Create a ylabel for the label of the y variable
if not self.ylabel and y.label:
ylabel = y.label
if y.units: ylabel += ' ({0})'.format(y.units)
plt.ylabel(ylabel)
else:
for y in yVar:
ax.plot(xVar.data, y.data, '-', label=y.label)
if self.xlabel:
plt.xlabel(self.xlabel)
elif xVar.label:
xlabel = xVar.label
if xVar.units: xlabel += ' ({0})'.format(xVar.units)
plt.xlabel(xlabel)
if self.ylabel:
plt.ylabel(self.ylabel)
if self.title:
plt.title(self.title)
ax.grid('on')
handles, labels = ax.get_legend_handles_labels()
if labels:
# Create a legend outside the plot and adjust width based off of longest legend label
maxStringLength = max([len(label) for label in labels])
width = 1.05 + .011*maxStringLength
legend = ax.legend(handles,labels,loc='upper center', numpoints=1, bbox_to_anchor=(width,1)) #bbox_to_anchor=(1.01,.9)
fig.savefig(filename, bbox_extra_artists=(legend,), bbox_inches='tight')
else:
fig.savefig(filename, bbox_inches='tight')
def barplot(self, filename='', idx=None):
"""
Plot a generic barplot using just the yVars.
        idx is the index of each y-variable to be plotted. If not given, the last value will be used
"""
import numpy
mpl.rc('font',family='sans-serif')
fig = plt.figure()
ax = fig.add_subplot(111)
position = numpy.arange(len(self.yVar),0,-1)
        # Reverse in order to go from top to bottom
if not idx:
idx = -1
ax.barh(position, numpy.array([y.data[idx] for y in self.yVar]), align='center', alpha=0.5)
plt.yticks(position, [y.label for y in self.yVar])
# If any labels or titles are explicitly specified, write them
if self.xlabel:
plt.xlabel(self.xlabel)
if self.ylabel:
plt.ylabel(self.ylabel)
if self.title:
plt.title(self.title)
plt.axis('tight')
fig.savefig(filename, bbox_inches='tight')
def comparePlot(self, otherGenericPlot, filename='', title='', xlabel='', ylabel=''):
"""
Plot a comparison data plot of this data vs a second GenericPlot class
"""
mpl.rc('font',family='sans-serif')
#mpl.rc('text', usetex=True)
fig=plt.figure()
ax = fig.add_subplot(111)
styles = ['-',':']
# Plot the sets of data
for i, plot in enumerate([self, otherGenericPlot]):
# Reset the color cycle per plot to get matching colors in each set
plt.gca().set_prop_cycle(None)
xVar = plot.xVar
yVar = plot.yVar
# Convert yVar to a list if it wasn't one already
if isinstance(yVar, GenericData):
yVar = [yVar]
if len(yVar) == 1:
y = yVar[0]
ax.plot(xVar.data, y.data, styles[i])
                # Save a ylabel based on the y variable's label if yVar contains only 1 variable
if not self.ylabel and y.label:
self.ylabel = y.label
if y.units: self.ylabel += ' ({0})'.format(y.units)
else:
for y in yVar:
ax.plot(xVar.data, y.data, styles[i], label=y.label)
# Plot the second set of data
# Prioritize using the function's x and y labels, otherwise the labels from this data object
if xlabel:
plt.xlabel(xlabel)
elif self.xlabel:
plt.xlabel(self.xlabel)
elif self.xVar.label:
xlabel = self.xVar.label
if self.xVar.units: xlabel += ' ({0})'.format(self.xVar.units)
plt.xlabel(xlabel)
if ylabel:
plt.ylabel(ylabel)
elif self.ylabel:
plt.ylabel(self.ylabel)
# Use user inputted title
if title:
plt.title(title)
ax.grid('on')
handles, labels = ax.get_legend_handles_labels()
if labels:
# Create a legend outside the plot and adjust width based off of longest legend label
maxStringLength = max([len(label) for label in labels])
width = 1.2+ .011*maxStringLength*2
legend = ax.legend(handles,labels,loc='upper center', numpoints=1, bbox_to_anchor=(width,1), ncol=2) #bbox_to_anchor=(1.01,.9)
fig.savefig(filename, bbox_extra_artists=(legend,), bbox_inches='tight')
else:
fig.savefig(filename, bbox_inches='tight')
class SimulationPlot(GenericPlot):
"""
A class for plotting simulations containing mole fraction vs time data.
Can plot the top species in generic simulation csv generated by RMG-Py
i.e. simulation_1_19.csv, found in the solver folder of an RMG job
Use numSpecies as a flag to dictate how many species to plot.
This function will plot the top species, based on maximum mole fraction at
any point in the simulation.
Alternatively, the `species` flag can be used as a dictionary for
plotting specific species within the csvFile
This should be formulated as
{'desired_name_for_species': 'corresponding_chemkin_name_of_species'}
"""
def __init__(self, xVar=None, yVar=None, title='', xlabel='', ylabel='', csvFile='', numSpecies=None, species=None):
GenericPlot.__init__(self, xVar=xVar, yVar=yVar, title=title, xlabel=xlabel, ylabel=ylabel)
self.csvFile = csvFile
self.numSpecies = numSpecies
self.species = species if species else {}
def load(self):
if self.xVar == None and self.yVar == None:
time, dataList = parseCSVData(self.csvFile)
else:
time = self.xVar
dataList = self.yVar
speciesData = []
if self.species:
# A specific set of species was specified to be plotted
for speciesLabel, chemkinLabel in self.species.iteritems():
for data in dataList:
if chemkinLabel == data.label:
# replace the data label with the desired species label
data.label = speciesLabel
speciesData.append(data)
break
else:
for data in dataList:
# Only plot if RMG detects that the data corresponds with a species
# This will not include bath gases
if data.species:
speciesData.append(data)
self.xVar = time
self.yVar = speciesData
def plot(self, filename=''):
filename = filename if filename else 'simulation.png'
self.load()
self.yVar.sort(key=lambda x: max(x.data), reverse=True)
if self.numSpecies:
self.yVar = self.yVar[:self.numSpecies]
GenericPlot.plot(self, filename=filename)
def comparePlot(self, otherSimulationPlot, filename='', title='', xlabel='', ylabel=''):
filename = filename if filename else 'simulation_compare.png'
self.load()
otherSimulationPlot.load()
# Restrict the number of species
if self.numSpecies:
self.yVar = self.yVar[:self.numSpecies]
otherSimulationPlot.yVar = otherSimulationPlot.yVar[:self.numSpecies]
GenericPlot.comparePlot(self, otherSimulationPlot, filename, title, xlabel, ylabel)
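# Illustrative sketch (hypothetical csv name and chemkin labels): plotting the
# five most abundant species from an RMG solver csv might look like
#
#     sim = SimulationPlot(csvFile='simulation_1_19.csv', numSpecies=5,
#                          ylabel='Mole Fraction')
#     sim.plot(filename='simulation.png')
#
# or, restricted to specific species via the {'label': 'chemkin name'} mapping,
#
#     sim = SimulationPlot(csvFile='simulation_1_19.csv',
#                          species={'methane': 'CH4(3)'})
#     sim.plot(filename='methane.png')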
class ReactionSensitivityPlot(GenericPlot):
"""
    A class for plotting the top reaction sensitivities in a generic sensitivity csv file generated by RMG-Py.
`numReactions` is a flag indicating the number of reaction sensitivities to plot.
This function will plot the top sensitivities based on this number, based on the
magnitude of the sensitivity at the final time step.
`reactions` can be used to plot the sensitivities of a specific set of reactions.
This should be formulated as
{'desired_rxn_string': 'corresponding chemkin rxn string'}
barplot() will instead plot a horizontal bar plot of the sensitivities at a given
time step. If time step is not given, the end step will automatically be chosen
"""
def __init__(self, xVar=None, yVar=None, title='', xlabel='', ylabel='', csvFile='', numReactions=None, reactions=None):
GenericPlot.__init__(self, xVar=xVar, yVar=yVar, title=title, xlabel=xlabel, ylabel=ylabel)
self.csvFile = csvFile
self.numReactions = numReactions
self.reactions = reactions if reactions else {}
def load(self):
if self.xVar == None and self.yVar == None:
time, dataList = parseCSVData(self.csvFile)
else:
time = self.xVar
dataList = self.yVar
reactionData = []
if self.reactions:
# A specific set of reaction sensitivities was specified to be plotted
for reactionLabel, chemkinLabel in self.reactions.iteritems():
for data in dataList:
if chemkinLabel == data.reaction:
# replace the data label with the desired species label
data.label = reactionLabel
reactionData.append(data)
break
else:
for data in dataList:
if data.reaction:
reactionData.append(data)
self.xVar = time
self.yVar = reactionData
def plot(self, filename=''):
filename = filename if filename else "reaction_sensitivity.png"
self.load()
# Sort reactions according to absolute max value of final time point
self.yVar.sort(key=lambda x: abs(x.data[-1]), reverse=True)
if self.numReactions:
self.yVar = self.yVar[:self.numReactions]
self.ylabel = 'dln(c)/dln(k_i)'
GenericPlot.plot(self, filename=filename)
def barplot(self, filename='', t=None):
"""
        Time must be indicated in seconds.
        The closest time point will then be used; if no time point is given,
        the end time step will be used.
"""
filename = filename if filename else "reaction_sensitivity.png"
self.load()
# Sort reactions according to absolute max value at the specified time point
# if the time point is not given, use the final time point
if t:
idx = findNearest(self.xVar.data, t)
else:
idx = -1
self.yVar.sort(key=lambda x: abs(x.data[idx]), reverse=True)
if self.numReactions:
self.yVar = self.yVar[:self.numReactions]
if not self.xlabel:
self.xlabel = 'dln(c)/dln(k_i)'
GenericPlot.barplot(self, filename=filename, idx=idx)
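# Illustrative sketch (hypothetical csv name): a horizontal bar plot of the ten
# largest reaction sensitivities at t = 0.5 s might be produced with
#
#     sens = ReactionSensitivityPlot(csvFile='sensitivity_1_SPC_1.csv',
#                                    numReactions=10)
#     sens.barplot(filename='reaction_sensitivity.png', t=0.5)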
class ThermoSensitivityPlot(GenericPlot):
"""
A class for plotting the top sensitivities to a thermo DeltaG value of species within the model.
The value used is the sensitivity at the final time point.
`numSpecies` indicates the number of species to plot.
`species` is a dictionary corresponding to specific species thermo sensitivities to be plotted
barplot() will instead plot a horizontal bar plot of the sensitivities at a given
time step. If time step is not given, the end step will automatically be chosen
"""
def __init__(self, xVar=None, yVar=None, title='', xlabel='', ylabel='', csvFile='', numSpecies=None, species=None):
GenericPlot.__init__(self, xVar=xVar, yVar=yVar, title=title, xlabel=xlabel, ylabel=ylabel)
self.csvFile = csvFile
self.numSpecies = numSpecies
self.species = species if species else {}
def load(self):
if self.xVar == None and self.yVar == None:
time, dataList = parseCSVData(self.csvFile)
else:
time = self.xVar
dataList = self.yVar
thermoData = []
if self.species:
# A specific set of species sensitivities was specified to be plotted
for speciesLabel, chemkinLabel in self.species.iteritems():
for data in dataList:
if chemkinLabel == data.species:
# replace the data label with the desired species label
data.label = speciesLabel
thermoData.append(data)
break
else:
for data in dataList:
if data.species:
thermoData.append(data)
self.xVar = time
self.yVar = thermoData
def plot(self, filename=''):
filename = filename if filename else "thermo_sensitivity.png"
self.load()
self.yVar.sort(key=lambda x: abs(x.data[-1]), reverse = True)
if self.numSpecies:
self.yVar = self.yVar[:self.numSpecies]
if not self.ylabel:
self.ylabel = 'dln(c)/d(G_i) [(kcal/mol)^-1]'
GenericPlot.plot(self, filename=filename)
def barplot(self, filename='', t=None):
filename = filename if filename else "thermo_sensitivity.png"
self.load()
if t:
idx = findNearest(self.xVar.data, t)
else:
idx = -1
self.yVar.sort(key=lambda x: abs(x.data[idx]), reverse = True)
if self.numSpecies:
self.yVar = self.yVar[:self.numSpecies]
if not self.xlabel:
self.xlabel = 'dln(c)/d(G_i) [(kcal/mol)^-1]'
GenericPlot.barplot(self, filename=filename, idx=idx)
| mit |
cuiwei0322/cost_analysis | tall_building_zero_attack_angle_cost_analysis/Result/peak_ng.py | 1 | 2728 | import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
from matplotlib import cm
from matplotlib import pyplot as plt
from itertools import product, combinations
from matplotlib import rc
from matplotlib.font_manager import FontProperties
font_size = 8
rc('font',**{'family':'serif','serif':['Times New Roman'],'size':font_size})
step = 0.04
maxval = 1.0
fig = plt.figure(num=1, figsize=(2.9,2.4), dpi=300, facecolor='w', edgecolor='k')
ax = fig.add_subplot(111, projection='3d')
# create supporting points in polar coordinates
r = np.linspace(0,0.8,40)
p = np.linspace(0,2*np.pi,60)
R,P = np.meshgrid(r,p)
# transform them to cartesian system
X,Y = R*np.cos(P),R*np.sin(P)
mu_x = 0.3656;
sigma_x = 0.1596;
sigma_y = 0.1964;
Z = np.exp(-(X - mu_x)**2/(2*sigma_x**2) - (Y)**2/(2*sigma_y**2)) / (2*np.pi*sigma_x*sigma_y)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, alpha = 0.7)
cset = ax.contourf(X, Y, Z, zdir='z', offset=-1, cmap=cm.coolwarm)
# cset = ax.contourf(X, Y, Z, zdir='x', offset=0.7, cmap=cm.coolwarm, alpha = 0.5)
# cset = ax.contourf(X, Y, Z, zdir='y', offset=0.8, cmap=cm.coolwarm, alpha = 0.5)
theta = np.linspace(0, 2 * np.pi, 100)
r = 0.3
x = r * np.sin(theta)
y = r * np.cos(theta)
z = np.exp(-(x - mu_x)**2/(2*sigma_x**2) - (y)**2/(2*sigma_y**2)) / (2*np.pi*sigma_x*sigma_y)
Z = -1
ax.plot(x, y, z, '-b',zorder = 10,linewidth = 0.5,label = 'Joint PDF on integral path')
ax.plot(x, y, Z, '-.b',zorder = 9,linewidth = 0.5,label = 'Integral path')
# draw an arrow
r = 0.3
Z = -1
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
a = Arrow3D([0,r*np.cos(5/4*np.pi)],[0,r*np.sin(5/4*np.pi)],[Z,Z], mutation_scale=4, lw=0.5, arrowstyle="-|>", color="k")
ax.add_artist(a)
rt = 0.3
ax.text(rt*np.cos(4/4*np.pi)+0.1, rt*np.sin(4/4*np.pi)+0.05, Z, 'r', None)
#done with arrow
# legend
# fontP = FontProperties()
# fontP.set_size('small')
legend = ax.legend(loc='upper center',shadow=False,handlelength = 3.8,prop={'size':font_size})
#
ax.view_init(elev=30, azim=-110)
ax.set_zlim3d(-1, 5)
ax.set_xlabel(r'$r_x$', fontsize = font_size)
ax.set_ylabel(r'$r_y$',fontsize = font_size)
ax.set_zlabel(r'Joint PDF,$f(r_x,r_y)$')
plt.tight_layout()
# plt.show()
plt.savefig("peak_ng.pdf") | apache-2.0 |
TariqAHassan/BioVida | biovida/images/models/template_matching.py | 1 | 13537 | # coding: utf-8
"""
Template Matching
~~~~~~~~~~~~~~~~~
"""
import numpy as np
from scipy.misc import imread
from scipy.misc import imresize
from skimage.feature import match_template
# Notes:
# See: http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.match_template.
# Here, this algorithm has been bootstrapped to make it robust against variance in scale.
# ToDo: the solution implemented here, while it works somewhat well, is lacking.
# The correct approach is to systematically crop the image as long as the signal
# continues to increase, up to some ceiling value.
def _arange_one_first(start, end, step, precision=1):
"""
Wrapper for ``numpy.arange()`` where the number '1' is always first.
Note: zero will be removed, if created, from the final array.
:param start: the starting value.
:type start: ``int``
:param end: the ending value.
:type end: ``int``
:param step: the step size.
:type step: ``int``
:param precision: number of decimals to evenly round the array.
:type precision: ``int``
:return: an array created by ``numpy.arange()`` where the number `1` is invariably the first element in the array.
:rtype: ``ndarray``
"""
arr = np.around(np.arange(start, end, step), precision)
arr = arr[arr != 0]
return np.append(1, arr[arr != 1.0])
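# Illustrative sketch: _arange_one_first(0.5, 2.5, 0.1) returns an array that
# starts with 1.0 followed by 0.5, 0.6, ..., 0.9, 1.1, ..., 2.4 -- the unit
# scaling is always tried first, with zero and the duplicate 1.0 removed.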
def _cropper(base, v_prop, h_prop):
"""
Crops an image horizontally and vertically.
Notes:
- increasing ``h_prop`` increases the amount of the image's left side removed.
    - decreasing ``v_prop`` increases the amount of the lower part of the image removed.
:param base: an image represented as a 2D array.
:type base: ``2D ndarray``
    :param v_prop: the proportion of the image's height to keep, measured from the top.
:type v_prop: ``float``
:param h_prop: the proportion of the image to remove with respect to its width.
:type h_prop: ``float``
:return: a cropped image as a 2D array
:rtype: ``2D ndarray``
"""
# Note: image.shape = (height, width).
# Crop to the left
hcrop_base = base[:, int(base.shape[1] * h_prop):]
# Crop with respect to height
return hcrop_base[:int(base.shape[0] * v_prop)]
def _best_guess_location(match_template_result, scaling=1):
"""
Takes the result of skimage.feature.match_template() and returns (top left x, top left y)
by selecting the item in ``match_template_result`` with the strongest signal.
    :param match_template_result: the output of ``skimage.feature.match_template()``.
:type match_template_result: ``ndarray``
    :return: the upper left location of the strongest peak and the match quality. Form: ``((x, y), match quality)``.
:rtype: ``tuple``
"""
x, y = np.unravel_index(np.argmax(match_template_result), match_template_result.shape)[::-1]
return np.ceil(np.array((x, y)) / float(scaling)), match_template_result.max()
def _robust_match_template_loading(image, param_name):
"""
Loads images for ``robust_match_template()``.
:param image: a path to an image or the image as a 2D array
:type image: ``str`` or ``2D ndarray``
    :param param_name: the name of the parameter which is being loaded (i.e., `pattern_image` or `base_image`).
:type param_name: ``str``
:return: an image as an array.
:rtype: ``2D ndarray``
"""
if 'ndarray' in str(type(image)):
return image
elif isinstance(image, str):
return imread(image, flatten=True)
else:
raise ValueError(
"`{0}` must either be a `ndarray` or a path to an image.".format(param_name))
def _min_base_rescale(base, pattern, base_resizes, round_to=3):
"""
Corrects ``base_resizes`` in instances where it would result
in the ``base`` image being rescaled to a size which is smaller than the ``pattern`` image.
Notes:
    - if ``abs(base_resizes[1] - base_resizes[0]) < step size`` at the end of this function,
only the unit transformation will take place in ``_matching_engine()``.
- this function cannot handle the rare case where the pattern is larger than the base.
:param base: the base image as a 2D array.
:type base: ``2D ndarray``
:param pattern: the pattern image as a 2D array.
:type pattern: ``2D ndarray``
    :param base_resizes: the range over which to rescale the base image. Defined as a tuple of the
form ``(start, end, step size)``.
:type base_resizes: ``tuple``
:param round_to: how many places after the decimal to round to. Defaults to 3.
:type round_to: ``int``
:return: ``base_resizes`` either 'as is' or updated to prevent the case outlined in this function's description.
:rtype: ``tuple``
"""
# ToDo: Does not always block base < pattern.
# Pick the limiting axis in the base image (smallest)
smallest_base_axis = min(base.shape)
    # Pick the limiting axis in the pattern (largest)
size_floor = max(pattern.shape)
min_scalar_for_base = float(np.around(size_floor / smallest_base_axis, round_to))
base_resizes = list(base_resizes)
# Move the rescale into valid range, if needed
if base_resizes[0] < min_scalar_for_base:
base_resizes[0] = min_scalar_for_base
if base_resizes[1] < min_scalar_for_base:
base_resizes[1] += min_scalar_for_base
return tuple(base_resizes)
def _matching_engine(base, pattern, base_resizes, base_image_cropping, end_search_threshold):
"""
Runs ``skimage.feature.match_template()`` against ``base`` for a given ``pattern``
at various sizes of the base image.
:param base: the base image (typically cropped)
:type base: ``2D ndarray``
:param pattern: the pattern image
:type pattern: `2D ndarray``
    :param base_resizes: the range over which to rescale the base image. Defined as a tuple of the
form ``(start, end, step size)``.
:type base_resizes: ``tuple``
:param base_image_cropping: see ``robust_match_template()``.
:type base_image_cropping: ``tuple``
:param end_search_threshold: if a match of this quality is found, end the search. Set ``None`` to disable.
:type end_search_threshold: ``float`` or ``None``
:return: a dictionary of matches made by the ``skimage.feature.match_template()`` function
with the base image scaled by different amounts (represented by the keys).
The values are ``tuples`` of the form ``(top left corner, bottom right corner, match quality)``.
:rtype: ``dict``
"""
# Crop the base image
base_h_crop = int(base.shape[1] * base_image_cropping[1])
cropped_base = _cropper(base, v_prop=base_image_cropping[0], h_prop=base_image_cropping[1])
# Apply tool to ensure the base will always be larger than the pattern
start, end, step = _min_base_rescale(cropped_base, pattern, base_resizes, round_to=3)
match_dict = dict()
for scale in _arange_one_first(start=start, end=end, step=step):
# Rescale the image
scaled_cropped_base = imresize(cropped_base, scale, interp='lanczos')
# ToDo: this try/except should not be needed.
try:
template_match_analysis = match_template(image=scaled_cropped_base, template=pattern)
top_left, match_quality = _best_guess_location(template_match_analysis, scaling=scale)
top_left_adj = top_left + np.array([base_h_crop, 0])
bottom_right = top_left_adj + np.floor(np.array(pattern.shape)[::-1] / scale)
match_dict[scale] = (list(top_left_adj), list(bottom_right), match_quality)
if isinstance(end_search_threshold, (int, float)):
if match_quality >= end_search_threshold:
break
except:
pass
return match_dict
def _corners_calc(top_left, bottom_right):
"""
Compute a dict. with a bounding box derived from
    a top left and bottom right corner.
:param top_left: tuple of the form: (x, y).
    :type top_left: ``tuple``
    :param bottom_right: tuple of the form: (x, y)
    :type bottom_right: ``tuple``
:return: a dictionary with the following keys: 'top_left', 'top_right', 'bottom_left' and 'bottom_right'.
             Values are tuples of the form (x, y).
:rtype: ``dict``
"""
d = {'top_left': top_left,
'top_right': (bottom_right[0], top_left[1]),
'bottom_left': (top_left[0], bottom_right[1]),
'bottom_right': bottom_right}
return {k: tuple(map(int, v)) for k, v in d.items()}
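# Illustrative sketch: _corners_calc(top_left=(0, 0), bottom_right=(10, 20))
# returns {'top_left': (0, 0), 'top_right': (10, 0),
#          'bottom_left': (0, 20), 'bottom_right': (10, 20)}.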
def robust_match_template(pattern_image,
base_image,
base_resizes=(0.5, 2.5, 0.1),
end_search_threshold=0.875,
base_image_cropping=(0.15, 0.5)):
"""
    Search for a pattern image in a base image using an algorithm which is robust
against variation in the size of the pattern in the base image.
Method: Fast Normalized Cross-Correlation.
Limitations:
    - Cropping is limited to the top left of the base image. This can be circumvented by setting
``base_image_cropping=(1, 1)`` and cropping ``base_image`` oneself.
- This function may become unstable in situations where the pattern image is larger than the base image.
:param pattern_image: the pattern image.
.. warning::
If a `ndarray` is passed to `pattern_image`, it *must* be preprocessed to be a 2D array,
e.g., ``scipy.misc.imread(pattern_image, flatten=True)``
:type pattern_image: ``str`` or ``ndarray``
:param base_image: the base image in which to look for the ``pattern_image``.
.. warning::
If a `ndarray` is passed to `base_image`, it *must* be preprocessed to be a 2D array,
e.g., ``scipy.misc.imread(base_image, flatten=True)``
:type base_image: ``str`` or ``ndarray``
    :param base_resizes: the range over which to rescale the base image. Defined as a tuple of the
                         form ``(start, end, step size)``. Defaults to ``(0.5, 2.5, 0.1)``.
:type base_resizes: ``tuple``
:param end_search_threshold: if a match of this quality is found, end the search. Set equal to ``None`` to disable.
Defaults to 0.875.
:type end_search_threshold: ``float`` or ``None``
:param base_image_cropping: the amount of the image to crop with respect to the x and y axis.
form: ``(height, width)``. Defaults to ``(0.15, 0.5)``.
Notes:
- Decreasing ``height`` will increase the amount of the lower part of the image removed.
- Increasing ``width`` will increase the amount of the image's left half removed.
    - Cropping more of the base image reduces the probability of the algorithm getting confused.
However, if the image is cropped too much, the target pattern itself could be removed.
:type base_image_cropping: ``tuple``
:return: A dictionary of the form: ``{"bounding_box": ..., "match_quality": ..., "base_image_shape": ...}``.
- bounding_box (``dict``): ``{'bottom_right': (x, y), 'top_right': (x, y), 'top_left': (x, y), 'bottom_left': (x, y)}``.
- match_quality (``float``): quality of the match.
- base_image_shape (``tuple``): the size of the base image provided. Form: ``(width (x), height (y))``.
:rtype: ``dict``
"""
pattern = _robust_match_template_loading(pattern_image, "pattern_image")
base = _robust_match_template_loading(base_image, "base_image")
match_dict = _matching_engine(base, pattern, base_resizes, base_image_cropping,
end_search_threshold)
if len(list(match_dict.keys())):
best_match = max(list(match_dict.values()), key=lambda x: x[2])
bounding_box = _corners_calc(best_match[0], best_match[1])
match_quality = best_match[2]
else:
bounding_box = None
match_quality = None
# Return the bounding box, match quality and the size of the base image
return {"bounding_box": bounding_box, "match_quality": match_quality,
"base_image_shape": base.shape[::-1]}
def _box_show(base_image_path, pattern_image_path):
"""
This function uses matplotlib to show the bounding box for the pattern.
:param base_image_path: the path to the base image.
:type base_image_path: ``str``
:param pattern_image_path: the path to the pattern image.
:type pattern_image_path: ``str``
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# Load the Images
base_image = imread(base_image_path, flatten=True)
pattern_image = imread(pattern_image_path, flatten=True)
# Run the analysis
rslt = robust_match_template(pattern_image, base_image)
# Extract the top left and top right
top_left = rslt['bounding_box']['top_left']
bottom_right = rslt['bounding_box']['bottom_right']
# Compute the width and height
width = abs(bottom_right[0] - top_left[0])
height = abs(bottom_right[1] - top_left[1])
# Show the base image
fig, (ax1) = plt.subplots(ncols=1, figsize=(5, 5), sharex=True, sharey=True)
ax1.imshow(base_image, 'gray')
# Add the bounding box
ax1.add_patch(patches.Rectangle(top_left, width, height, fill=False, edgecolor="red"))
fig.show()
| bsd-3-clause |
eusoubrasileiro/fatiando_seismic | cookbook/seismic_wavefd_love_wave.py | 9 | 2602 | """
Seismic: 2D finite difference simulation of elastic SH wave propagation in a
medium with a discontinuity (i.e., Moho), generating Love waves.
"""
import numpy as np
from matplotlib import animation
from fatiando import gridder
from fatiando.seismic import wavefd
from fatiando.vis import mpl
# Set the parameters of the finite difference grid
shape = (200, 1000)
area = [0, 800000, 0, 160000]
# Make a density and S wave velocity model
density = 2400 * np.ones(shape)
svel = 3500 * np.ones(shape)
moho = 50
density[moho:] = 2800
svel[moho:] = 4500
mu = wavefd.lame_mu(svel, density)
# Make a wave source from a mexican hat wavelet
sources = [wavefd.MexHatSource(
10000, 10000, area, shape, 100000, 0.5, delay=2)]
# Get the iterator. This part only generates an iterator object. The actual
# computations take place at each iteration in the for loop below
dt = wavefd.maxdt(area, shape, svel.max())
duration = 250
maxit = int(duration / dt)
stations = [[100000, 0], [700000, 0]]
snapshots = int(1. / dt)
simulation = wavefd.elastic_sh(mu, density, area, dt, maxit, sources, stations,
snapshots, padding=70, taper=0.005)
# This part makes an animation using matplotlibs animation API
background = svel * 5 * 10 ** -7
fig = mpl.figure(figsize=(10, 8))
mpl.subplots_adjust(right=0.98, left=0.11, hspace=0.3, top=0.93)
mpl.subplot(3, 1, 1)
mpl.title('Seismogram 1')
seismogram1, = mpl.plot([], [], '-k')
mpl.xlim(0, duration)
mpl.ylim(-0.1, 0.1)
mpl.ylabel('Amplitude')
mpl.subplot(3, 1, 2)
mpl.title('Seismogram 2')
seismogram2, = mpl.plot([], [], '-k')
mpl.xlim(0, duration)
mpl.ylim(-0.1, 0.1)
mpl.ylabel('Amplitude')
ax = mpl.subplot(3, 1, 3)
mpl.title('time: 0.0 s')
wavefield = mpl.imshow(background, extent=area, cmap=mpl.cm.gray_r,
vmin=-0.005, vmax=0.005)
mpl.points(stations, '^b', size=8)
mpl.text(750000, 20000, 'Crust')
mpl.text(740000, 100000, 'Mantle')
fig.text(0.82, 0.33, 'Seismometer 2')
fig.text(0.16, 0.33, 'Seismometer 1')
mpl.ylim(area[2:][::-1])
mpl.xlabel('x (km)')
mpl.ylabel('z (km)')
mpl.m2km()
times = np.linspace(0, dt * maxit, maxit)
# This function updates the plot every few timesteps
def animate(i):
t, u, seismogram = simulation.next()
mpl.title('time: %0.1f s' % (times[t]))
wavefield.set_array((background + u)[::-1])
seismogram1.set_data(times[:t + 1], seismogram[0][:t + 1])
seismogram2.set_data(times[:t + 1], seismogram[1][:t + 1])
return wavefield, seismogram1, seismogram2
anim = animation.FuncAnimation(
fig, animate, frames=maxit / snapshots, interval=1)
mpl.show()
| bsd-3-clause |
freeman-lab/dask | dask/dataframe/utils.py | 1 | 3562 | import pandas as pd
import numpy as np
from collections import Iterator
import toolz
def shard_df_on_index(df, divisions):
""" Shard a DataFrame by ranges on its index
Example
-------
>>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]})
>>> df
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
>>> shards = list(shard_df_on_index(df, [2, 4]))
>>> shards[0]
a b
0 0 5
1 10 4
>>> shards[1]
a b
2 20 3
3 30 2
>>> shards[2]
a b
4 40 1
"""
if isinstance(divisions, Iterator):
divisions = list(divisions)
if not len(divisions):
yield df
else:
divisions = np.array(divisions)
df = df.sort_index()
indices = df.index.searchsorted(divisions)
yield df.iloc[:indices[0]]
for i in range(len(indices) - 1):
yield df.iloc[indices[i]: indices[i+1]]
yield df.iloc[indices[-1]:]
def unique(divisions):
""" Polymorphic unique function
>>> list(unique([1, 2, 3, 1, 2, 3]))
[1, 2, 3]
>>> unique(np.array([1, 2, 3, 1, 2, 3]))
array([1, 2, 3])
>>> unique(pd.Categorical(['Alice', 'Bob', 'Alice'], ordered=False))
[Alice, Bob]
Categories (2, object): [Alice, Bob]
"""
if isinstance(divisions, np.ndarray):
return np.unique(divisions)
if isinstance(divisions, pd.Categorical):
return pd.Categorical.from_codes(np.unique(divisions.codes),
divisions.categories, divisions.ordered)
if isinstance(divisions, (tuple, list, Iterator)):
return tuple(toolz.unique(divisions))
raise NotImplementedError()
def _categorize(categories, df):
""" Categorize columns in dataframe
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [0, 2, 0]})
>>> categories = {'y': ['A', 'B', 'c']}
>>> _categorize(categories, df)
x y
0 1 A
1 2 c
2 3 A
"""
if isinstance(df, pd.Series):
if df.name in categories:
cat = pd.Categorical.from_codes(df.values, categories[df.name])
return pd.Series(cat, index=df.index)
else:
return df
else:
return pd.DataFrame(
dict((col, pd.Categorical.from_codes(df[col], categories[col])
if col in categories
else df[col])
for col in df.columns),
columns=df.columns,
index=df.index)
def strip_categories(df):
""" Strip categories from dataframe
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': ['A', 'B', 'A']})
>>> df['y'] = df.y.astype('category')
>>> strip_categories(df)
x y
0 1 0
1 2 1
2 3 0
"""
return pd.DataFrame(dict((col, df[col].cat.codes
if iscategorical(df.dtypes[col])
else df[col])
for col in df.columns),
columns=df.columns,
index=df.index)
def iscategorical(dt):
return isinstance(dt, pd.core.common.CategoricalDtype)
def get_categories(df):
"""
Get Categories of dataframe
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': ['A', 'B', 'A']})
>>> df['y'] = df.y.astype('category')
>>> get_categories(df)
{'y': Index([u'A', u'B'], dtype='object')}
"""
return dict((col, df[col].cat.categories) for col in df.columns
if iscategorical(df.dtypes[col]))
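# A minimal usage sketch of the categorical helpers above (illustrative only;
# the _example_* name is not part of the original module): get_categories()
# records the mapping, strip_categories() reduces categorical columns to
# integer codes, and _categorize() rebuilds the original values from them.
def _example_categorical_roundtrip():
    df = pd.DataFrame({'x': [1, 2, 3], 'y': ['A', 'B', 'A']})
    df['y'] = df.y.astype('category')
    cats = get_categories(df)        # {'y': Index(['A', 'B'], ...)}
    codes_df = strip_categories(df)  # 'y' column becomes codes 0, 1, 0
    restored = _categorize(cats, codes_df)
    assert list(restored['y']) == ['A', 'B', 'A']
    return restored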
| bsd-3-clause |
JsNoNo/scikit-learn | sklearn/metrics/pairwise.py | 49 | 44088 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal, or the equivalent
check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
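# A minimal sketch (illustrative only; the _example_* helper is not part of
# scikit-learn) of the pre-computed-norms pattern described in the docstring
# above: when Y stays fixed across many queries, dot(y, y) can be computed
# once and passed in via Y_norm_squared.
def _example_precomputed_norms():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    Y = rng.rand(8, 3)
    YY = (Y ** 2).sum(axis=1)       # reusable across repeated queries
    D1 = euclidean_distances(X, Y, Y_norm_squared=YY)
    D2 = euclidean_distances(X, Y)  # same result, norms recomputed
    assert np.allclose(D1, D2)
    return D1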
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
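# A minimal sketch (illustrative only; not part of scikit-learn) of the
# equivalence stated in the docstring above, here phrased as nearest-centroid
# assignment: the batched function matches the full distance matrix.
def _example_argmin_min():
    rng = np.random.RandomState(0)
    X = rng.rand(10, 2)        # points
    centers = rng.rand(3, 2)   # candidate centroids
    labels, dists = pairwise_distances_argmin_min(X, centers)
    full = euclidean_distances(X, centers)
    assert np.array_equal(labels, full.argmin(axis=1))
    assert np.allclose(dists, full.min(axis=1))
    return labels, dists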
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
    Notes
    -----
    The cosine distance is equivalent to half the squared
    euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
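# A small numerical check (illustrative only; not part of scikit-learn) of
# the note above: on unit-normalized samples the cosine distance equals half
# the squared euclidean distance.
def _example_cosine_vs_euclidean():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    Y = rng.rand(4, 3)
    d_cos = paired_cosine_distances(X, Y)
    d_euc = paired_euclidean_distances(normalize(X), normalize(Y))
    assert np.allclose(d_cos, 0.5 * d_euc ** 2)
    return d_cos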
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
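# A small check (illustrative only; not part of scikit-learn): for dense
# input the polynomial kernel is just (gamma * X.dot(Y.T) + coef0) ** degree,
# with gamma defaulting to 1 / n_features.
def _example_polynomial_kernel():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 4)
    Y = rng.rand(6, 4)
    K = polynomial_kernel(X, Y, degree=3, coef0=1)
    K_ref = (X.dot(Y.T) / X.shape[1] + 1) ** 3
    assert np.allclose(K, K_ref)
    return K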
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
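# A small check (illustrative only; not part of scikit-learn): the RBF kernel
# is exp(-gamma * squared euclidean distance), so it can be reproduced
# directly from euclidean_distances.
def _example_rbf_kernel():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 4)
    gamma = 0.5
    K = rbf_kernel(X, gamma=gamma)
    K_ref = np.exp(-gamma * euclidean_distances(X, squared=True))
    assert np.allclose(K, K_ref)
    return K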
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
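# A slow pure-numpy reference (illustrative only; not part of scikit-learn)
# for the additive chi2 formula above, handy for sanity-checking the compiled
# implementation on small non-negative inputs. Entries with x + y == 0 are
# skipped: non-negative inputs then force x == y == 0, so the term is zero.
def _additive_chi2_reference(X, Y):
    X = np.asarray(X, dtype=float)
    Y = np.asarray(Y, dtype=float)
    K = np.zeros((X.shape[0], Y.shape[0]))
    for i, x in enumerate(X):
        for j, y in enumerate(Y):
            denom = x + y
            nz = denom != 0
            K[i, j] = -np.sum((x[nz] - y[nz]) ** 2 / denom[nz])
    return K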
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
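# A small check (illustrative only; not part of scikit-learn): chi2_kernel is
# simply the exponentiated additive chi2 kernel.
def _example_chi2_kernel():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 6)   # non-negative, histogram-like rows
    K = chi2_kernel(X, gamma=0.5)
    K_ref = np.exp(0.5 * additive_chi2_kernel(X))
    assert np.allclose(K, K_ref)
    return K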
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
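# A minimal sketch (illustrative only; not part of scikit-learn) of the
# callable-metric path described above: the callable receives two 1-D rows,
# and its result matches the equivalent scipy string metric.
def _example_callable_metric():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    def chebyshev(u, v):
        return np.max(np.abs(u - v))
    D = pairwise_distances(X, metric=chebyshev)
    D_ref = pairwise_distances(X, metric='chebyshev')
    assert np.allclose(D, D_ref)
    return D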
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
| bsd-3-clause |
loli/sklearn-ensembletrees | examples/manifold/plot_manifold_sphere.py | 1 | 4619 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the Manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space; unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
well that of representing a flat map of the Earth, as with a
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(241, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(242 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(246)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(247)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(248)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
calico/basenji | bin/basenji_data.py | 1 | 31215 | #!/usr/bin/env python
# Copyright 2017 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import collections
import gzip
import heapq
import json
import math
import pdb
import os
import random
import shutil
import subprocess
import sys
import tempfile
import time
import h5py
import numpy as np
import pandas as pd
from basenji import genome
from basenji import util
try:
import slurm
except ModuleNotFoundError:
pass
'''
basenji_data.py
Compute model sequences from the genome, extracting DNA coverage values.
'''
################################################################################
def main():
usage = 'usage: %prog [options] <fasta_file> <targets_file>'
parser = OptionParser(usage)
parser.add_option('-b', dest='blacklist_bed',
help='Set blacklist nucleotides to a baseline value.')
parser.add_option('--break', dest='break_t',
default=786432, type='int',
help='Break in half contigs above length [Default: %default]')
parser.add_option('-c','--crop', dest='crop_bp',
default=0, type='int',
help='Crop bp off each end [Default: %default]')
parser.add_option('-d', dest='sample_pct',
default=1.0, type='float',
help='Down-sample the segments')
parser.add_option('-f', dest='folds',
default=None, type='int',
help='Generate cross fold split [Default: %default]')
parser.add_option('-g', dest='gaps_file',
help='Genome assembly gaps BED [Default: %default]')
parser.add_option('-i', dest='interp_nan',
default=False, action='store_true',
help='Interpolate NaNs [Default: %default]')
parser.add_option('-l', dest='seq_length',
default=131072, type='int',
help='Sequence length [Default: %default]')
parser.add_option('--limit', dest='limit_bed',
help='Limit to segments that overlap regions in a BED file')
parser.add_option('--local', dest='run_local',
default=False, action='store_true',
help='Run jobs locally as opposed to on SLURM [Default: %default]')
parser.add_option('-o', dest='out_dir',
default='data_out',
help='Output directory [Default: %default]')
parser.add_option('-p', dest='processes',
default=None, type='int',
help='Number parallel processes [Default: %default]')
parser.add_option('--peaks', dest='peaks_only',
default=False, action='store_true',
help='Create contigs only from peaks [Default: %default]')
parser.add_option('-r', dest='seqs_per_tfr',
default=256, type='int',
help='Sequences per TFRecord file [Default: %default]')
parser.add_option('--restart', dest='restart',
default=False, action='store_true',
help='Continue progress from midpoint. [Default: %default]')
parser.add_option('--seed', dest='seed',
default=44, type='int',
help='Random seed [Default: %default]')
parser.add_option('--snap', dest='snap',
default=1, type='int',
help='Snap sequences to multiple of the given value [Default: %default]')
parser.add_option('--st', '--split_test', dest='split_test',
default=False, action='store_true',
help='Exit after split. [Default: %default]')
parser.add_option('--stride', '--stride_train', dest='stride_train',
default=1., type='float',
help='Stride to advance train sequences [Default: seq_length]')
parser.add_option('--stride_test', dest='stride_test',
default=1., type='float',
help='Stride to advance valid and test sequences [Default: seq_length]')
parser.add_option('-t', dest='test_pct_or_chr',
default=0.05, type='str',
help='Proportion of the data for testing [Default: %default]')
parser.add_option('-u', dest='umap_bed',
help='Unmappable regions in BED format')
parser.add_option('--umap_t', dest='umap_t',
default=0.5, type='float',
help='Remove sequences with more than this unmappable bin % [Default: %default]')
parser.add_option('--umap_clip', dest='umap_clip',
default=1, type='float',
help='Clip values at unmappable positions to distribution quantiles, eg 0.25. [Default: %default]')
parser.add_option('--umap_tfr', dest='umap_tfr',
default=False, action='store_true',
help='Save umap array into TFRecords [Default: %default]')
parser.add_option('-w', dest='pool_width',
default=128, type='int',
help='Sum pool width [Default: %default]')
parser.add_option('-v', dest='valid_pct_or_chr',
default=0.05, type='str',
help='Proportion of the data for validation [Default: %default]')
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error('Must provide FASTA and sample coverage labels and paths.')
else:
fasta_file = args[0]
targets_file = args[1]
random.seed(options.seed)
np.random.seed(options.seed)
if options.break_t is not None and options.break_t < options.seq_length:
print('Maximum contig length --break cannot be less than sequence length.', file=sys.stderr)
exit(1)
# transform proportion strides to base pairs
if options.stride_train <= 1:
print('stride_train %.f'%options.stride_train, end='')
options.stride_train = options.stride_train*options.seq_length
print(' converted to %f' % options.stride_train)
options.stride_train = int(np.round(options.stride_train))
if options.stride_test <= 1:
if options.folds is None:
print('stride_test %.f'%options.stride_test, end='')
options.stride_test = options.stride_test*options.seq_length
print(' converted to %f' % options.stride_test)
options.stride_test = int(np.round(options.stride_test))
# check snap
if options.snap is not None:
if np.mod(options.seq_length, options.snap) != 0:
raise ValueError('seq_length must be a multiple of snap')
if np.mod(options.stride_train, options.snap) != 0:
raise ValueError('stride_train must be a multiple of snap')
if np.mod(options.stride_test, options.snap) != 0:
raise ValueError('stride_test must be a multiple of snap')
# setup output directory
if os.path.isdir(options.out_dir) and not options.restart:
print('Remove output directory %s or use --restart option.' % options.out_dir)
exit(1)
elif not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
# read target datasets
targets_df = pd.read_csv(targets_file, index_col=0, sep='\t')
################################################################
# define genomic contigs
################################################################
if not options.restart:
chrom_contigs = genome.load_chromosomes(fasta_file)
# remove gaps
if options.gaps_file:
chrom_contigs = genome.split_contigs(chrom_contigs,
options.gaps_file)
# ditch the chromosomes for contigs
contigs = []
for chrom in chrom_contigs:
contigs += [Contig(chrom, ctg_start, ctg_end)
for ctg_start, ctg_end in chrom_contigs[chrom]]
# limit to a BED file
if options.limit_bed is not None:
contigs = limit_contigs(contigs, options.limit_bed)
# limit to peaks
if options.peaks_only:
peaks_bed = curate_peaks(targets_df, options.out_dir, options.pool_width, options.crop_bp)
contigs = limit_contigs(contigs, peaks_bed)
# filter for large enough
contigs = [ctg for ctg in contigs if ctg.end - ctg.start >= options.seq_length]
# break up large contigs
if options.break_t is not None:
contigs = break_large_contigs(contigs, options.break_t)
# print contigs to BED file
# ctg_bed_file = '%s/contigs.bed' % options.out_dir
# write_seqs_bed(ctg_bed_file, contigs)
################################################################
# divide between train/valid/test
################################################################
# label folds
if options.folds is not None:
fold_labels = ['fold%d' % fi for fi in range(options.folds)]
num_folds = options.folds
else:
fold_labels = ['train', 'valid', 'test']
num_folds = 3
if not options.restart:
if options.folds is not None:
# divide by fold pct
fold_contigs = divide_contigs_folds(contigs, options.folds)
else:
try:
# convert to float pct
valid_pct = float(options.valid_pct_or_chr)
test_pct = float(options.test_pct_or_chr)
assert(0 <= valid_pct <= 1)
assert(0 <= test_pct <= 1)
# divide by pct
fold_contigs = divide_contigs_pct(contigs, test_pct, valid_pct)
except (ValueError, AssertionError):
# divide by chr
valid_chrs = options.valid_pct_or_chr.split(',')
test_chrs = options.test_pct_or_chr.split(',')
fold_contigs = divide_contigs_chr(contigs, test_chrs, valid_chrs)
# rejoin broken contigs within set
for fi in range(len(fold_contigs)):
fold_contigs[fi] = rejoin_large_contigs(fold_contigs[fi])
# write labeled contigs to BED file
ctg_bed_file = '%s/contigs.bed' % options.out_dir
ctg_bed_out = open(ctg_bed_file, 'w')
for fi in range(len(fold_contigs)):
for ctg in fold_contigs[fi]:
line = '%s\t%d\t%d\t%s' % (ctg.chr, ctg.start, ctg.end, fold_labels[fi])
print(line, file=ctg_bed_out)
ctg_bed_out.close()
if options.split_test:
exit()
################################################################
# define model sequences
################################################################
if not options.restart:
fold_mseqs = []
for fi in range(num_folds):
if fold_labels[fi] in ['valid','test']:
stride_fold = options.stride_test
else:
stride_fold = options.stride_train
# stride sequences across contig
fold_mseqs_fi = contig_sequences(fold_contigs[fi], options.seq_length,
stride_fold, options.snap, fold_labels[fi])
fold_mseqs.append(fold_mseqs_fi)
# shuffle
random.shuffle(fold_mseqs[fi])
# down-sample
if options.sample_pct < 1.0:
fold_mseqs[fi] = random.sample(fold_mseqs[fi], int(options.sample_pct*len(fold_mseqs[fi])))
# merge into one list
mseqs = [ms for fm in fold_mseqs for ms in fm]
################################################################
# mappability
################################################################
if not options.restart:
if options.umap_bed is not None:
if shutil.which('bedtools') is None:
print('Install Bedtools to annotate unmappable sites', file=sys.stderr)
exit(1)
# annotate unmappable positions
mseqs_unmap = annotate_unmap(mseqs, options.umap_bed, options.seq_length,
options.pool_width, options.crop_bp)
# filter unmappable
mseqs_map_mask = (mseqs_unmap.mean(axis=1, dtype='float64') < options.umap_t)
mseqs = [mseqs[i] for i in range(len(mseqs)) if mseqs_map_mask[i]]
mseqs_unmap = mseqs_unmap[mseqs_map_mask,:]
# write to file
unmap_npy = '%s/mseqs_unmap.npy' % options.out_dir
np.save(unmap_npy, mseqs_unmap)
# write sequences to BED
seqs_bed_file = '%s/sequences.bed' % options.out_dir
write_seqs_bed(seqs_bed_file, mseqs, True)
else:
# read from directory
seqs_bed_file = '%s/sequences.bed' % options.out_dir
unmap_npy = '%s/mseqs_unmap.npy' % options.out_dir
mseqs = []
fold_mseqs = []
for fi in range(num_folds):
fold_mseqs.append([])
for line in open(seqs_bed_file):
a = line.split()
msg = ModelSeq(a[0], int(a[1]), int(a[2]), a[3])
mseqs.append(msg)
if a[3] == 'train':
fi = 0
elif a[3] == 'valid':
fi = 1
elif a[3] == 'test':
fi = 2
else:
fi = int(a[3].replace('fold',''))
fold_mseqs[fi].append(msg)
################################################################
# read sequence coverage values
################################################################
seqs_cov_dir = '%s/seqs_cov' % options.out_dir
if not os.path.isdir(seqs_cov_dir):
os.mkdir(seqs_cov_dir)
read_jobs = []
for ti in range(targets_df.shape[0]):
genome_cov_file = targets_df['file'].iloc[ti]
seqs_cov_stem = '%s/%d' % (seqs_cov_dir, ti)
seqs_cov_file = '%s.h5' % seqs_cov_stem
clip_ti = None
if 'clip' in targets_df.columns:
clip_ti = targets_df['clip'].iloc[ti]
clipsoft_ti = None
if 'clip_soft' in targets_df.columns:
clipsoft_ti = targets_df['clip_soft'].iloc[ti]
scale_ti = 1
if 'scale' in targets_df.columns:
scale_ti = targets_df['scale'].iloc[ti]
if options.restart and os.path.isfile(seqs_cov_file):
print('Skipping existing %s' % seqs_cov_file, file=sys.stderr)
else:
cmd = 'basenji_data_read.py'
cmd += ' --crop %d' % options.crop_bp
cmd += ' -w %d' % options.pool_width
cmd += ' -u %s' % targets_df['sum_stat'].iloc[ti]
if clip_ti is not None:
cmd += ' -c %f' % clip_ti
if clipsoft_ti is not None:
cmd += ' --clip_soft %f' % clipsoft_ti
cmd += ' -s %f' % scale_ti
if options.blacklist_bed:
cmd += ' -b %s' % options.blacklist_bed
if options.interp_nan:
cmd += ' -i'
cmd += ' %s' % genome_cov_file
cmd += ' %s' % seqs_bed_file
cmd += ' %s' % seqs_cov_file
if options.run_local:
# breaks on some OS
# cmd += ' &> %s.err' % seqs_cov_stem
read_jobs.append(cmd)
else:
j = slurm.Job(cmd,
name='read_t%d' % ti,
out_file='%s.out' % seqs_cov_stem,
err_file='%s.err' % seqs_cov_stem,
queue='standard', mem=15000, time='12:0:0')
read_jobs.append(j)
if options.run_local:
util.exec_par(read_jobs, options.processes, verbose=True)
else:
slurm.multi_run(read_jobs, options.processes, verbose=True,
launch_sleep=1, update_sleep=5)
################################################################
# write TF Records
################################################################
# copy targets file
shutil.copy(targets_file, '%s/targets.txt' % options.out_dir)
# initialize TF Records dir
tfr_dir = '%s/tfrecords' % options.out_dir
if not os.path.isdir(tfr_dir):
os.mkdir(tfr_dir)
write_jobs = []
for fold_set in fold_labels:
fold_set_indexes = [i for i in range(len(mseqs)) if mseqs[i].label == fold_set]
fold_set_start = fold_set_indexes[0]
fold_set_end = fold_set_indexes[-1] + 1
tfr_i = 0
tfr_start = fold_set_start
tfr_end = min(tfr_start+options.seqs_per_tfr, fold_set_end)
while tfr_start <= fold_set_end:
tfr_stem = '%s/%s-%d' % (tfr_dir, fold_set, tfr_i)
cmd = 'basenji_data_write.py'
cmd += ' -s %d' % tfr_start
cmd += ' -e %d' % tfr_end
cmd += ' --umap_clip %f' % options.umap_clip
if options.umap_tfr:
cmd += ' --umap_tfr'
if options.umap_bed is not None:
cmd += ' -u %s' % unmap_npy
cmd += ' %s' % fasta_file
cmd += ' %s' % seqs_bed_file
cmd += ' %s' % seqs_cov_dir
cmd += ' %s.tfr' % tfr_stem
if options.run_local:
# breaks on some OS
# cmd += ' &> %s.err' % tfr_stem
write_jobs.append(cmd)
else:
j = slurm.Job(cmd,
name='write_%s-%d' % (fold_set, tfr_i),
out_file='%s.out' % tfr_stem,
err_file='%s.err' % tfr_stem,
queue='standard', mem=15000, time='12:0:0')
write_jobs.append(j)
# update
tfr_i += 1
tfr_start += options.seqs_per_tfr
tfr_end = min(tfr_start+options.seqs_per_tfr, fold_set_end)
if options.run_local:
util.exec_par(write_jobs, options.processes, verbose=True)
else:
slurm.multi_run(write_jobs, options.processes, verbose=True,
launch_sleep=1, update_sleep=5)
################################################################
# stats
################################################################
stats_dict = {}
stats_dict['num_targets'] = targets_df.shape[0]
stats_dict['seq_length'] = options.seq_length
stats_dict['pool_width'] = options.pool_width
stats_dict['crop_bp'] = options.crop_bp
target_length = options.seq_length - 2*options.crop_bp
target_length = target_length // options.pool_width
stats_dict['target_length'] = target_length
for fi in range(num_folds):
stats_dict['%s_seqs' % fold_labels[fi]] = len(fold_mseqs[fi])
with open('%s/statistics.json' % options.out_dir, 'w') as stats_json_out:
json.dump(stats_dict, stats_json_out, indent=4)
################################################################################
def annotate_unmap(mseqs, unmap_bed, seq_length, pool_width, crop_bp):
""" Intersect the sequence segments with unmappable regions
      and annotate the segments as NaN so they can be ignored.
Args:
mseqs: list of ModelSeq's
unmap_bed: unmappable regions BED file
seq_length: sequence length
pool_width: pooled bin width
crop_bp: nucleotides cropped off ends
Returns:
seqs_unmap: NxL binary NA indicators
"""
# print sequence segments to file
seqs_temp = tempfile.NamedTemporaryFile()
seqs_bed_file = seqs_temp.name
write_seqs_bed(seqs_bed_file, mseqs)
# hash segments to indexes
chr_start_indexes = {}
for i in range(len(mseqs)):
chr_start_indexes[(mseqs[i].chr, mseqs[i].start)] = i
# initialize unmappable array
pool_seq_length = seq_length // pool_width
seqs_unmap = np.zeros((len(mseqs), pool_seq_length), dtype='bool')
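  # rows index sequences, columns index pooled bins; True marks bins overlapping unmappable regions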
# intersect with unmappable regions
p = subprocess.Popen(
'bedtools intersect -wo -a %s -b %s' % (seqs_bed_file, unmap_bed),
shell=True, stdout=subprocess.PIPE)
for line in p.stdout:
line = line.decode('utf-8')
a = line.split()
seq_chrom = a[0]
seq_start = int(a[1])
seq_end = int(a[2])
seq_key = (seq_chrom, seq_start)
unmap_start = int(a[4])
unmap_end = int(a[5])
overlap_start = max(seq_start, unmap_start)
overlap_end = min(seq_end, unmap_end)
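    # convert the overlap from bp coordinates to pooled bin indices; floor/ceil keeps
    # partially covered bins, which minor-overlap trimming below may drop again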
pool_seq_unmap_start = math.floor((overlap_start - seq_start) / pool_width)
pool_seq_unmap_end = math.ceil((overlap_end - seq_start) / pool_width)
    # drop the first pooled bin if its overlap is minor (<10% of pool_width)
first_start = seq_start + pool_seq_unmap_start * pool_width
first_end = first_start + pool_width
first_overlap = first_end - overlap_start
if first_overlap < 0.1 * pool_width:
pool_seq_unmap_start += 1
    # drop the last pooled bin if its overlap is minor (<10% of pool_width)
last_start = seq_start + (pool_seq_unmap_end - 1) * pool_width
last_overlap = overlap_end - last_start
if last_overlap < 0.1 * pool_width:
pool_seq_unmap_end -= 1
seqs_unmap[chr_start_indexes[seq_key], pool_seq_unmap_start:pool_seq_unmap_end] = True
assert(seqs_unmap[chr_start_indexes[seq_key], pool_seq_unmap_start:pool_seq_unmap_end].sum() == pool_seq_unmap_end-pool_seq_unmap_start)
# crop
if crop_bp > 0:
pool_crop = crop_bp // pool_width
seqs_unmap = seqs_unmap[:, pool_crop:-pool_crop]
return seqs_unmap
################################################################################
def break_large_contigs(contigs, break_t, verbose=False):
"""Break large contigs in half until all contigs are under
the size threshold."""
# initialize a heapq of contigs and lengths
contig_heapq = []
for ctg in contigs:
ctg_len = ctg.end - ctg.start
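    # push negative lengths so the min-heap behaves as a max-heap keyed on contig length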
heapq.heappush(contig_heapq, (-ctg_len, ctg))
ctg_len = break_t + 1
while ctg_len > break_t:
# pop largest contig
ctg_nlen, ctg = heapq.heappop(contig_heapq)
ctg_len = -ctg_nlen
# if too large
if ctg_len > break_t:
if verbose:
print('Breaking %s:%d-%d (%d nt)' % (ctg.chr,ctg.start,ctg.end,ctg_len))
# break in two
ctg_mid = ctg.start + ctg_len//2
try:
ctg_left = Contig(ctg.genome, ctg.chr, ctg.start, ctg_mid)
ctg_right = Contig(ctg.genome, ctg.chr, ctg_mid, ctg.end)
except AttributeError:
ctg_left = Contig(ctg.chr, ctg.start, ctg_mid)
ctg_right = Contig(ctg.chr, ctg_mid, ctg.end)
# add left
ctg_left_len = ctg_left.end - ctg_left.start
heapq.heappush(contig_heapq, (-ctg_left_len, ctg_left))
# add right
ctg_right_len = ctg_right.end - ctg_right.start
heapq.heappush(contig_heapq, (-ctg_right_len, ctg_right))
# return to list
contigs = [len_ctg[1] for len_ctg in contig_heapq]
return contigs
################################################################################
def contig_sequences(contigs, seq_length, stride, snap=1, label=None):
  ''' Break up a list of Contigs into a list of ModelSeqs. '''
mseqs = []
for ctg in contigs:
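    # round the first start position up to the nearest multiple of snap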
seq_start = int(np.ceil(ctg.start/snap)*snap)
seq_end = seq_start + seq_length
while seq_end <= ctg.end:
# record sequence
mseqs.append(ModelSeq(ctg.chr, seq_start, seq_end, label))
# update
seq_start += stride
seq_end += stride
return mseqs
################################################################################
def curate_peaks(targets_df, out_dir, pool_width, crop_bp):
"""Merge all peaks, round to nearest pool_width, and add cropped bp."""
# concatenate and extend peaks
cat_bed_file = '%s/peaks_cat.bed' % out_dir
cat_bed_out = open(cat_bed_file, 'w')
for bed_file in targets_df.file:
if bed_file[-3:] == '.gz':
bed_in = gzip.open(bed_file, 'rt')
else:
bed_in = open(bed_file, 'r')
for line in bed_in:
a = line.rstrip().split('\t')
chrm = a[0]
start = int(a[1])
end = int(a[2])
# extend to pool width
length = end - start
if length < pool_width:
mid = (start + end) // 2
start = mid - pool_width//2
end = start + pool_width
# add cropped bp
start = max(0, start-crop_bp)
end += crop_bp
# print
print('%s\t%d\t%d' % (chrm,start,end), file=cat_bed_out)
bed_in.close()
cat_bed_out.close()
# merge
merge_bed_file = '%s/peaks_merge.bed' % out_dir
bedtools_cmd = 'bedtools sort -i %s' % cat_bed_file
bedtools_cmd += ' | bedtools merge -i - > %s' % merge_bed_file
subprocess.call(bedtools_cmd, shell=True)
# round and add crop_bp
full_bed_file = '%s/peaks_full.bed' % out_dir
full_bed_out = open(full_bed_file, 'w')
for line in open(merge_bed_file):
a = line.rstrip().split('\t')
chrm = a[0]
start = int(a[1])
end = int(a[2])
mid = (start + end) // 2
length = end - start
# round length to nearest pool_width
bins = int(np.round(length/pool_width))
assert(bins > 0)
start = mid - (bins*pool_width)//2
start = max(0, start)
end = start + (bins*pool_width)
# add cropped bp
# start = max(0, start-crop_bp)
# end += crop_bp
# write
print('%s\t%d\t%d' % (chrm,start,end), file=full_bed_out)
full_bed_out.close()
return full_bed_file
################################################################################
def divide_contigs_chr(contigs, test_chrs, valid_chrs):
"""Divide list of contigs into train/valid/test lists
by chromosome."""
# initialize current train/valid/test nucleotides
train_nt = 0
valid_nt = 0
test_nt = 0
# initialize train/valid/test contig lists
train_contigs = []
valid_contigs = []
test_contigs = []
# process contigs
for ctg in contigs:
ctg_len = ctg.end - ctg.start
if ctg.chr in test_chrs:
test_contigs.append(ctg)
test_nt += ctg_len
elif ctg.chr in valid_chrs:
valid_contigs.append(ctg)
valid_nt += ctg_len
else:
train_contigs.append(ctg)
train_nt += ctg_len
total_nt = train_nt + valid_nt + test_nt
print('Contigs divided into')
print(' Train: %5d contigs, %10d nt (%.4f)' % \
(len(train_contigs), train_nt, train_nt/total_nt))
print(' Valid: %5d contigs, %10d nt (%.4f)' % \
(len(valid_contigs), valid_nt, valid_nt/total_nt))
print(' Test: %5d contigs, %10d nt (%.4f)' % \
(len(test_contigs), test_nt, test_nt/total_nt))
return [train_contigs, valid_contigs, test_contigs]
################################################################################
def divide_contigs_folds(contigs, folds):
"""Divide list of contigs into cross fold lists."""
# sort contigs descending by length
length_contigs = [(ctg.end-ctg.start,ctg) for ctg in contigs]
length_contigs.sort(reverse=True)
# compute total nucleotides
total_nt = sum([lc[0] for lc in length_contigs])
# compute aimed fold nucleotides
fold_nt_aim = int(np.ceil(total_nt / folds))
# initialize current fold nucleotides
fold_nt = np.zeros(folds)
# initialize fold contig lists
fold_contigs = []
for fi in range(folds):
fold_contigs.append([])
# process contigs
for ctg_len, ctg in length_contigs:
# compute gap between current and aim
fold_nt_gap = fold_nt_aim - fold_nt
fold_nt_gap = np.clip(fold_nt_gap, 0, np.inf)
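    # folds that have already met their target length get zero probability of receiving more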
# compute sample probability
fold_prob = fold_nt_gap / fold_nt_gap.sum()
# sample train/valid/test
fi = np.random.choice(folds, p=fold_prob)
fold_contigs[fi].append(ctg)
fold_nt[fi] += ctg_len
print('Contigs divided into')
for fi in range(folds):
print(' Fold%d: %5d contigs, %10d nt (%.4f)' % \
(fi, len(fold_contigs[fi]), fold_nt[fi], fold_nt[fi]/total_nt))
return fold_contigs
################################################################################
def divide_contigs_pct(contigs, test_pct, valid_pct, pct_abstain=0.2):
"""Divide list of contigs into train/valid/test lists,
aiming for the specified nucleotide percentages."""
# sort contigs descending by length
length_contigs = [(ctg.end-ctg.start,ctg) for ctg in contigs]
length_contigs.sort(reverse=True)
# compute total nucleotides
total_nt = sum([lc[0] for lc in length_contigs])
# compute aimed train/valid/test nucleotides
test_nt_aim = test_pct * total_nt
valid_nt_aim = valid_pct * total_nt
train_nt_aim = total_nt - valid_nt_aim - test_nt_aim
# initialize current train/valid/test nucleotides
train_nt = 0
valid_nt = 0
test_nt = 0
# initialize train/valid/test contig lists
train_contigs = []
valid_contigs = []
test_contigs = []
# process contigs
for ctg_len, ctg in length_contigs:
# compute gap between current and aim
test_nt_gap = max(0, test_nt_aim - test_nt)
valid_nt_gap = max(0, valid_nt_aim - valid_nt)
train_nt_gap = max(1, train_nt_aim - train_nt)
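    # the train gap is floored at 1 so training always keeps a nonzero sampling probability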
# skip if too large
if ctg_len > pct_abstain*test_nt_gap:
test_nt_gap = 0
if ctg_len > pct_abstain*valid_nt_gap:
valid_nt_gap = 0
# compute remaining %
gap_sum = train_nt_gap + valid_nt_gap + test_nt_gap
test_pct_gap = test_nt_gap / gap_sum
valid_pct_gap = valid_nt_gap / gap_sum
train_pct_gap = train_nt_gap / gap_sum
# sample train/valid/test
ri = np.random.choice(range(3), 1, p=[train_pct_gap, valid_pct_gap, test_pct_gap])[0]
if ri == 0:
train_contigs.append(ctg)
train_nt += ctg_len
elif ri == 1:
valid_contigs.append(ctg)
valid_nt += ctg_len
elif ri == 2:
test_contigs.append(ctg)
test_nt += ctg_len
else:
print('TVT random number beyond 0,1,2', file=sys.stderr)
exit(1)
print('Contigs divided into')
print(' Train: %5d contigs, %10d nt (%.4f)' % \
(len(train_contigs), train_nt, train_nt/total_nt))
print(' Valid: %5d contigs, %10d nt (%.4f)' % \
(len(valid_contigs), valid_nt, valid_nt/total_nt))
print(' Test: %5d contigs, %10d nt (%.4f)' % \
(len(test_contigs), test_nt, test_nt/total_nt))
return [train_contigs, valid_contigs, test_contigs]
################################################################################
def limit_contigs(contigs, filter_bed):
""" Limit to contigs overlapping the given BED.
  Args:
contigs: list of Contigs
filter_bed: BED file to filter by
Returns:
fcontigs: list of Contigs
"""
# print ctgments to BED
ctg_fd, ctg_bed_file = tempfile.mkstemp()
ctg_bed_out = open(ctg_bed_file, 'w')
for ctg in contigs:
print('%s\t%d\t%d' % (ctg.chr, ctg.start, ctg.end), file=ctg_bed_out)
ctg_bed_out.close()
# intersect w/ filter_bed
fcontigs = []
p = subprocess.Popen(
'bedtools intersect -a %s -b %s' % (ctg_bed_file, filter_bed),
shell=True,
stdout=subprocess.PIPE)
for line in p.stdout:
a = line.decode('utf-8').split()
chrom = a[0]
ctg_start = int(a[1])
ctg_end = int(a[2])
fcontigs.append(Contig(chrom, ctg_start, ctg_end))
p.communicate()
os.close(ctg_fd)
os.remove(ctg_bed_file)
return fcontigs
################################################################################
def rejoin_large_contigs(contigs):
""" Rejoin large contigs that were broken up before alignment comparison."""
# split list by chromosome
chr_contigs = {}
for ctg in contigs:
chr_contigs.setdefault(ctg.chr,[]).append(ctg)
contigs = []
for chrm in chr_contigs:
# sort within chromosome
chr_contigs[chrm].sort(key=lambda x: x.start)
ctg_ongoing = chr_contigs[chrm][0]
for i in range(1, len(chr_contigs[chrm])):
ctg_this = chr_contigs[chrm][i]
if ctg_ongoing.end == ctg_this.start:
# join
# ctg_ongoing.end = ctg_this.end
ctg_ongoing = ctg_ongoing._replace(end=ctg_this.end)
else:
# conclude ongoing
contigs.append(ctg_ongoing)
# move to next
ctg_ongoing = ctg_this
# conclude final
contigs.append(ctg_ongoing)
return contigs
################################################################################
def write_seqs_bed(bed_file, seqs, labels=False):
'''Write sequences to BED file.'''
bed_out = open(bed_file, 'w')
for i in range(len(seqs)):
line = '%s\t%d\t%d' % (seqs[i].chr, seqs[i].start, seqs[i].end)
if labels:
line += '\t%s' % seqs[i].label
print(line, file=bed_out)
bed_out.close()
################################################################################
Contig = collections.namedtuple('Contig', ['chr', 'start', 'end'])
ModelSeq = collections.namedtuple('ModelSeq', ['chr', 'start', 'end', 'label'])
################################################################################
if __name__ == '__main__':
main()
| apache-2.0 |
OshynSong/scikit-learn | examples/manifold/plot_manifold_sphere.py | 258 | 5101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances closely respect the distances in the original
high-dimensional space; unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem closely resembles
that of representing a flat map of the Earth, as with a
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
sarmstr5/kaggle_intel_mobleODT_cervix_classification | src/starting_with_keras.py | 1 | 5420 | from PIL import ImageFilter, ImageStat, Image, ImageDraw
from multiprocessing import Pool, cpu_count
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import numpy as np
import glob
import cv2
import processing_images
from keras.wrappers.scikit_learn import KerasClassifier
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten, Activation
from keras.layers.convolutional import Convolution2D, ZeroPadding2D, MaxPooling2D
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from keras import backend as K
# coding: utf-8
# Public Leader-board of 0.89094
# ====================================================
# derived code from
# https://www.kaggle.com/the1owl/intel-mobileodt-cervical-cancer-screening/artificial-intelligence-for-cc-screening/comments
# Save train and test images to normalized numpy arrays once for running multiple neural network configuration tests
def im_multi(path):
try:
im_stats_im_ = Image.open(path)
return [path, {'size': im_stats_im_.size}]
except:
print(path)
return [path, {'size': [0,0]}]
def im_stats(im_stats_df):
im_stats_d = {}
p = Pool(cpu_count()-1)
ret = p.map(im_multi, im_stats_df['path'])
for i in range(len(ret)):
im_stats_d[ret[i][0]] = ret[i][1]
im_stats_df['size'] = im_stats_df['path'].map(lambda x: ' '.join(str(s) for s in im_stats_d[x]['size']))
return im_stats_df
def get_im_cv2(path):
#img = cv2.imread(path)
img = processing_images.process_img(path, rgb=True)
#resized = cv2.resize(img, (32, 32), cv2.INTER_LINEAR) #use cv2.resize(img, (64, 64), cv2.INTER_LINEAR)
return [path, img]
def normalize_image_features(paths):
imf_d = {}
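    # preprocess images in parallel; results are keyed by path so the original order is restored below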
p = Pool(cpu_count())
ret = p.map(get_im_cv2, paths)
for i in range(len(ret)):
imf_d[ret[i][0]] = ret[i][1]
ret = []
fdata = [imf_d[f] for f in paths]
fdata = np.array(fdata, dtype=np.uint8)
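    # reorder from (N, H, W, C) to channels-first (N, C, H, W) for the Theano-style
    # dim_ordering the model expects (assumes process_img returns H x W x C arrays)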
fdata = fdata.transpose((0, 3, 1, 2))
fdata = fdata.astype('float32')
fdata = fdata / 255
return fdata
def process_data():
train = glob.glob('/data/kaggle/train/**/*.jpg') + glob.glob('/data/kaggle/additional/**/*.jpg')
    train = pd.DataFrame([[p.split('/')[4],p.split('/')[5],p] for p in train], columns = ['type','image','path'])[::5] #limit for Kaggle Demo
train = im_stats(train)
train = train[train['size'] != '0 0'].reset_index(drop=True) #remove bad images
train_data = normalize_image_features(train['path'])
np.save('train.npy', train_data, allow_pickle=True, fix_imports=True)
le = LabelEncoder()
train_target = le.fit_transform(train['type'].values)
print(le.classes_) #in case not 1 to 3 order
np.save('train_target.npy', train_target, allow_pickle=True, fix_imports=True)
test = glob.glob('../input/test/*.jpg')
test = pd.DataFrame([[p.split('/')[3],p] for p in test], columns = ['image','path']) #[::20] #limit for Kaggle Demo
test_data = normalize_image_features(test['path'])
np.save('test.npy', test_data, allow_pickle=True, fix_imports=True)
test_id = test.image.values
np.save('test_id.npy', test_id, allow_pickle=True, fix_imports=True)
# Start your neural network high performance engines
def create_model(opt_='adamax'):
model = Sequential()
model.add(Convolution2D(4, 3, 3, activation='relu', dim_ordering='th', input_shape=(3, 32, 32))) #use input_shape=(3, 64, 64)
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering='th'))
model.add(Convolution2D(8, 3, 3, activation='relu', dim_ordering='th'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering='th'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(12, activation='tanh'))
model.add(Dropout(0.1))
model.add(Dense(3, activation='softmax'))
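    # sparse_categorical_crossentropy expects integer class labels (0-2), as produced by LabelEncoder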
model.compile(optimizer=opt_, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
return model
def main():
process_run = False
model_run = True
print(cpu_count())
np.random.seed(17)
if process_run:
process_data()
# K.set_image_dim_ordering('th')
if model_run:
# read in data
train_data = np.load('train.npy')
train_target = np.load('train_target.npy')
# split data
x_train,x_val_train,y_train,y_val_train = train_test_split(train_data,train_target,test_size=0.4, random_state=17)
# Image preprocessing, rotating images and performing random zooms
datagen = ImageDataGenerator(rotation_range=0.9, zoom_range=0.3)
datagen.fit(train_data)
# Create Image model
K.set_floatx('float32')
model = create_model()
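        # written against the Keras 1.x fit_generator API (nb_epoch, samples_per_epoch)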
model.fit_generator(datagen.flow(x_train,y_train, batch_size=15, shuffle=True), nb_epoch=200, samples_per_epoch=len(x_train), verbose=20, validation_data=(x_val_train, y_val_train))
# Load processed data
test_data = np.load('test.npy')
test_id = np.load('test_id.npy')
# run classification
pred = model.predict_proba(test_data)
df = pd.DataFrame(pred, columns=['Type_1','Type_2','Type_3'])
df['image_name'] = test_id
df.to_csv('~/kaggle_intel_mobileODT_cervix_classification/submission.csv', index=False)
if __name__=='__main__':
main()
| mit |
xdnian/pyml | code/optional-py-scripts/ch07.py | 4 | 19178 | # Sebastian Raschka, 2015 (http://sebastianraschka.com)
# Python Machine Learning - Code Examples
#
# Chapter 7 - Combining Different Models for Ensemble Learning
#
# S. Raschka. Python Machine Learning. Packt Publishing Ltd., 2015.
# GitHub Repo: https://github.com/rasbt/python-machine-learning-book
#
# License: MIT
# https://github.com/rasbt/python-machine-learning-book/blob/master/LICENSE.txt
import math
import numpy as np
import pandas as pd
import operator
from scipy.misc import comb
import matplotlib.pyplot as plt
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.preprocessing import LabelEncoder
from sklearn.externals import six
from sklearn.base import clone
from sklearn.pipeline import _name_estimators
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import accuracy_score
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from itertools import product
# Added version check for recent scikit-learn 0.18 checks
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
if Version(sklearn_version) < '0.18':
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import GridSearchCV
else:
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
#############################################################################
print(50 * '=')
print('Section: Learning with ensembles')
print(50 * '-')
def ensemble_error(n_classifier, error):
    k_start = int(math.ceil(n_classifier / 2.0))
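    # P(ensemble error) = sum_{k >= ceil(n/2)} C(n, k) * eps**k * (1 - eps)**(n - k)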
probs = [comb(n_classifier, k) * error**k * (1 - error)**(n_classifier - k)
for k in range(k_start, n_classifier + 1)]
return sum(probs)
print('Ensemble error', ensemble_error(n_classifier=11, error=0.25))
error_range = np.arange(0.0, 1.01, 0.01)
ens_errors = [ensemble_error(n_classifier=11, error=error)
for error in error_range]
plt.plot(error_range,
ens_errors,
label='Ensemble error',
linewidth=2)
plt.plot(error_range,
error_range,
linestyle='--',
label='Base error',
linewidth=2)
plt.xlabel('Base error')
plt.ylabel('Base/Ensemble error')
plt.legend(loc='upper left')
plt.grid()
# plt.tight_layout()
# plt.savefig('./figures/ensemble_err.png', dpi=300)
plt.show()
#############################################################################
print(50 * '=')
print('Section: Implementing a simple majority vote classifier')
print(50 * '-')
np.argmax(np.bincount([0, 0, 1],
weights=[0.2, 0.2, 0.6]))
ex = np.array([[0.9, 0.1],
[0.8, 0.2],
[0.4, 0.6]])
p = np.average(ex,
axis=0,
weights=[0.2, 0.2, 0.6])
print('Averaged prediction', p)
print('np.argmax(p): ', np.argmax(p))
class MajorityVoteClassifier(BaseEstimator,
ClassifierMixin):
""" A majority vote ensemble classifier
Parameters
----------
classifiers : array-like, shape = [n_classifiers]
Different classifiers for the ensemble
    vote : str, {'classlabel', 'probability'} (default='classlabel')
If 'classlabel' the prediction is based on the argmax of
class labels. Else if 'probability', the argmax of
the sum of probabilities is used to predict the class label
(recommended for calibrated classifiers).
weights : array-like, shape = [n_classifiers], optional (default=None)
        If a list of `int` or `float` values is provided, the classifiers
        are weighted by importance; uses uniform weights if `weights=None`.
"""
def __init__(self, classifiers, vote='classlabel', weights=None):
self.classifiers = classifiers
self.named_classifiers = {key: value for key, value
in _name_estimators(classifiers)}
self.vote = vote
self.weights = weights
def fit(self, X, y):
""" Fit classifiers.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Matrix of training samples.
y : array-like, shape = [n_samples]
Vector of target class labels.
Returns
-------
self : object
"""
if self.vote not in ('probability', 'classlabel'):
raise ValueError("vote must be 'probability' or 'classlabel'"
"; got (vote=%r)"
% self.vote)
if self.weights and len(self.weights) != len(self.classifiers):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d classifiers'
% (len(self.weights), len(self.classifiers)))
# Use LabelEncoder to ensure class labels start with 0, which
# is important for np.argmax call in self.predict
self.lablenc_ = LabelEncoder()
self.lablenc_.fit(y)
self.classes_ = self.lablenc_.classes_
self.classifiers_ = []
for clf in self.classifiers:
fitted_clf = clone(clf).fit(X, self.lablenc_.transform(y))
self.classifiers_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Matrix of training samples.
Returns
----------
maj_vote : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.vote == 'probability':
maj_vote = np.argmax(self.predict_proba(X), axis=1)
else: # 'classlabel' vote
# Collect results from clf.predict calls
predictions = np.asarray([clf.predict(X)
for clf in self.classifiers_]).T
maj_vote = np.apply_along_axis(
lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions)
maj_vote = self.lablenc_.inverse_transform(maj_vote)
return maj_vote
def predict_proba(self, X):
""" Predict class probabilities for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg_proba : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
probas = np.asarray([clf.predict_proba(X)
for clf in self.classifiers_])
avg_proba = np.average(probas, axis=0, weights=self.weights)
return avg_proba
def get_params(self, deep=True):
""" Get classifier parameter names for GridSearch"""
if not deep:
return super(MajorityVoteClassifier, self).get_params(deep=False)
else:
out = self.named_classifiers.copy()
for name, step in six.iteritems(self.named_classifiers):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
#############################################################################
print(50 * '=')
print('Section: Combining different algorithms for'
' classification with majority vote')
print(50 * '-')
iris = datasets.load_iris()
X, y = iris.data[50:, [1, 2]], iris.target[50:]
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test =\
train_test_split(X, y,
test_size=0.5,
random_state=1)
clf1 = LogisticRegression(penalty='l2',
C=0.001,
random_state=0)
clf2 = DecisionTreeClassifier(max_depth=1,
criterion='entropy',
random_state=0)
clf3 = KNeighborsClassifier(n_neighbors=1,
p=2,
metric='minkowski')
pipe1 = Pipeline([['sc', StandardScaler()],
['clf', clf1]])
pipe3 = Pipeline([['sc', StandardScaler()],
['clf', clf3]])
clf_labels = ['Logistic Regression', 'Decision Tree', 'KNN']
print('10-fold cross validation:\n')
for clf, label in zip([pipe1, clf2, pipe3], clf_labels):
scores = cross_val_score(estimator=clf,
X=X_train,
y=y_train,
cv=10,
scoring='roc_auc')
print("ROC AUC: %0.2f (+/- %0.2f) [%s]"
% (scores.mean(), scores.std(), label))
mv_clf = MajorityVoteClassifier(classifiers=[pipe1, clf2, pipe3])
clf_labels += ['Majority Voting']
all_clf = [pipe1, clf2, pipe3, mv_clf]
for clf, label in zip(all_clf, clf_labels):
scores = cross_val_score(estimator=clf,
X=X_train,
y=y_train,
cv=10,
scoring='roc_auc')
print("ROC AUC: %0.2f (+/- %0.2f) [%s]"
% (scores.mean(), scores.std(), label))
#############################################################################
print(50 * '=')
print('Section: Evaluating and tuning the ensemble classifier')
print(50 * '-')
colors = ['black', 'orange', 'blue', 'green']
linestyles = [':', '--', '-.', '-']
for clf, label, clr, ls \
in zip(all_clf,
clf_labels, colors, linestyles):
# assuming the label of the positive class is 1
y_pred = clf.fit(X_train,
y_train).predict_proba(X_test)[:, 1]
fpr, tpr, thresholds = roc_curve(y_true=y_test,
y_score=y_pred)
roc_auc = auc(x=fpr, y=tpr)
plt.plot(fpr, tpr,
color=clr,
linestyle=ls,
label='%s (auc = %0.2f)' % (label, roc_auc))
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1],
linestyle='--',
color='gray',
linewidth=2)
plt.xlim([-0.1, 1.1])
plt.ylim([-0.1, 1.1])
plt.grid()
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
# plt.tight_layout()
# plt.savefig('./figures/roc.png', dpi=300)
plt.show()
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
all_clf = [pipe1, clf2, pipe3, mv_clf]
x_min = X_train_std[:, 0].min() - 1
x_max = X_train_std[:, 0].max() + 1
y_min = X_train_std[:, 1].min() - 1
y_max = X_train_std[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(nrows=2, ncols=2,
sharex='col',
sharey='row',
figsize=(7, 5))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
all_clf, clf_labels):
clf.fit(X_train_std, y_train)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.3)
axarr[idx[0], idx[1]].scatter(X_train_std[y_train == 0, 0],
X_train_std[y_train == 0, 1],
c='blue',
marker='^',
s=50)
axarr[idx[0], idx[1]].scatter(X_train_std[y_train == 1, 0],
X_train_std[y_train == 1, 1],
c='red',
marker='o',
s=50)
axarr[idx[0], idx[1]].set_title(tt)
plt.text(-3.5, -4.5,
s='Sepal width [standardized]',
ha='center', va='center', fontsize=12)
plt.text(-10.5, 4.5,
s='Petal length [standardized]',
ha='center', va='center',
fontsize=12, rotation=90)
# plt.tight_layout()
# plt.savefig('./figures/voting_panel', bbox_inches='tight', dpi=300)
plt.show()
print(mv_clf.get_params())
params = {'decisiontreeclassifier__max_depth': [1, 2],
'pipeline-1__clf__C': [0.001, 0.1, 100.0]}
grid = GridSearchCV(estimator=mv_clf,
param_grid=params,
cv=10,
scoring='roc_auc')
grid.fit(X_train, y_train)
if Version(sklearn_version) < '0.18':
for params, mean_score, scores in grid.grid_scores_:
print("%0.3f +/- %0.2f %r"
% (mean_score, scores.std() / 2.0, params))
else:
cv_keys = ('mean_test_score', 'std_test_score', 'params')
for r, _ in enumerate(grid.cv_results_['mean_test_score']):
print("%0.3f +/- %0.2f %r"
% (grid.cv_results_[cv_keys[0]][r],
grid.cv_results_[cv_keys[1]][r] / 2.0,
grid.cv_results_[cv_keys[2]][r]))
print('Best parameters: %s' % grid.best_params_)
print('Accuracy: %.2f' % grid.best_score_)
#############################################################################
print(50 * '=')
print('Section: Bagging -- Building an ensemble of'
'classifiers from bootstrap samples')
print(50 * '-')
df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/'
'machine-learning-databases/wine/wine.data',
header=None)
df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',
'Alcalinity of ash', 'Magnesium', 'Total phenols',
'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
'Color intensity', 'Hue', 'OD280/OD315 of diluted wines',
'Proline']
# drop 1 class
df_wine = df_wine[df_wine['Class label'] != 1]
y = df_wine['Class label'].values
X = df_wine[['Alcohol', 'Hue']].values
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test =\
train_test_split(X, y,
test_size=0.40,
random_state=1)
tree = DecisionTreeClassifier(criterion='entropy',
max_depth=None,
random_state=1)
bag = BaggingClassifier(base_estimator=tree,
n_estimators=500,
max_samples=1.0,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
n_jobs=1,
random_state=1)
tree = tree.fit(X_train, y_train)
y_train_pred = tree.predict(X_train)
y_test_pred = tree.predict(X_test)
tree_train = accuracy_score(y_train, y_train_pred)
tree_test = accuracy_score(y_test, y_test_pred)
print('Decision tree train/test accuracies %.3f/%.3f'
% (tree_train, tree_test))
bag = bag.fit(X_train, y_train)
y_train_pred = bag.predict(X_train)
y_test_pred = bag.predict(X_test)
bag_train = accuracy_score(y_train, y_train_pred)
bag_test = accuracy_score(y_test, y_test_pred)
print('Bagging train/test accuracies %.3f/%.3f'
% (bag_train, bag_test))
x_min = X_train[:, 0].min() - 1
x_max = X_train[:, 0].max() + 1
y_min = X_train[:, 1].min() - 1
y_max = X_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(nrows=1, ncols=2,
sharex='col',
sharey='row',
figsize=(8, 3))
for idx, clf, tt in zip([0, 1],
[tree, bag],
['Decision Tree', 'Bagging']):
clf.fit(X_train, y_train)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx].contourf(xx, yy, Z, alpha=0.3)
axarr[idx].scatter(X_train[y_train == 0, 0],
X_train[y_train == 0, 1],
c='blue', marker='^')
axarr[idx].scatter(X_train[y_train == 1, 0],
X_train[y_train == 1, 1],
c='red', marker='o')
axarr[idx].set_title(tt)
axarr[0].set_ylabel('Alcohol', fontsize=12)
plt.text(10.2, -1.2,
s='Hue',
ha='center', va='center', fontsize=12)
# plt.tight_layout()
# plt.savefig('./figures/bagging_region.png',
# dpi=300,
# bbox_inches='tight')
plt.show()
#############################################################################
print(50 * '=')
print('Section: Leveraging weak learners via adaptive boosting')
print(50 * '-')
tree = DecisionTreeClassifier(criterion='entropy',
max_depth=1,
random_state=0)
ada = AdaBoostClassifier(base_estimator=tree,
n_estimators=500,
learning_rate=0.1,
random_state=0)
tree = tree.fit(X_train, y_train)
y_train_pred = tree.predict(X_train)
y_test_pred = tree.predict(X_test)
tree_train = accuracy_score(y_train, y_train_pred)
tree_test = accuracy_score(y_test, y_test_pred)
print('Decision tree train/test accuracies %.3f/%.3f'
% (tree_train, tree_test))
ada = ada.fit(X_train, y_train)
y_train_pred = ada.predict(X_train)
y_test_pred = ada.predict(X_test)
ada_train = accuracy_score(y_train, y_train_pred)
ada_test = accuracy_score(y_test, y_test_pred)
print('AdaBoost train/test accuracies %.3f/%.3f'
% (ada_train, ada_test))
x_min, x_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1
y_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(1, 2, sharex='col', sharey='row', figsize=(8, 3))
for idx, clf, tt in zip([0, 1],
[tree, ada],
['Decision Tree', 'AdaBoost']):
clf.fit(X_train, y_train)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx].contourf(xx, yy, Z, alpha=0.3)
axarr[idx].scatter(X_train[y_train == 0, 0],
X_train[y_train == 0, 1],
c='blue', marker='^')
axarr[idx].scatter(X_train[y_train == 1, 0],
X_train[y_train == 1, 1],
c='red', marker='o')
axarr[idx].set_title(tt)
axarr[0].set_ylabel('Alcohol', fontsize=12)
plt.text(10.2, -1.2,
s='Hue',
ha='center', va='center', fontsize=12)
# plt.tight_layout()
# plt.savefig('./figures/adaboost_region.png',
# dpi=300,
# bbox_inches='tight')
plt.show()
| mit |
anirudhjayaraman/scikit-learn | sklearn/tests/test_multiclass.py | 136 | 23649 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote levels
        # because there are only 3 distinct class pairs and thus 3 distinct
# binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
| bsd-3-clause |
mrshu/scikit-learn | sklearn/tests/test_grid_search.py | 2 | 8915 | """
Testing for grid search module (sklearn.grid_search)
"""
from cStringIO import StringIO
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.grid_search import GridSearchCV
from sklearn.datasets.samples_generator import make_classification, make_blobs
from sklearn.svm import LinearSVC, SVC
from sklearn.cluster import KMeans, MeanShift
from sklearn.metrics import f1_score, precision_score
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.cross_validation import KFold
class MockClassifier(BaseEstimator):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
class MockListClassifier(BaseEstimator):
"""Dummy classifier to test the cross-validation.
Checks that GridSearchCV didn't convert X to array.
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
assert_true(isinstance(X, list))
return self
def predict(self, T):
return T.shape[0]
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def test_grid_search():
"""Test that the best estimator contains the right value for foo_param"""
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0] == {'foo_param': foo_i})
# Smoke test the score:
grid_search.score(X, y)
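def _grid_scores_params_sketch(grid_search):
    # A minimal illustrative sketch (assumption based on the loop above): in
    # this scikit-learn version each ``grid_scores_`` entry starts with the
    # parameter dict that produced it, so the searched settings can be
    # recovered like this.
    return [entry[0] for entry in grid_search.grid_scores_]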
def test_no_refit():
"""Test that grid search can be used for model selection only"""
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
"""Test that grid search will capture errors on data with different
length"""
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
"""Test that grid search works with both dense and sparse matrices"""
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_score_func():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, score_func=f1_score)
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, score_func=f1_score)
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
#np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss_func
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, loss_func=f1_loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred3)
assert_equal(C, C3)
def test_grid_search_precomputed_kernel():
"""Test that grid search works when the input features are given in the
form of a precomputed kernel matrix """
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
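def _precomputed_kernel_sketch(X_train, X_test):
    # A minimal illustrative sketch mirroring the test above: with
    # kernel='precomputed', fit expects the (n_train, n_train) Gram matrix and
    # predict expects an (n_test, n_train) matrix of kernel values against the
    # training samples.
    K_train = np.dot(X_train, X_train.T)
    K_test = np.dot(X_test, X_train.T)
    return K_train, K_test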
def test_grid_search_precomputed_kernel_error_nonsquare():
"""Test that grid search returns an error with a non-square precomputed
training kernel matrix"""
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
"""Test that grid search returns an error when using a kernel_function"""
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
"""Regression test for bug in refitting
Simulates re-fitting a broken estimator; this used to break with
sparse SVMs.
"""
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
score_func=precision_score, refit=True)
clf.fit(X, y)
def test_X_as_list():
"""Pass X as list in GridSearchCV
"""
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = MockListClassifier()
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
score_func=adjusted_rand_score)
grid_search.fit(X)
    # the largest number of clusters should give the best score
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_bad_estimator():
    # test grid-search error handling with an unsuitable estimator
ms = MeanShift()
assert_raises(TypeError, GridSearchCV, ms,
param_grid=dict(gamma=[.1, 1, 10]),
score_func=adjusted_rand_score)
| bsd-3-clause |
paalge/scikit-image | doc/examples/segmentation/plot_rag_draw.py | 7 | 1031 | """
======================================
Drawing Region Adjacency Graphs (RAGs)
======================================
This example constructs a Region Adjacency Graph (RAG) and draws it with
the `show_rag` function.
"""
from skimage import data, segmentation
from skimage.future import graph
from matplotlib import pyplot as plt
img = data.coffee()
labels = segmentation.slic(img, compactness=30, n_segments=400)
g = graph.rag_mean_color(img, labels)
fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(6, 8))
ax[0].set_title('RAG drawn with default settings')
lc = graph.show_rag(labels, g, img, ax=ax[0])
# specify the fraction of the plot area that will be used to draw the colorbar
fig.colorbar(lc, fraction=0.03, ax=ax[0])
ax[1].set_title('RAG drawn with grayscale image and viridis colormap')
lc = graph.show_rag(labels, g, img,
img_cmap='gray', edge_cmap='viridis', ax=ax[1])
fig.colorbar(lc, fraction=0.03, ax=ax[1])
for a in ax:
a.axis('off')
plt.tight_layout()
plt.show()
| bsd-3-clause |
ahaberlie/MetPy | examples/calculations/Smoothing.py | 5 | 2418 | # Copyright (c) 2015-2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Smoothing
=========
Using MetPy's smoothing functions.
This example demonstrates the various ways that MetPy's smoothing functions
can be used. While this example uses basic NumPy arrays, these
functions all work equally well with Pint Quantities or xarray DataArrays.
"""
from itertools import product
import matplotlib.pyplot as plt
import numpy as np
import metpy.calc as mpcalc
###########################################
# Start with a base pattern with random noise
np.random.seed(61461542)
size = 128
x, y = np.mgrid[:size, :size]
distance = np.sqrt((x - size / 2) ** 2 + (y - size / 2) ** 2)
raw_data = np.random.random((size, size)) * 0.3 + distance / distance.max() * 0.7
fig, ax = plt.subplots(1, 1, figsize=(4, 4))
ax.set_title('Raw Data')
ax.imshow(raw_data, vmin=0, vmax=1)
ax.axis('off')
plt.show()
###########################################
# Now, create a grid showing different smoothing options
fig, ax = plt.subplots(3, 3, figsize=(12, 12))
for i, j in product(range(3), range(3)):
ax[i, j].axis('off')
# Gaussian Smoother
ax[0, 0].imshow(mpcalc.smooth_gaussian(raw_data, 3), vmin=0, vmax=1)
ax[0, 0].set_title('Gaussian - Low Degree')
ax[0, 1].imshow(mpcalc.smooth_gaussian(raw_data, 8), vmin=0, vmax=1)
ax[0, 1].set_title('Gaussian - High Degree')
# Rectangular Smoother
ax[0, 2].imshow(mpcalc.smooth_rectangular(raw_data, (3, 7), 2), vmin=0, vmax=1)
ax[0, 2].set_title('Rectangular - 3x7 Window\n2 Passes')
# 5-point smoother
ax[1, 0].imshow(mpcalc.smooth_n_point(raw_data, 5, 1), vmin=0, vmax=1)
ax[1, 0].set_title('5-Point - 1 Pass')
ax[1, 1].imshow(mpcalc.smooth_n_point(raw_data, 5, 4), vmin=0, vmax=1)
ax[1, 1].set_title('5-Point - 4 Passes')
# Circular Smoother
ax[1, 2].imshow(mpcalc.smooth_circular(raw_data, 2, 2), vmin=0, vmax=1)
ax[1, 2].set_title('Circular - Radius 2\n2 Passes')
# 9-point smoother
ax[2, 0].imshow(mpcalc.smooth_n_point(raw_data, 9, 1), vmin=0, vmax=1)
ax[2, 0].set_title('9-Point - 1 Pass')
ax[2, 1].imshow(mpcalc.smooth_n_point(raw_data, 9, 4), vmin=0, vmax=1)
ax[2, 1].set_title('9-Point - 4 Passes')
# Arbitrary Window Smoother
ax[2, 2].imshow(mpcalc.smooth_window(raw_data, np.diag(np.ones(5)), 2), vmin=0, vmax=1)
ax[2, 2].set_title('Custom Window (Diagonal) \n2 Passes')
plt.show()
| bsd-3-clause |
bzero/arctic | tests/integration/store/test_version_store_audit.py | 4 | 8283 | from bson import ObjectId
from datetime import datetime as dt
from mock import patch
from pandas.util.testing import assert_frame_equal
from pymongo.errors import OperationFailure
import pytest
from arctic.store.audit import ArcticTransaction
from arctic.exceptions import ConcurrentModificationException, NoDataFoundException
from ...util import read_str_as_pandas
ts1 = read_str_as_pandas(""" times | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 2.0
2012-10-09 17:06:11.040 | 2.5
2012-11-08 17:06:11.040 | 3.0""")
ts2 = read_str_as_pandas(""" times | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 4.0
2012-10-09 17:06:11.040 | 4.5
2012-10-10 17:06:11.040 | 5.0
2012-11-08 17:06:11.040 | 3.0""")
ts3 = read_str_as_pandas(""" times | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 4.0
2012-10-09 17:06:11.040 | 4.5
2012-10-10 17:06:11.040 | 5.0
2012-11-08 17:06:11.040 | 3.0
2012-11-09 17:06:11.040 | 44.0""")
ts1_append = read_str_as_pandas(""" times | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 2.0
2012-10-09 17:06:11.040 | 2.5
2012-11-08 17:06:11.040 | 3.0
2012-11-09 17:06:11.040 | 3.0""")
symbol = 'TS1'
def test_ArcticTransaction_can_do_first_writes(library):
with ArcticTransaction(library, 'SYMBOL_NOT_HERE', 'user', 'log') as cwb:
cwb.write('SYMBOL_NOT_HERE', ts1)
wrote_vi = library.read('SYMBOL_NOT_HERE')
assert_frame_equal(wrote_vi.data, ts1)
def test_ArcticTransaction_detects_concurrent_writes(library):
library.write('FOO', ts1)
from threading import Event, Thread
e1 = Event()
e2 = Event()
def losing_writer():
        # will attempt to write version 2, find that version 2 is already
        # there, and end up writing version 3
with pytest.raises(ConcurrentModificationException):
with ArcticTransaction(library, 'FOO', 'user', 'log') as cwb:
cwb.write('FOO', ts1_append, metadata={'foo': 'bar'})
e1.wait()
def winning_writer():
        # will attempt to write version 2 as well
with ArcticTransaction(library, 'FOO', 'user', 'log') as cwb:
cwb.write('FOO', ts2, metadata={'foo': 'bar'})
e2.wait()
t1 = Thread(target=losing_writer)
t2 = Thread(target=winning_writer)
t1.start()
t2.start()
# both read the same timeseries and are locked doing some 'work'
e2.set()
# t2 should now be able to finish
t2.join()
e1.set()
t1.join()
# we're expecting the losing_writer to undo its write once it realises that it wrote v3 instead of v2
wrote_vi = library.read('FOO')
assert_frame_equal(wrote_vi.data, ts2)
assert {'foo': 'bar'} == wrote_vi.metadata
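def _arctic_transaction_retry_sketch(library, symbol, data):
    # A minimal illustrative usage sketch (not one of the original tests): a
    # writer that loses the race sees ConcurrentModificationException, and a
    # caller would typically re-enter the transaction and retry the write.
    try:
        with ArcticTransaction(library, symbol, 'user', 'retry sketch') as txn:
            txn.write(symbol, data)
    except ConcurrentModificationException:
        with ArcticTransaction(library, symbol, 'user', 'retry sketch') as txn:
            txn.write(symbol, data)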
def test_audit_writes(library):
with ArcticTransaction(library, symbol, 'u1', 'l1') as mt:
mt.write(symbol, ts1)
with ArcticTransaction(library, symbol, 'u2', 'l2') as mt:
mt.write(symbol, ts2)
audit_log = library.read_audit_log(symbol)
assert audit_log == [{u'new_v': 2, u'symbol': u'TS1', u'message': u'l2', u'user': u'u2', u'orig_v': 1},
{u'new_v': 1, u'symbol': u'TS1', u'message': u'l1', u'user': u'u1', u'orig_v': 0}]
assert_frame_equal(ts1, library.read(symbol, audit_log[0]['orig_v']).data)
assert_frame_equal(ts2, library.read(symbol, audit_log[0]['new_v']).data)
def test_metadata_changes_writes(library):
with ArcticTransaction(library, symbol, 'u1', 'l1') as mt:
mt.write(symbol, ts1, metadata={'original': 'data'})
with ArcticTransaction(library, symbol, 'u2', 'l2') as mt:
mt.write(symbol, ts1, metadata={'some': 'data', 'original': 'data'})
audit_log = library.read_audit_log(symbol)
assert audit_log == [{u'new_v': 2, u'symbol': u'TS1', u'message': u'l2', u'user': u'u2', u'orig_v': 1},
{u'new_v': 1, u'symbol': u'TS1', u'message': u'l1', u'user': u'u1', u'orig_v': 0}]
assert_frame_equal(ts1, library.read(symbol, audit_log[0]['orig_v']).data)
assert_frame_equal(ts1, library.read(symbol, audit_log[0]['new_v']).data)
assert library.read(symbol, audit_log[0]['orig_v']).metadata == {'original': 'data'}
assert library.read(symbol, audit_log[0]['new_v']).metadata == {'some': 'data', 'original': 'data'}
def test_cleanup_orphaned_versions_integration(library):
_id = ObjectId.from_datetime(dt(2013, 1, 1))
with patch('bson.ObjectId', return_value=_id):
with ArcticTransaction(library, symbol, 'u1', 'l1') as mt:
mt.write(symbol, ts1)
assert library._versions.find({'parent': {'$size': 1}}).count() == 1
library._cleanup_orphaned_versions(False)
assert library._versions.find({'parent': {'$size': 1}}).count() == 1
def test_corrupted_read_writes_new(library):
with ArcticTransaction(library, symbol, 'u1', 'l1') as mt:
mt.write(symbol, ts1)
res = library.read(symbol)
assert res.version == 1
with ArcticTransaction(library, symbol, 'u1', 'l2') as mt:
mt.write(symbol, ts2)
res = library.read(symbol)
assert res.version == 2
with patch.object(library, 'read') as l:
l.side_effect = OperationFailure('some failure')
with ArcticTransaction(library, symbol, 'u1', 'l2') as mt:
mt.write(symbol, ts3, metadata={'a': 1, 'b': 2})
res = library.read(symbol)
    # Despite the corrupted read, the write still increments to the next
    # version with the new data
assert res.version == 3
assert_frame_equal(ts3, library.read(symbol, 3).data)
assert res.metadata == {'a': 1, 'b': 2}
with patch.object(library, 'read') as l:
l.side_effect = OperationFailure('some failure')
with ArcticTransaction(library, symbol, 'u1', 'l2') as mt:
mt.write(symbol, ts3, metadata={'a': 1, 'b': 2})
res = library.read(symbol)
    # Despite the corrupted read, a new version is still written even though
    # ts & metadata are unchanged
assert res.version == 4
assert_frame_equal(ts3, library.read(symbol, 4).data)
assert res.metadata == {'a': 1, 'b': 2}
def test_write_after_delete(library):
with ArcticTransaction(library, symbol, 'u1', 'l') as mt:
mt.write(symbol, ts1)
library.delete(symbol)
with ArcticTransaction(library, symbol, 'u1', 'l') as mt:
mt.write(symbol, ts1_append)
assert_frame_equal(library.read(symbol).data, ts1_append)
def test_ArcticTransaction_write_skips_for_exact_match(library):
ts = read_str_as_pandas("""times | PX_LAST
2014-10-31 21:30:00.000 | 204324.674
2014-11-13 21:30:00.000 | 193964.45
2014-11-14 21:30:00.000 | 193650.403""")
with ArcticTransaction(library, symbol, 'u1', 'l1') as mt:
mt.write(symbol, ts)
version = library.read(symbol).version
    # try to store the same TimeSeries again
with ArcticTransaction(library, symbol, 'u1', 'l2') as mt:
mt.write(symbol, ts)
assert library.read(symbol).version == version
def test_ArcticTransaction_write_doesnt_skip_for_close_ts(library):
orig_ts = read_str_as_pandas("""times | PX_LAST
2014-10-31 21:30:00.000 | 204324.674
2014-11-13 21:30:00.000 | 193964.45
2014-11-14 21:30:00.000 | 193650.403""")
with ArcticTransaction(library, symbol, 'u1', 'l1') as mt:
mt.write(symbol, orig_ts)
assert_frame_equal(library.read(symbol).data, orig_ts)
    # try to store a slightly different TimeSeries
new_ts = read_str_as_pandas("""times | PX_LAST
2014-10-31 21:30:00.000 | 204324.672
2014-11-13 21:30:00.000 | 193964.453
2014-11-14 21:30:00.000 | 193650.406""")
with ArcticTransaction(library, symbol, 'u1', 'l2') as mt:
mt.write(symbol, new_ts)
assert_frame_equal(library.read(symbol).data, new_ts)
| lgpl-2.1 |
Winand/pandas | pandas/tests/test_algos.py | 2 | 56095 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex,
Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.util.testing as tm
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = pd.Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = pd.Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
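def _factorize_roundtrip_sketch(values):
    # A minimal illustrative sketch (not one of the original tests): for inputs
    # without missing values, taking the uniques at the returned labels
    # reconstructs the original sequence.
    labels, uniques = algos.factorize(values)
    return np.asarray(uniques).take(labels)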
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = pd.Categorical(list('bac'),
categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = pd.Categorical(list('bac'),
categories=list('abc'),
ordered=True)
# GH 15939
c = pd.Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = pd.Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(pd.Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = pd.CategoricalIndex(pd.Categorical(list('baabc'),
categories=list('bac')))
expected = pd.CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
pd.Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(pd.Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(pd.Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(pd.Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(pd.Categorical(list('aabc'))))
expected = pd.Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = pd.unique(arg)
tm.assert_numpy_array_equal(result, expected)
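def _unique_order_sketch():
    # A minimal illustrative sketch (not one of the original tests), mirroring
    # the order-of-appearance guarantee tested above: pd.unique keeps the first
    # occurrence of each value in its original order, whereas np.unique sorts.
    values = [2, 1, 3, 3]
    return pd.unique(values), np.unique(values)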
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101', periods=2000000, freq='s').values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = pd.Series(pd.Categorical(1).from_codes(vals, cats))
St = pd.Series(pd.Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], pd.Series(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = pd.Index(["a", "b"])
expected = np.array([False, False])
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
result = algos.value_counts(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
expected_index = pd.IntervalIndex.from_breaks(
breaks).astype('category')
expected = Series([1, 1, 1, 1],
index=expected_index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
expected = Series([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_series_equal(result, expected)
result = algos.value_counts(s, bins=2, sort=False)
expected = Series([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_series_equal(result, expected)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
assert len(result) == 1
result = algos.value_counts([1, 1.], bins=1)
assert len(result) == 1
result = algos.value_counts(Series([1, 1., '1'])) # object
assert len(result) == 2
pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1),
['1', 1])
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
assert len(vc) == 1
assert len(vc_with_na) == 2
exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_value_counts_datetime_outofbounds(self):
# GH 13663
s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.value_counts()
exp_index = pd.Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Series([3, 2, 1], index=exp_index)
tm.assert_series_equal(res, exp)
# GH 12424
res = pd.to_datetime(Series(['2362-01-01', np.nan]),
errors='ignore')
exp = Series(['2362-01-01', np.nan], dtype=object)
tm.assert_series_equal(res, exp)
def test_categorical(self):
s = Series(pd.Categorical(list('aaabbc')))
result = s.value_counts()
expected = Series([3, 2, 1],
index=pd.CategoricalIndex(['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.value_counts()
expected.index = expected.index.as_ordered()
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Series(pd.Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=pd.CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
# out of order
s = Series(pd.Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=pd.CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([4, 3, 2, 1], index=pd.CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Series(pd.Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.value_counts()
expected = Series([3, 2, 1, 0], index=pd.Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
# https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=False),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=False),
Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=False),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5., None]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Series([10.3, 5., 5., None]).value_counts(dropna=False)
expected = Series([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_series_equal(result, expected)
def test_value_counts_normalized(self):
# GH12558
s = Series([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.astype(t)
result = s_typed.value_counts(normalize=True, dropna=False)
expected = Series([0.6, 0.2, 0.2],
index=Series([np.nan, 2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
result = s_typed.value_counts(normalize=True, dropna=True)
expected = Series([0.5, 0.5],
index=Series([2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
def test_value_counts_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Series([1], index=[2**63])
result = algos.value_counts(arr)
tm.assert_series_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Series([1, 1], index=[-1, 2**63])
result = algos.value_counts(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_series_equal(result, expected)
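def _value_counts_dropna_sketch(values):
    # A minimal illustrative sketch (not one of the original tests), mirroring
    # test_dropna above: dropna=False keeps a NaN bucket in the counts while
    # the default dropna=True leaves it out.
    return (Series(values).value_counts(dropna=True),
            Series(values).value_counts(dropna=False))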
class TestDuplicated(object):
def test_duplicated_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = algos.duplicated(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array(trues + falses)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array(trues + trues)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('case', [
np.array([1, 2, 1, 5, 3,
2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3,
2.2, 4.4, 1.1, np.nan, 6.6]),
        pytest.mark.xfail(reason="Complex bug. GH 16399")(
np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j])
),
np.array(['a', 'b', 'a', 'e', 'c',
'b', 'd', 'a', 'e', 'f'], dtype=object),
np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
dtype=np.uint64),
])
def test_numeric_object_likes(self, case):
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [pd.Index(case), pd.Index(case, dtype='category')]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category')]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_datetime_likes(self):
dt = ['2011-01-01', '2011-01-02', '2011-01-01', 'NaT', '2011-01-03',
'2011-01-02', '2011-01-04', '2011-01-01', 'NaT', '2011-01-06']
td = ['1 days', '2 days', '1 days', 'NaT', '3 days',
'2 days', '4 days', '1 days', 'NaT', '6 days']
cases = [np.array([Timestamp(d) for d in dt]),
np.array([Timestamp(d, tz='US/Eastern') for d in dt]),
np.array([pd.Period(d, freq='D') for d in dt]),
np.array([np.datetime64(d) for d in dt]),
np.array([pd.Timedelta(d) for d in td])]
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
for case in cases:
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [pd.Index(case), pd.Index(case, dtype='category'),
pd.Index(case, dtype=object)]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category'),
Series(case, dtype=object)]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_unique_index(self):
cases = [pd.Index([1, 2, 3]), pd.RangeIndex(0, 3)]
for case in cases:
assert case.is_unique
tm.assert_numpy_array_equal(case.duplicated(),
np.array([False, False, False]))
@pytest.mark.parametrize('arr, unique', [
([(0, 0), (0, 1), (1, 0), (1, 1), (0, 0), (0, 1), (1, 0), (1, 1)],
[(0, 0), (0, 1), (1, 0), (1, 1)]),
([('b', 'c'), ('a', 'b'), ('a', 'b'), ('b', 'c')],
[('b', 'c'), ('a', 'b')]),
([('a', 1), ('b', 2), ('a', 3), ('a', 1)],
[('a', 1), ('b', 2), ('a', 3)]),
])
def test_unique_tuples(self, arr, unique):
# https://github.com/pandas-dev/pandas/issues/16519
expected = np.empty(len(unique), dtype=object)
expected[:] = unique
result = pd.unique(arr)
tm.assert_numpy_array_equal(result, expected)
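def _duplicated_keep_sketch(values):
    # A minimal illustrative sketch (not one of the original tests): with
    # keep='first' the later repeats are flagged, with keep='last' the earlier
    # ones are, and keep=False flags every occurrence of a duplicated value.
    return (algos.duplicated(values, keep='first'),
            algos.duplicated(values, keep='last'),
            algos.duplicated(values, keep=False))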
class GroupVarTestMixin(object):
def test_group_var_generic_1d(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 1))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(15, 1).astype(self.dtype)
labels = np.tile(np.arange(5), (3, )).astype('int64')
expected_out = (np.squeeze(values)
.reshape((5, 3), order='F')
.std(axis=1, ddof=1) ** 2)[:, np.newaxis]
expected_counts = counts + 3
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
prng = RandomState(1234)
out = (np.nan * np.ones((1, 1))).astype(self.dtype)
counts = np.zeros(1, dtype='int64')
values = 10 * prng.rand(5, 1).astype(self.dtype)
labels = np.zeros(5, dtype='int64')
expected_out = np.array([[values.std(ddof=1) ** 2]])
expected_counts = counts + 5
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_all_finite(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
expected_counts = counts + 2
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
values[:, 1] = np.nan
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.vstack([values[:, 0]
.reshape(5, 2, order='F')
.std(ddof=1, axis=1) ** 2,
np.nan * np.ones(5)]).T.astype(self.dtype)
expected_counts = counts + 2
self.algo(out, counts, values, labels)
tm.assert_almost_equal(out, expected_out, check_less_precise=6)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_constant(self):
# Regression test from GH 10448.
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
labels = np.zeros(3, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 3
assert out[0, 0] >= 0
tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float64
dtype = np.float64
rtol = 1e-5
def test_group_var_large_inputs(self):
prng = RandomState(1234)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype)
values.shape = (10 ** 6, 1)
labels = np.zeros(10 ** 6, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 10 ** 6
tm.assert_almost_equal(out[0, 0], 1.0 / 12, check_less_precise=True)
class TestGroupVarFloat32(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float32
dtype = np.float32
rtol = 1e-2
class TestHashTable(object):
def test_lookup_nan(self):
xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
m = ht.Float64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_lookup_overflow(self):
xs = np.array([1, 2, 2**63], dtype=np.uint64)
m = ht.UInt64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_get_unique(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(s.unique(), exp)
def test_vector_resize(self):
# Test for memory errors after internal vector
# reallocations (pull request #7157)
def _test_vector_resize(htable, uniques, dtype, nvals, safely_resizes):
vals = np.array(np.random.randn(1000), dtype=dtype)
# get_labels may append to uniques
htable.get_labels(vals[:nvals], uniques, 0, -1)
# to_array() set an external_view_exists flag on uniques.
tmp = uniques.to_array()
oldshape = tmp.shape
# subsequent get_labels() calls can no longer append to it
# (for all but StringHashTables + ObjectVector)
if safely_resizes:
htable.get_labels(vals, uniques, 0, -1)
else:
with pytest.raises(ValueError) as excinfo:
htable.get_labels(vals, uniques, 0, -1)
assert str(excinfo.value).startswith('external reference')
uniques.to_array() # should not raise here
assert tmp.shape == oldshape
test_cases = [
(ht.PyObjectHashTable, ht.ObjectVector, 'object', False),
(ht.StringHashTable, ht.ObjectVector, 'object', True),
(ht.Float64HashTable, ht.Float64Vector, 'float64', False),
(ht.Int64HashTable, ht.Int64Vector, 'int64', False),
(ht.UInt64HashTable, ht.UInt64Vector, 'uint64', False)]
for (tbl, vect, dtype, safely_resizes) in test_cases:
# resizing to empty is a special case
_test_vector_resize(tbl(), vect(), dtype, 0, safely_resizes)
_test_vector_resize(tbl(), vect(), dtype, 10, safely_resizes)
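def _hashtable_lookup_sketch(values):
    # A minimal illustrative sketch (not one of the original tests), mirroring
    # test_lookup_nan above: map_locations records the position of each value
    # in a float64 array and lookup then returns those positions.
    table = ht.Float64HashTable()
    table.map_locations(values)
    return table.lookup(values)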
def test_quantile():
s = Series(np.random.randn(100))
result = algos.quantile(s, [0, .25, .5, .75, 1.])
expected = algos.quantile(s.values, [0, .25, .5, .75, 1.])
tm.assert_almost_equal(result, expected)
def test_unique_label_indices():
a = np.random.randint(1, 1 << 10, 1 << 15).astype('i8')
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
a[np.random.choice(len(a), 10)] = -1
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1][1:]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
class TestRank(object):
def test_scipy_compat(self):
tm._skip_if_no_scipy()
from scipy.stats import rankdata
def _check(arr):
mask = ~np.isfinite(arr)
arr = arr.copy()
result = libalgos.rank_1d_float64(arr)
arr[mask] = np.inf
exp = rankdata(arr)
exp[mask] = nan
assert_almost_equal(result, exp)
_check(np.array([nan, nan, 5., 5., 5., nan, 1, 2, 3, nan]))
_check(np.array([4., nan, 5., 5., 5., nan, 1, 2, 4., nan]))
def test_basic(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in np.typecodes['AllInteger']:
s = Series([1, 100], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_uint64_overflow(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in [np.float64, np.uint64]:
s = Series([1, 2**63], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_too_many_ndims(self):
arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
msg = "Array with ndim > 2 are not supported"
with tm.assert_raises_regex(TypeError, msg):
algos.rank(arr)
def test_pad_backfill_object_segfault():
old = np.array([], dtype='O')
new = np.array([datetime(2010, 12, 31)], dtype='O')
result = libalgos.pad_object(old, new)
expected = np.array([-1], dtype=np.int64)
assert (np.array_equal(result, expected))
result = libalgos.pad_object(new, old)
expected = np.array([], dtype=np.int64)
assert (np.array_equal(result, expected))
result = libalgos.backfill_object(old, new)
expected = np.array([-1], dtype=np.int64)
assert (np.array_equal(result, expected))
result = libalgos.backfill_object(new, old)
expected = np.array([], dtype=np.int64)
assert (np.array_equal(result, expected))
def test_arrmap():
values = np.array(['foo', 'foo', 'bar', 'bar', 'baz', 'qux'], dtype='O')
result = libalgos.arrmap_object(values, lambda x: x in ['foo', 'bar'])
assert (result.dtype == np.bool_)
class TestTseriesUtil(object):
def test_combineFunc(self):
pass
def test_reindex(self):
pass
def test_isna(self):
pass
def test_groupby(self):
pass
def test_groupby_withnull(self):
pass
def test_backfill(self):
old = Index([1, 5, 10])
new = Index(lrange(12))
filler = libalgos.backfill_int64(old.values, new.values)
expect_filler = np.array([0, 0, 1, 1, 1, 1,
2, 2, 2, 2, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([1, 4])
new = Index(lrange(5, 10))
filler = libalgos.backfill_int64(old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_pad(self):
old = Index([1, 5, 10])
new = Index(lrange(12))
filler = libalgos.pad_int64(old.values, new.values)
expect_filler = np.array([-1, 0, 0, 0, 0, 1,
1, 1, 1, 1, 2, 2], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([5, 10])
new = Index(lrange(5))
filler = libalgos.pad_int64(old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_is_lexsorted():
failure = [
np.array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3,
3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
15, 14,
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28,
27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13,
12, 11,
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25,
24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10,
9, 8,
7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22,
21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7,
6, 5,
4, 3, 2, 1, 0])]
assert (not libalgos.is_lexsorted(failure))
# def test_get_group_index():
# a = np.array([0, 1, 2, 0, 2, 1, 0, 0], dtype=np.int64)
# b = np.array([1, 0, 3, 2, 0, 2, 3, 0], dtype=np.int64)
# expected = np.array([1, 4, 11, 2, 8, 6, 3, 0], dtype=np.int64)
# result = lib.get_group_index([a, b], (3, 4))
# assert(np.array_equal(result, expected))
def test_groupsort_indexer():
a = np.random.randint(0, 1000, 100).astype(np.int64)
b = np.random.randint(0, 1000, 100).astype(np.int64)
result = libalgos.groupsort_indexer(a, 1000)[0]
# need to use a stable sort
expected = np.argsort(a, kind='mergesort')
assert (np.array_equal(result, expected))
# compare with lexsort
key = a * 1000 + b
result = libalgos.groupsort_indexer(key, 1000000)[0]
expected = np.lexsort((b, a))
assert (np.array_equal(result, expected))
def test_infinity_sort():
# GH 13445
# numpy's argsort can be unhappy if something is less than
# itself. Instead, let's give our infinities a self-consistent
# ordering, but outside the float extended real line.
Inf = libalgos.Infinity()
NegInf = libalgos.NegInfinity()
ref_nums = [NegInf, float("-inf"), -1e100, 0, 1e100, float("inf"), Inf]
assert all(Inf >= x for x in ref_nums)
assert all(Inf > x or x is Inf for x in ref_nums)
assert Inf >= Inf and Inf == Inf
assert not Inf < Inf and not Inf > Inf
assert all(NegInf <= x for x in ref_nums)
assert all(NegInf < x or x is NegInf for x in ref_nums)
assert NegInf <= NegInf and NegInf == NegInf
assert not NegInf < NegInf and not NegInf > NegInf
for perm in permutations(ref_nums):
assert sorted(perm) == ref_nums
# smoke tests
np.array([libalgos.Infinity()] * 32).argsort()
np.array([libalgos.NegInfinity()] * 32).argsort()
def test_ensure_platform_int():
arr = np.arange(100, dtype=np.intp)
result = libalgos.ensure_platform_int(arr)
assert (result is arr)
def test_int64_add_overflow():
# see gh-14068
msg = "Overflow in int64 addition"
m = np.iinfo(np.int64).max
n = np.iinfo(np.int64).min
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), m)
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([n, n]), n)
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([n, n]), np.array([n, n]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, n]), np.array([n, n]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
b_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([False, True]),
b_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
with tm.assert_produces_warning(RuntimeWarning):
algos.checked_add_with_arr(np.array([m, m]),
np.array([np.nan, m]))
# Check that the nan boolean arrays override whether or not
# the addition overflows. We don't check the result but just
# the fact that an OverflowError is not raised.
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([True, True]))
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
b_mask=np.array([True, True]))
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([True, False]),
b_mask=np.array([False, True]))
class TestMode(object):
def test_no_mode(self):
exp = Series([], dtype=np.float64)
tm.assert_series_equal(algos.mode([]), exp)
def test_mode_single(self):
# GH 15714
exp_single = [1]
data_single = [1]
exp_multi = [1]
data_multi = [1, 1]
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
s = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
s = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
exp = Series([1], dtype=np.int)
tm.assert_series_equal(algos.mode([1]), exp)
exp = Series(['a', 'b', 'c'], dtype=np.object)
tm.assert_series_equal(algos.mode(['a', 'b', 'c']), exp)
def test_number_mode(self):
exp_single = [1]
data_single = [1] * 5 + [2] * 3
exp_multi = [1, 3]
data_multi = [1] * 5 + [2] * 3 + [3] * 5
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
s = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
s = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
def test_strobj_mode(self):
exp = ['b']
data = ['a'] * 2 + ['b'] * 3
s = Series(data, dtype='c')
exp = Series(exp, dtype='c')
tm.assert_series_equal(algos.mode(s), exp)
exp = ['bar']
data = ['foo'] * 2 + ['bar'] * 3
for dt in [str, object]:
s = Series(data, dtype=dt)
exp = Series(exp, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
def test_datelike_mode(self):
exp = Series(['1900-05-03', '2011-01-03',
'2013-01-02'], dtype="M8[ns]")
s = Series(['2011-01-03', '2013-01-02',
'1900-05-03'], dtype='M8[ns]')
tm.assert_series_equal(algos.mode(s), exp)
exp = Series(['2011-01-03', '2013-01-02'], dtype='M8[ns]')
s = Series(['2011-01-03', '2013-01-02', '1900-05-03',
'2011-01-03', '2013-01-02'], dtype='M8[ns]')
tm.assert_series_equal(algos.mode(s), exp)
def test_timedelta_mode(self):
exp = Series(['-1 days', '0 days', '1 days'],
dtype='timedelta64[ns]')
s = Series(['1 days', '-1 days', '0 days'],
dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(s), exp)
exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]')
s = Series(['1 day', '1 day', '-1 day', '-1 day 2 min',
'2 min', '2 min'], dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(s), exp)
def test_mixed_dtype(self):
exp = Series(['foo'])
s = Series([1, 'foo', 'foo'])
tm.assert_series_equal(algos.mode(s), exp)
def test_uint64_overflow(self):
exp = Series([2**63], dtype=np.uint64)
s = Series([1, 2**63, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
exp = Series([1, 2**63], dtype=np.uint64)
s = Series([1, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
def test_categorical(self):
c = Categorical([1, 2])
exp = c
tm.assert_categorical_equal(algos.mode(c), exp)
tm.assert_categorical_equal(c.mode(), exp)
c = Categorical([1, 'a', 'a'])
exp = Categorical(['a'], categories=[1, 'a'])
tm.assert_categorical_equal(algos.mode(c), exp)
tm.assert_categorical_equal(c.mode(), exp)
c = Categorical([1, 1, 2, 3, 3])
exp = Categorical([1, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(algos.mode(c), exp)
tm.assert_categorical_equal(c.mode(), exp)
def test_index(self):
idx = Index([1, 2, 3])
exp = Series([1, 2, 3], dtype=np.int64)
tm.assert_series_equal(algos.mode(idx), exp)
idx = Index([1, 'a', 'a'])
exp = Series(['a'], dtype=object)
tm.assert_series_equal(algos.mode(idx), exp)
idx = Index([1, 1, 2, 3, 3])
exp = Series([1, 3], dtype=np.int64)
tm.assert_series_equal(algos.mode(idx), exp)
exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]')
idx = Index(['1 day', '1 day', '-1 day', '-1 day 2 min',
'2 min', '2 min'], dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(idx), exp)
| bsd-3-clause |
IshankGulati/scikit-learn | examples/classification/plot_digits_classification.py | 82 | 2414 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 4 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# matplotlib.pyplot.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
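# (Added note) For the bundled digits dataset this reshape turns the
# (n_samples, 8, 8) image stack into an (n_samples, 64) feature matrix,
# i.e. roughly (1797, 8, 8) -> (1797, 64).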
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
xiaoxiamii/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 230 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <robertlayton@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
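# --- Added usage sketch (illustrative only, not part of the original module).
# A minimal call, assuming scikit-learn's KMeans and make_blobs are available:
#
#   from sklearn.cluster import KMeans
#   from sklearn.datasets import make_blobs
#   X, _ = make_blobs(n_samples=200, centers=3, random_state=0)
#   labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
#   score = silhouette_score(X, labels, metric='euclidean')
#
# Values near 1 indicate dense, well-separated clusters, as described above.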
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
| bsd-3-clause |
robin-lai/scikit-learn | sklearn/covariance/tests/test_covariance.py | 69 | 11116 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# Create X with 1 sample and 5 features
X_1sample = np.arange(5).reshape(1, 5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0:1]
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
makersauce/stocks | strategy1.py | 1 | 4415 | ##Strategy File
import datetime
from stock import Stock, piggy
from sys import argv
if __name__ == "__main__":
if len(argv) > 1:
if argv[1] == '--simulate':
if len(argv) < 3:
print 'Please specify symbol'
exit()
symbol = argv[2]
stock = Stock(symbol)
stock.update_history()
stock.analyze()
buy_dates = []
buy_prices = []
sell_dates = []
sell_prices = []
wiggly = piggy(sim=True,holdings=300)
for itx, date in enumerate(stock.history_data['Date']):
## Sell if the 10_day drops
trigger_drop = .02
trigger_starved = 1.5
if float(stock.history_data['10_Day'][itx]) <= float(stock.history_data['10_Day'][itx-5]) - trigger_drop*float(stock.history_data['10_Day'][itx]) \
and bool(wiggly.current_stock.get(stock.symbol)) and wiggly.current_stock[stock.symbol] >= 1:
wiggly.sell(stock,-1,date=date)
sell_dates.append(date)
sell_prices.append(float(stock.history_data['Close'][itx]))
## Buy if the 10_day busts through the 30_day
if (float(stock.history_data['10_Day'][itx]) > float(stock.history_data['30_Day'][itx]) ## If the 10 Day was below the 30 day
and float(stock.history_data['10_Day'][itx-1]) < float(stock.history_data['30_Day'][itx-1]) ## and now the 10 day is above the 30
and float(stock.history_data['Open'][itx-1]) > float(stock.history_data['10_Day'][itx-1]) ## and the current value is above the 10 day
and float(stock.history_data['Close'][itx]) < (float(stock.history_data['30_Day'][itx]) * trigger_starved)
and wiggly.holdings > float(stock.history_data['Close'][itx]) + wiggly.broker.tradeFee): ## and I have enough money
num = int(wiggly.holdings * .5 / float(stock.history_data['Close'][itx]))
wiggly.buy(stock,num,date=date)
buy_dates.append(date)
buy_prices.append(float(stock.history_data['Close'][itx]))
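                ## (Added summary of the two rules above) Sell -- the -1 argument
                ## appears to mean the whole position -- when the 10-day average
                ## drops about trigger_drop (2%) below its value five records
                ## earlier; buy with roughly half the available cash when the
                ## 10-day average crosses above the 30-day average, the previous
                ## open sits above the 10-day line, and the close is within
                ## trigger_starved (1.5x) of the 30-day line.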
print "\n\n#####Closing Out######"
wiggly.sell(stock,-1,date=date)
##Make a plot
import matplotlib.pyplot as plt
import matplotlib.dates as plotdate
import matplotlib.lines as line
import numpy as np
            months = plotdate.MonthLocator() # every month
            days = plotdate.DayLocator() # every day
monthsFmt = plotdate.DateFormatter('%m %d')
fig, ax = plt.subplots()
#ax2 = ax.twinx()
t = [datetime.datetime.strptime(date,'%Y-%m-%d') for date in stock.history_data['Date']]
ax.axis('auto')
# format the ticks
ax.xaxis.set_major_locator(months)
ax.xaxis.set_major_formatter(monthsFmt)
ax.xaxis.set_minor_locator(days)
fig.autofmt_xdate()
ax.plot(t, stock.history_data['5_Day'], '#0000FF')
ax.plot(t, stock.history_data['10_Day'], '#5555FF')
ax.plot(t, stock.history_data['30_Day'], '#9999FF')
#ax.plot(t, stock.history_data['80_Day'], '#AAAAFF')
#ax2.plot(t, stock.history_data['Volume'], '#CCFFCC')
#ax2.plot(t, stock.history_data['10_Day_Vol'], '#88AA88')
buy_dates = [datetime.datetime.strptime(date,'%Y-%m-%d') for date in buy_dates]
ax.plot(buy_dates,buy_prices, 'g|',ms=100)
sell_dates = [datetime.datetime.strptime(date,'%Y-%m-%d') for date in sell_dates]
ax.plot(sell_dates,sell_prices, 'b|',ms=100)
ax.plot(t, stock.history_data['Open'], 'r-')
plt.title(stock.symbol)
#ax.text(t[12], 250, 'hello')
plt.show()
elif argv[1] == '--deploy':
print 'Sorry, Deploy function not ready yet. Try some more simulations'
elif argv[1] == '--help' or argv[1] == '-h':
print 'Sorry, can\'t help you yet'
else:
            print 'Sorry, ' + argv[1] + ' is not a valid argument. Try \'-h\' for help'
else:
print 'Invalid Number of Arguments'
print 'try \"--help\" for information on this module'
| mit |
leewujung/ooi_sonar | during_incubator/concat_raw.py | 1 | 8982 |
import glob, os, sys
import datetime as dt # quick fix to avoid datetime and datetime.datetime confusion
from matplotlib.dates import date2num, num2date
from calendar import monthrange
import h5py
import matplotlib.pylab as plt
# from modest_image import imshow
# import numpy as np # already imported in zplsc_b
sys.path.append('/Users/wujung/code/mi-instrument/')
from mi.instrument.kut.ek60.ooicore.zplsc_b import *
def get_data_mtx(data_dict,frequencies):
'''
Convert data_dict to numpy array
Input:
data_dict power_data_dict or Sv from power2Sv()
frequencies unpacked dict from parse_echogram_file()
'''
fval = frequencies.values()
fidx = sorted(range(len(fval)), key=lambda k: fval[k]) # get key sequence for low to high freq
fidx = [x+1 for x in fidx]
return np.array((data_dict[fidx[0]],\
data_dict[fidx[1]],\
data_dict[fidx[2]])) # organize all values into matrix
def get_date_idx(date_wanted,fname_all):
'''
Index the files in the wanted date range
'''
raw_file_times = [FILE_NAME_MATCHER.match(x) for x in fname_all];
# idx = [x for x in range(len(X)) if X[x].group('Date')=='20150912'] # solution 1
date_list = map(lambda x: x.group('Date'),raw_file_times) # if in python 3 need to do list(map()) to convert map object to list
time_list = map(lambda x: x.group('Time'),raw_file_times)
if len(date_wanted)==1:
idx_date = [i for i,dd in enumerate(date_list) if dd==date_wanted[0]]
elif len(date_wanted)==2:
idx_date = [i for i,dd in enumerate(date_list) if dd>=date_wanted[0] and dd<=date_wanted[-1]]
else:
print 'Invalid range: date_wanted!'
idx_date = []
if len(idx_date)!=0:
# if midnight was recorded in the previous file
# AND if the previous record is the day before
day_diff = dt.datetime.strptime(date_list[idx_date[0]], "%Y%m%d").toordinal() -\
dt.datetime.strptime(date_list[idx_date[0]-1], "%Y%m%d").toordinal()
if time_list[idx_date[0]]>'000000' and day_diff==1:
idx_date.insert(0,idx_date[0]-1)
else:
print 'date wanted does not exist!'
return idx_date
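# Hedged usage sketch (added; not part of the original module). Assuming
# fname_all is a list of *.raw file names that FILE_NAME_MATCHER (imported via
# zplsc_b above) can parse, calls might look like:
#   idx = get_date_idx(['20150912'], fname_all)                # a single day
#   idx = get_date_idx(['20150901', '20150930'], fname_all)    # a date range
# Both forms return positional indices into fname_all.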
def unpack_raw_to_h5(fname,h5_fname,deci_len=[]):
'''
Unpack *.raw files and save directly into h5 files
INPUT:
fname file to be unpacked
h5_fname hdf5 file to be written in to
deci_len number of pings to skip over,
default=[], i.e., no skipping
'''
# Unpack data
particle_data, data_times, power_data_dict, freq, bin_size, config_header, config_transducer = \
parse_echogram_file(fname)
# Open hdf5 file
f = h5py.File(h5_fname,"a")
# Convert from power to Sv
cal_params = get_cal_params(power_data_dict,particle_data,config_header,config_transducer)
Sv = power2Sv(power_data_dict,cal_params) # convert from power to Sv
Sv_mtx = get_data_mtx(Sv,freq)
if deci_len:
Sv_mtx = Sv_mtx[:,:,::deci_len]
data_times = data_times[::deci_len]
sz = Sv_mtx.shape
# Write to hdf5 file
if "Sv" in f: # if file alread exist and contains Sv mtx
# Check if start time of this file is before last time point of last file
    # If yes, discard the current file and return from the function
time_diff = dt.timedelta(data_times[0]-f['data_times'][-1])
hr_diff = (time_diff.days*86400+time_diff.seconds)/3600
if hr_diff<0:
print '-- New file time bad'
return
else:
print '-- H5 file exists, append new data mtx...'
# append new data
sz_exist = f['Sv'].shape # shape of existing Sv mtx
f['Sv'].resize((sz_exist[0],sz_exist[1],sz_exist[2]+sz[2]))
f['Sv'][:,:,sz_exist[2]:] = Sv_mtx
f['data_times'].resize((sz_exist[2]+sz[2],))
f['data_times'][sz_exist[2]:] = data_times
else:
print '-- New H5 file, create new dataset...'
# create dataset and save Sv
f.create_dataset("Sv", sz, maxshape=(sz[0],sz[1],None), data=Sv_mtx, chunks=True)
f.create_dataset("data_times", (sz[2],), maxshape=(None,), data=data_times, chunks=True)
# label dimensions
f['Sv'].dims[0].label = 'frequency'
f['Sv'].dims[1].label = 'depth'
f['Sv'].dims[2].label = 'time'
# create frequency dimension scale, use f['Sv'].dims[0][0][0] to access
f['frequency'] = freq.values()
f['Sv'].dims.create_scale(f['frequency'])
f['Sv'].dims[0].attach_scale(f['frequency'])
# save bin_size
f.create_dataset("bin_size",data=bin_size)
f.close()
# print 'original size is ' + str({k: v.shape for (k,v) in power_data_dict.items()})
# freq = frequencies.values() # get frequency values
# freq = {k: str(int(v/1E3))+'k' for (k,v) in frequencies.items()} # get frequencies
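# Hedged usage sketch for unpack_raw_to_h5 (added; the file names below are
# hypothetical):
#   unpack_raw_to_h5('OOI-D20150912-T000000.raw', 'CE04OSPS_201509.h5',
#                    deci_len=10)  # keep every 10th ping
# Repeated calls with the same h5_fname append along the time axis, as handled
# by the "Sv" branch above.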
# plotting
def plot_Sv(h5_fname,save_path):
f = h5py.File(h5_fname,"r")
# Get time stamp
time_format = '%Y-%m-%d\n%H:%M:%S'
time_length = f['data_times'].size
# X axis label
# subset the xticks so that we don't plot every one
xticks = np.linspace(0, time_length, 11)
xstep = int(round(xticks[1]))
# format trans_array_time array so that it can be used to label the x-axis
xticklabels = [i for i in num2date(f['data_times'][::xstep])] + [num2date(f['data_times'][-1])]
xticklabels = [i.strftime(time_format) for i in xticklabels]
# Plot figure
print 'plotting figure...'
fig, ax = plt.subplots(3, sharex=True)
for ff in range(f['Sv'].shape[0]):
        ax[ff].imshow(f['Sv'][ff,:,:],aspect='auto',vmax=-34,vmin=-80,interpolation='none')  # plain Axes.imshow; the modest_image import above is commented out
ax[-1].set_xlabel('time (UTC)')
ax[-1].set_xticks(xticks)
ax[-1].set_xticklabels(xticklabels, rotation=45, horizontalalignment='center')
#ax[-1].set_xlim(0, time_length)
fig.set_figwidth(16)
fig.set_figheight(10)
# Save figure
save_fname = os.path.join(save_path,h5_fname+'.png')
print 'saving figure...'
fig.savefig(save_fname)
plt.close(fig)
def get_num_days_pings(h5_fname):
''' Get the total number of days and number of pings per day for the given year and month '''
H5_FILENAME_MATCHER = re.compile('(?P<SITE_CODE>\S*)_(?P<YearMonth>\S*)\.\S*')
# Get month and day range
ym = datetime.datetime.strptime(H5_FILENAME_MATCHER.match(h5_fname).group('YearMonth'),'%Y%m')
year = ym.year
month = ym.month
_,daynum = monthrange(year,month)
# Get datetime object for on the hour every hour in all days in the month
all_day = range(1,daynum+1) # list of all days
all_hr = range(24) # list of all hour: 0-23
all_minutes = range(1,11) # list of all minutes: 0-9
every_ping = [datetime.datetime(year,month,day,hr,minutes,0) \
for day in all_day for hr in all_hr for minutes in all_minutes]
pings_per_day = len(all_hr)*len(all_minutes)
return pings_per_day,daynum
def get_data_from_h5(data_path,h5_fname):
''' Retrieve data from h5 files '''
    # (Fix) H5_FILENAME_MATCHER was originally defined only inside
    # get_num_days_pings(), so redefine it here before it is used below.
    H5_FILENAME_MATCHER = re.compile('(?P<SITE_CODE>\S*)_(?P<YearMonth>\S*)\.\S*')
    f = h5py.File(os.path.join(data_path,h5_fname),'r')
# Get month and day range
ym = datetime.datetime.strptime(H5_FILENAME_MATCHER.match(h5_fname).group('YearMonth'),'%Y%m')
year = ym.year
month = ym.month
_,daynum = monthrange(year,month)
# Get datetime object for on the hour every hour in all days in the month
all_day = range(1,daynum+1) # list of all days
all_hr = range(24) # list of all hour: 0-23
all_minutes = range(1,11) # list of all minutes: 0-9
every_ping = [datetime.datetime(year,month,day,hr,minutes,0) \
for day in all_day for hr in all_hr for minutes in all_minutes]
pings_per_day = len(all_hr)*len(all_minutes)
# Get f['data_times'] idx for every hour in all days in the month
all_idx = [find_nearest_time_idx(f['data_times'],hr) for hr in every_ping]
all_idx = np.array(all_idx) # to allow numpy operation
# Clean up all_idx
# --> throw away days with more than 5 pings missing
# --> fill in occasional missing pings with neighboring values
all_idx_rshp = np.reshape(all_idx,(-1,pings_per_day))
num_nan_of_day = np.sum(np.isnan(all_idx_rshp),1)
for day in range(len(num_nan_of_day)):
if num_nan_of_day[day]>5:
all_idx_rshp[day,:] = np.nan
all_idx = np.reshape(all_idx_rshp,-1)
# Extract timing and Sv data
notnanidx = np.int_(all_idx[~np.isnan(all_idx)])
data_times = np.empty(all_idx.shape) # initialize empty array
data_times[~np.isnan(all_idx)] = f['data_times'][notnanidx.tolist()]
Sv_tmp = f['Sv'][:,:,0]
Sv_mtx = np.empty((Sv_tmp.shape[0],Sv_tmp.shape[1],all_idx.shape[0]))
Sv_mtx[:,:,~np.isnan(all_idx)] = f['Sv'][:,:,notnanidx.tolist()]
bin_size = f['bin_size'][0] # size of each depth bin
return Sv_mtx,data_times,bin_size,pings_per_day,all_idx
| apache-2.0 |
rseubert/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 48 | 4949 | import itertools
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
assert_array_almost_equal(D1, D2)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
google-research/google-research | talk_about_random_splits/probing/split_with_cross_validation_main.py | 1 | 4750 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Splits SentEval data into k stratified folds.
This script generates the following directory structure under the
`base_out_dir`. This follows the original SentEval directory structure and thus
allows running the original scripts with no change.
base_out_dir/
probing/TASK_NAME.txt # Data for this task.
probing/TASK_NAME.txt-settings.json # Some information on the generated data.
TASK_NAME.txt contains all examples with their respective set labels attached.
This file follows the original SentEval data format.
Example call:
python -m \
talk_about_random_splits.probing.split_with_cross_validation \
--senteval_path="/tmp/senteval/task_data/probing" \
--base_out_dir="YOUR_PATH_HERE" --alsologtostderr
"""
import csv
import json
import os
from absl import app
from absl import flags
from absl import logging
import pandas as pd
from sklearn import model_selection
from talk_about_random_splits.probing import probing_utils
FLAGS = flags.FLAGS
flags.DEFINE_string('senteval_path', None,
'Path to the original SentEval data in tsv format.')
flags.DEFINE_string(
'base_out_dir', None,
'Base working dir in which to create subdirs for this script\'s results.')
flags.DEFINE_string(
'split_name', 'fold_xval',
'Determines the base name of result sub-directories in `base_out_dir`.')
flags.DEFINE_integer('num_folds', 10,
'Number of folds into which to split the data.')
flags.DEFINE_list('tasks', [
'word_content.txt', 'sentence_length.txt', 'bigram_shift.txt',
'tree_depth.txt', 'top_constituents.txt', 'past_present.txt',
'subj_number.txt', 'obj_number.txt', 'odd_man_out.txt',
'coordination_inversion.txt'
], 'Tasks for which to generate new data splits.')
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
for task_name in FLAGS.tasks:
logging.info('Starting task: %s.', task_name)
df = probing_utils.read_senteval_data(FLAGS.senteval_path, task_name)
experiment_base_dir = os.path.join(
FLAGS.base_out_dir,
'{}{}'.format(FLAGS.num_folds, FLAGS.split_name) + '-{}')
skf = model_selection.StratifiedKFold(n_splits=FLAGS.num_folds)
for current_fold_id, (train_indexes, test_indexes) in enumerate(
skf.split(df['text'], df['target'])):
split_dir = experiment_base_dir.format(current_fold_id)
probing_dir = os.path.join(split_dir, 'probing')
settings_path = os.path.join(probing_dir,
'{}-settings.json'.format(task_name))
data_out_path = os.path.join(probing_dir, '{}'.format(task_name))
logging.info('Starting run: %d.', current_fold_id)
# Use the same data for train and dev, because the probing code does some
      # hyperparameter search on dev. We don't want to tune on the test portion.
train_set = df.iloc[train_indexes].copy()
train_set.loc[:, 'set'] = 'tr'
dev_set = df.iloc[train_indexes].copy()
dev_set.loc[:, 'set'] = 'va'
test_set = df.iloc[test_indexes].copy()
test_set.loc[:, 'set'] = 'te'
new_data = pd.concat([train_set, dev_set, test_set], ignore_index=True)
logging.info('Writing output to file: %s.', data_out_path)
      os.makedirs(probing_dir)
with open(settings_path, 'w') as settings_file:
settings = {
'task_name': task_name,
'fold_id': current_fold_id,
'train_size': len(train_indexes),
'dev_size': len(train_indexes),
'test_size': len(test_indexes),
}
logging.info('Settings:\n%r', settings)
json.dump(settings, settings_file, indent=2)
with open(data_out_path, 'w') as data_file:
# Don't add quoting to retain the original format unaltered.
new_data[['set', 'target', 'text']].to_csv(
data_file,
sep='\t',
header=False,
index=False,
quoting=csv.QUOTE_NONE,
doublequote=False)
if __name__ == '__main__':
flags.mark_flags_as_required(['senteval_path', 'base_out_dir'])
app.run(main)
| apache-2.0 |
zuku1985/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example visually illustrates, in the feature space, a comparison of the
results obtained with two different component analysis techniques:
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
tayebzaidi/HonorsThesisTZ | ThesisCode/DES_Pipeline/gen_lightcurves/visualizeLCurves.py | 1 | 3137 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import json
import os
import sys
import numpy as np
import math
import pickle
def main():
path = "./des_sn.p"
output_lightcurves_file = 'selectedLightcurves'
output_lightcurves = []
with open(path, 'rb') as f:
lightcurves = pickle.load(f)
filenames = list(lightcurves.keys())
#Randomize the file order to allow for fairer selection of the sub-sample
filenames = np.random.permutation(filenames)
num_files = len(filenames)
j = 0
for filename in filenames:
j += 1
objname = str(filename)
file_data = lightcurves[filename]
#Ignore all non-CSP or CfA entries
#for k in list(file_data.keys()):
# if not (k.endswith('CSP') or ('CfA' in k)):
# del file_data[k]
if len(file_data) == 0:
continue
N = len(file_data)
if N < 3:
cols = 1
else:
cols = 3
rows = int(math.ceil(N / cols))
gs = gridspec.GridSpec(rows, cols)
#fig = plt.figure(figsize=(10, 12))
#fig.suptitle(objname)
for i, filt in enumerate(file_data.keys()):
mjd = file_data[filt]['mjd']
mag = file_data[filt]['mag']
mag_err = file_data[filt]['dmag']
model_phase = file_data[filt]['modeldate']
model_mag = file_data[filt]['modelmag']
#bspline_mag = file_data[filt]['bsplinemag']
#modelmag_sub = file_data[filt]['modelmag_sub']
type = file_data[filt]['type']
#ax = fig.add_subplot(gs[i])
#ax.errorbar(mjd, mag, fmt='r', yerr=mag_err,label='Original Data', alpha=0.7)
#ymin, ymax = ax.get_ylim()
#ax.plot(model_phase, model_mag, '-k', label='GP Smoothed Data')
#ax.plot(model_phase, bspline_mag, '-g', label='Spline Smoothed Data')
#ax.plot(model_phase, modelmag_sub, '-k', label='GP/Bspline subtracted', linewidth=1.5)
#ax.set_ylim(ymin, ymax)
#Print outlier stats
mag_range = np.ptp(model_mag)
old_mag_range = np.ptp(mag)
#print(objname, filt)
#plt.draw()
#plt.pause(0.05)
print("Number of files currently: ", len(output_lightcurves))
print("Supernova Type: ", type)
#keystroke = input("<Hit Enter To Close>")
if j>2:
keystroke = 'q'
else:
print(i)
keystroke = '.'
if keystroke == '.':
output_lightcurves.append(objname)
elif keystroke == 'q':
print("Writing to file")
with open(output_lightcurves_file, 'w') as out:
for objname in output_lightcurves:
out.write(objname + '\n')
#plt.close()
sys.exit()
#plt.close()
with open(output_lightcurves_file, 'w') as out:
for objname in output_lightcurves:
out.write(objname + '\n')
if __name__=="__main__":
sys.exit(main())
| gpl-3.0 |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/mixture/tests/test_dpgmm.py | 84 | 7866 | # Important note for the deprecation cleaning of 0.20 :
# All the function and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_gmm.py'
import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.utils.testing import assert_warns_message, ignore_warnings
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.mixture.dpgmm import digamma, gammaln
from sklearn.mixture.dpgmm import wishart_log_det, wishart_logz
np.seterr(all='warn')
@ignore_warnings(category=DeprecationWarning)
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
@ignore_warnings(category=DeprecationWarning)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_digamma():
assert_warns_message(DeprecationWarning, "The function digamma is"
" deprecated in 0.18 and will be removed in 0.20. "
"Use scipy.special.digamma instead.", digamma, 3)
@ignore_warnings(category=DeprecationWarning)
def test_gammaln():
assert_warns_message(DeprecationWarning, "The function gammaln"
" is deprecated in 0.18 and will be removed"
" in 0.20. Use scipy.special.gammaln instead.",
gammaln, 3)
@ignore_warnings(category=DeprecationWarning)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
result = assert_warns_message(DeprecationWarning, "The function "
"log_normalize is deprecated in 0.18 and"
" will be removed in 0.20.",
log_normalize, a)
assert np.allclose(v, result, rtol=0.01)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_log_det():
a = np.array([0.1, 0.8, 0.01, 0.09])
b = np.array([0.2, 0.7, 0.05, 0.1])
assert_warns_message(DeprecationWarning, "The function "
"wishart_log_det is deprecated in 0.18 and"
" will be removed in 0.20.",
wishart_log_det, a, b, 2, 4)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_logz():
assert_warns_message(DeprecationWarning, "The function "
"wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.", wishart_logz,
3, np.identity(3), 1, 3)
@ignore_warnings(category=DeprecationWarning)
def test_DPGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `DPGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be removed in 0.20.",
DPGMM)
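# Module-level helper used below as ``VBGMMTester.model``: it builds a quiet
# VBGMM while forwarding the keyword arguments that GMMTester passes to
# ``self.model``.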
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_VBGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `VBGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. VBGMM is deprecated "
"in 0.18 and will be removed in 0.20.", VBGMM)
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_vbgmm_no_modify_alpha():
alpha = 2.
n_components = 3
X, y = make_blobs(random_state=1)
vbgmm = VBGMM(n_components=n_components, alpha=alpha, n_iter=1)
assert_equal(vbgmm.alpha, alpha)
assert_equal(vbgmm.fit(X).alpha_, float(alpha) / n_components)
| mit |
anurag313/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
spallavolu/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
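# Each vertical segment of the LineCollection joins an observed point
# (i, y[i]) to its isotonic fit (i, y_[i]), visualising the per-sample
# residuals.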
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
renhaocui/activityExtractor | trainFullModel.py | 1 | 22297 | from keras.preprocessing.text import Tokenizer
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Merge, Input, concatenate, Lambda
from keras.layers.embeddings import Embedding
from keras.models import Model
from keras.preprocessing import sequence
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
import numpy as np
from utilities import word2vecReader
from sklearn.utils.class_weight import compute_sample_weight, compute_class_weight
import math, pickle, json, sys
from keras_self_attention import SeqSelfAttention
reload(sys)
sys.setdefaultencoding('utf8')
vocabSize = 10000
tweetLength = 25
posEmbLength = 25
embeddingVectorLength = 200
embeddingPOSVectorLength = 20
charLengthLimit = 20
batch_size = 100
dayMapper = {'Mon': 1, 'Tue': 2, 'Wed': 3, 'Thu': 4, 'Fri': 5, 'Sat': 6, 'Sun': 0}
def hourMapper(hour):
input = int(hour)
if 0 <= input < 6:
output = 0
elif 6 <= input < 12:
output = 1
elif 12 <= input < 18:
output = 2
else:
output = 3
return output
def loadHistData(modelName, histName, char, embedding, resultName, histNum=5):
print('Loading...')
histData = {}
histFile = open('data/consolidateHistData_' + histName + '.json', 'r')
for line in histFile:
data = json.loads(line.strip())
histData[int(data.keys()[0])] = data.values()[0]
histFile.close()
histContents_train = {}
histDayVectors_train = {}
histHourVectors_train = {}
histPOSLists_train = {}
for i in range(histNum):
histContents_train[i] = []
histDayVectors_train[i] = []
histHourVectors_train[i] = []
histPOSLists_train[i] = []
contents_train = []
labels_train = []
places_train = []
days_train = []
hours_train = []
poss_train = []
ids_train = []
inputFileList = ['data/consolidateData_' + modelName + '_train.json', 'data/consolidateData_' + modelName + '_dev.json', 'data/consolidateData_' + modelName + '_test.json']
for inputFilename in inputFileList:
inputFile = open(inputFilename, 'r')
for line in inputFile:
data = json.loads(line.strip())
if data['id'] in histData:
histTweets = histData[data['id']]
if len(histTweets) >= 5:
contents_train.append(data['content'].encode('utf-8'))
labels_train.append(data['label'])
places_train.append(data['place'])
ids_train.append(str(data['id']))
days_train.append(np.full((tweetLength), data['day'], dtype='int'))
hours_train.append(np.full((tweetLength), data['hour'], dtype='int'))
poss_train.append(data['pos'].encode('utf-8'))
for i in range(histNum):
histContents_train[i].append(histTweets[i]['content'].encode('utf-8'))
histPOSLists_train[i].append(histTweets[i]['pos'].encode('utf-8'))
histDayVectors_train[i].append(np.full((tweetLength), histTweets[i]['day'], dtype='int'))
histHourVectors_train[i].append(np.full((tweetLength), histTweets[i]['hour'], dtype='int'))
inputFile.close()
for i in range(histNum):
histDayVectors_train[i] = np.array(histDayVectors_train[i])
histHourVectors_train[i] = np.array(histHourVectors_train[i])
days_train = np.array(days_train)
hours_train = np.array(hours_train)
places_train = np.array(places_train)
ids_train = np.array(ids_train)
if char:
tk = Tokenizer(num_words=vocabSize, char_level=char, filters='')
else:
tk = Tokenizer(num_words=vocabSize, char_level=char)
totalList = contents_train[:]
for i in range(histNum):
totalList += histContents_train[i]
tk.fit_on_texts(totalList)
tweetSequences_train = tk.texts_to_sequences(contents_train)
tweetVector_train = sequence.pad_sequences(tweetSequences_train, maxlen=tweetLength, truncating='post', padding='post')
with open(resultName + '_tweet.tk', 'wb') as handle:
pickle.dump(tk, handle, protocol=pickle.HIGHEST_PROTOCOL)
histTweetVectors_train = []
for i in range(histNum):
histSequence_train = tk.texts_to_sequences(histContents_train[i])
tempVector_train = sequence.pad_sequences(histSequence_train, maxlen=tweetLength, truncating='post', padding='post')
histTweetVectors_train.append(tempVector_train)
if embedding == 'glove':
embeddings_index = {}
embFile = open('../tweetEmbeddingData/glove.twitter.27B.200d.txt', 'r')
for line in embFile:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
embFile.close()
print('Found %s word vectors.' % len(embeddings_index))
word_index = tk.word_index
embMatrix = np.zeros((len(word_index) + 1, 200))
for word, i in word_index.items():
embVector = embeddings_index.get(word)
if embVector is not None:
embMatrix[i] = embVector
elif embedding == 'word2vec':
word_index = tk.word_index
w2v = word2vecReader.Word2Vec()
embModel = w2v.loadModel()
embMatrix = np.zeros((len(word_index) + 1, 400))
for word, i in word_index.items():
if word in embModel:
embMatrix[i] = embModel[word]
else:
embMatrix = None
word_index = None
posVocabSize = 25
tkPOS = Tokenizer(num_words=posVocabSize, filters='', lower=False)
totalPOSList = poss_train[:]
for i in range(histNum):
totalPOSList += histPOSLists_train[i]
tkPOS.fit_on_texts(totalPOSList)
posSequences_train = tkPOS.texts_to_sequences(poss_train)
posVector_train = sequence.pad_sequences(posSequences_train, maxlen=tweetLength, truncating='post', padding='post')
with open(resultName + '_pos.tk', 'wb') as handle:
pickle.dump(tkPOS, handle, protocol=pickle.HIGHEST_PROTOCOL)
histPOSVectors_train = []
for i in range(histNum):
histPOSSequences_train = tkPOS.texts_to_sequences(histPOSLists_train[i])
histPOSVector_train = sequence.pad_sequences(histPOSSequences_train, maxlen=tweetLength, truncating='post', padding='post')
histPOSVectors_train.append(histPOSVector_train)
return ids_train, labels_train, places_train, contents_train, days_train, hours_train, poss_train, tweetVector_train, posVector_train, histTweetVectors_train, histDayVectors_train, histHourVectors_train, histPOSVectors_train, posVocabSize, embMatrix, word_index
def trainLSTM(modelName, balancedWeight='None', char=False, epochs=4):
placeList = []
placeListFile = open('lists/google_place_long.category', 'r')
for line in placeListFile:
if not line.startswith('#'):
placeList.append(line.strip())
placeListFile.close()
activityList = []
activityListFile = open('lists/google_place_activity_' + modelName + '.list', 'r')
for line in activityListFile:
if not line.startswith('#'):
activityList.append(line.strip())
activityListFile.close()
labelNum = len(np.unique(activityList))
if 'NONE' in activityList:
labelNum -= 1
contents = []
labels = []
timeList = []
labelTweetCount = {}
placeTweetCount = {}
labelCount = {}
for index, place in enumerate(placeList):
activity = activityList[index]
if activity != 'NONE':
if activity not in labelTweetCount:
labelTweetCount[activity] = 0.0
tweetFile = open('data/POIplace/' + place + '.json', 'r')
tweetCount = 0
for line in tweetFile:
data = json.loads(line.strip())
if len(data['text']) > charLengthLimit:
contents.append(data['text'].encode('utf-8'))
dateTemp = data['created_at'].split()
timeList.append([dayMapper[dateTemp[0]], hourMapper(dateTemp[3].split(':')[0])])
if activity not in labelCount:
labelCount[activity] = 1.0
else:
labelCount[activity] += 1.0
labels.append(activity)
tweetCount += 1
tweetFile.close()
labelTweetCount[activity] += tweetCount
placeTweetCount[place] = tweetCount
activityLabels = np.array(labels)
timeVector = np.array(timeList)
encoder = LabelEncoder()
encoder.fit(labels)
labelFile = open('model/LSTM_'+modelName + '_' + str(balancedWeight) + '.label', 'w')
labelFile.write(str(encoder.classes_).replace('\n', ' ').replace("'", "")[1:-1].replace(' ', '\t'))
labelFile.close()
encodedLabels = encoder.transform(labels)
labels = np_utils.to_categorical(encodedLabels)
labelList = encoder.classes_.tolist()
tk = Tokenizer(num_words=vocabSize, char_level=char)
tk.fit_on_texts(contents)
pickle.dump(tk, open('model/LSTM_' + modelName + '_' + str(balancedWeight) + '.tk', 'wb'))
tweetSequences = tk.texts_to_sequences(contents)
tweetVector = sequence.pad_sequences(tweetSequences, maxlen=tweetLength, padding='post', truncating='post')
model_text = Sequential()
model_text.add(Embedding(vocabSize, embeddingVectorLength))
model_text.add(Dropout(0.2))
model_text.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
model_time = Sequential()
model_time.add(Dense(2, input_shape=(2,), activation='relu'))
model = Sequential()
model.add(Merge([model_text, model_time], mode='concat'))
model.add(Dense(labelNum, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
if balancedWeight == 'sample':
sampleWeight = compute_sample_weight('balanced', labels)
model.fit([tweetVector, timeVector], labels, epochs=epochs, batch_size=10, sample_weight=sampleWeight)
elif balancedWeight == 'class':
classWeight = compute_class_weight('balanced', np.unique(activityLabels), activityLabels)
model.fit([tweetVector, timeVector], labels, epochs=epochs, batch_size=10, class_weight=classWeight, verbose=1)
elif balancedWeight == 'class_label':
classWeight = []
countSum = sum(labelCount.values())
for label in labelList:
classWeight.append(countSum/labelCount[label])
model.fit([tweetVector, timeVector], labels, epochs=epochs, batch_size=10, class_weight=classWeight)
elif balancedWeight == 'class_label_log':
classWeight = []
countSum = sum(labelCount.values())
for label in labelList:
classWeight.append(-math.log(labelCount[label] / countSum))
model.fit([tweetVector, timeVector], labels, epochs=epochs, batch_size=10, class_weight=classWeight)
else:
model.fit([tweetVector, timeVector], labels, epochs=epochs, batch_size=10)
model_json = model.to_json()
with open('model/LSTM_'+modelName + '_' + str(balancedWeight) + '.json', 'w') as modelFile:
modelFile.write(model_json)
model.save_weights('model/LSTM_' + modelName + '_' + str(balancedWeight) + '.h5')
def trainHybridLSTM(modelName, histName, balancedWeight='None', embedding='glove', char=False, histNum=5, epochs=7):
resultName = 'model/J-Hist-Context-POST-LSTM_' + modelName + '_' + balancedWeight
ids_train, labels_train, places_train, contents_train, days_train, hours_train, poss_train, tweetVector_train, posVector_train, histTweetVectors_train, histDayVectors_train, \
histHourVectors_train, histPOSVectors_train, posVocabSize, embMatrix, word_index = loadHistData(modelName, histName, char, embedding, resultName=resultName, histNum=histNum)
labelNum = len(np.unique(labels_train))
encoder = LabelEncoder()
encoder.fit(labels_train)
labels_train = encoder.transform(labels_train)
labelList = encoder.classes_.tolist()
print('Labels: ' + str(labelList))
labelFile = open(resultName + '.label', 'a')
labelFile.write(str(labelList) + '\n')
labelFile.close()
# training
print('training...')
input_tweet = Input(batch_shape=(batch_size, tweetLength,), name='tweet_input')
shared_embedding_tweet = Embedding(len(word_index) + 1, 200, weights=[embMatrix], trainable=True)
embedding_tweet = shared_embedding_tweet(input_tweet)
input_day = Input(batch_shape=(batch_size, tweetLength,))
input_hour = Input(batch_shape=(batch_size, tweetLength,))
input_pos = Input(batch_shape=(batch_size, posEmbLength,))
shared_embedding_pos = Embedding(posVocabSize, embeddingPOSVectorLength)
shared_embedding_day = Embedding(20, embeddingPOSVectorLength)
shared_embedding_hour = Embedding(20, embeddingPOSVectorLength)
embedding_day = shared_embedding_day(input_day)
embedding_hour = shared_embedding_hour(input_hour)
embedding_pos = shared_embedding_pos(input_pos)
comb = concatenate([embedding_tweet, embedding_day, embedding_hour, embedding_pos])
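    # Word, day-of-week, hour and POS embeddings of the current tweet are
    # concatenated per time step and summarised by a single LSTM.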
tweet_lstm = LSTM(200, dropout=0.2, recurrent_dropout=0.2)(comb)
conList = [tweet_lstm]
inputList = [input_tweet, input_day, input_hour, input_pos]
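    # One extra LSTM branch per history tweet; all branches reuse the shared
    # embedding layers defined above.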
for i in range(histNum):
input_hist = Input(batch_shape=(batch_size, tweetLength,))
input_day_temp = Input(batch_shape=(batch_size, tweetLength,))
input_hour_temp = Input(batch_shape=(batch_size, tweetLength,))
input_pos_temp = Input(batch_shape=(batch_size, posEmbLength,))
embedding_hist_temp = shared_embedding_tweet(input_hist)
embedding_day_temp = shared_embedding_day(input_day_temp)
embedding_hour_temp = shared_embedding_hour(input_hour_temp)
embedding_pos_temp = shared_embedding_pos(input_pos_temp)
comb_temp = concatenate([embedding_hist_temp, embedding_day_temp, embedding_hour_temp, embedding_pos_temp])
lstm_temp = LSTM(200, dropout=0.2, recurrent_dropout=0.2)(comb_temp)
conList.append(lstm_temp)
inputList += [input_hist, input_day_temp, input_hour_temp, input_pos_temp]
comb_total = concatenate(conList)
output = Dense(labelNum, activation='softmax', name='output')(comb_total)
model = Model(inputs=inputList, outputs=output)
#print(model.summary())
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
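    # The inputs are declared with a fixed batch_shape, so every array is
    # trimmed to a length that is a multiple of batch_size.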
tweet_train = tweetVector_train[:-(len(tweetVector_train) % batch_size)]
labels_train = labels_train[:-(len(labels_train) % batch_size)]
days_train = days_train[:-(len(days_train) % batch_size)]
hours_train = hours_train[:-(len(hours_train) % batch_size)]
posVector_train = posVector_train[:-(len(posVector_train) % batch_size)]
for i in range(histNum):
histTweetVectors_train[i] = histTweetVectors_train[i][:-(len(histTweetVectors_train[i]) % batch_size)]
histDayVectors_train[i] = histDayVectors_train[i][:-(len(histDayVectors_train[i]) % batch_size)]
histHourVectors_train[i] = histHourVectors_train[i][:-(len(histHourVectors_train[i]) % batch_size)]
histPOSVectors_train[i] = histPOSVectors_train[i][:-(len(histPOSVectors_train[i]) % batch_size)]
labelVector_train = np_utils.to_categorical(labels_train)
trainList = [tweet_train, days_train, hours_train, posVector_train]
for i in range(histNum):
trainList += [histTweetVectors_train[i], histDayVectors_train[i], histHourVectors_train[i], histPOSVectors_train[i]]
verbose = 1
if balancedWeight == 'sample':
sampleWeight = compute_sample_weight('balanced', labels_train)
model.fit(trainList, labelVector_train, epochs=epochs, batch_size=batch_size, sample_weight=sampleWeight, verbose=verbose)
elif balancedWeight == 'class':
classWeight = compute_class_weight('balanced', np.unique(labels_train), labels_train)
model.fit(trainList, labelVector_train, epochs=epochs, batch_size=batch_size, class_weight=classWeight, verbose=verbose)
else:
model.fit(trainList, labelVector_train, epochs=epochs, batch_size=batch_size, verbose=verbose)
model_json = model.to_json()
with open(resultName+'_model.json', 'w') as json_file:
json_file.write(model_json)
model.save_weights(resultName+'_model.h5')
    print('FINISHED')
def trainHybridAttLSTM(modelName, histName, balancedWeight='None', embedding='glove', char=False, histNum=5, epochs=7):
resultName = 'model/J-Hist-Context-POST-LSTM_' + modelName + '_' + balancedWeight
ids_train, labels_train, places_train, contents_train, days_train, hours_train, poss_train, tweetVector_train, posVector_train, histTweetVectors_train, histDayVectors_train, \
histHourVectors_train, histPOSVectors_train, posVocabSize, embMatrix, word_index = loadHistData(modelName, histName, char, embedding, resultName=resultName, histNum=histNum)
labelNum = len(np.unique(labels_train))
encoder = LabelEncoder()
encoder.fit(labels_train)
labels_train = encoder.transform(labels_train)
labelList = encoder.classes_.tolist()
print('Labels: ' + str(labelList))
labelFile = open(resultName + '.label', 'a')
labelFile.write(str(labelList) + '\n')
labelFile.close()
# training
print('training...')
input_tweet = Input(batch_shape=(batch_size, tweetLength,), name='tweet_input')
shared_embedding_tweet = Embedding(len(word_index) + 1, 200, weights=[embMatrix], trainable=True)
embedding_tweet = shared_embedding_tweet(input_tweet)
input_day = Input(batch_shape=(batch_size, tweetLength,))
input_hour = Input(batch_shape=(batch_size, tweetLength,))
input_pos = Input(batch_shape=(batch_size, posEmbLength,))
shared_embedding_pos = Embedding(posVocabSize, embeddingPOSVectorLength)
shared_embedding_day = Embedding(20, embeddingPOSVectorLength)
shared_embedding_hour = Embedding(20, embeddingPOSVectorLength)
embedding_day = shared_embedding_day(input_day)
embedding_hour = shared_embedding_hour(input_hour)
embedding_pos = shared_embedding_pos(input_pos)
comb = concatenate([embedding_tweet, embedding_day, embedding_hour, embedding_pos])
tweet_lstm = LSTM(200, dropout=0.2, recurrent_dropout=0.2, return_sequences=True)(comb)
self_attention = SeqSelfAttention(attention_activation='sigmoid')(tweet_lstm)
last_timestep = Lambda(lambda x: x[:, -1, :])(self_attention)
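    # Only the attended representation of the last time step is kept as the
    # summary of the current tweet.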
conList = [last_timestep]
inputList = [input_tweet, input_day, input_hour, input_pos]
for i in range(histNum):
input_hist = Input(batch_shape=(batch_size, tweetLength,))
input_day_temp = Input(batch_shape=(batch_size, tweetLength,))
input_hour_temp = Input(batch_shape=(batch_size, tweetLength,))
input_pos_temp = Input(batch_shape=(batch_size, posEmbLength,))
embedding_hist_temp = shared_embedding_tweet(input_hist)
embedding_day_temp = shared_embedding_day(input_day_temp)
embedding_hour_temp = shared_embedding_hour(input_hour_temp)
embedding_pos_temp = shared_embedding_pos(input_pos_temp)
comb_temp = concatenate([embedding_hist_temp, embedding_day_temp, embedding_hour_temp, embedding_pos_temp])
lstm_temp = LSTM(200, dropout=0.2, recurrent_dropout=0.2, return_sequences=True)(comb_temp)
self_attention_temp = SeqSelfAttention(attention_activation='sigmoid')(lstm_temp)
last_timestep_temp = Lambda(lambda x: x[:, -1, :])(self_attention_temp)
conList.append(last_timestep_temp)
inputList += [input_hist, input_day_temp, input_hour_temp, input_pos_temp]
comb_total = concatenate(conList)
output = Dense(labelNum, activation='softmax', name='output')(comb_total)
model = Model(inputs=inputList, outputs=output)
#print(model.summary())
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
tweet_train = tweetVector_train[:-(len(tweetVector_train) % batch_size)]
labels_train = labels_train[:-(len(labels_train) % batch_size)]
days_train = days_train[:-(len(days_train) % batch_size)]
hours_train = hours_train[:-(len(hours_train) % batch_size)]
posVector_train = posVector_train[:-(len(posVector_train) % batch_size)]
for i in range(histNum):
histTweetVectors_train[i] = histTweetVectors_train[i][:-(len(histTweetVectors_train[i]) % batch_size)]
histDayVectors_train[i] = histDayVectors_train[i][:-(len(histDayVectors_train[i]) % batch_size)]
histHourVectors_train[i] = histHourVectors_train[i][:-(len(histHourVectors_train[i]) % batch_size)]
histPOSVectors_train[i] = histPOSVectors_train[i][:-(len(histPOSVectors_train[i]) % batch_size)]
labelVector_train = np_utils.to_categorical(labels_train)
trainList = [tweet_train, days_train, hours_train, posVector_train]
for i in range(histNum):
trainList += [histTweetVectors_train[i], histDayVectors_train[i], histHourVectors_train[i], histPOSVectors_train[i]]
verbose = 1
if balancedWeight == 'sample':
sampleWeight = compute_sample_weight('balanced', labels_train)
model.fit(trainList, labelVector_train, epochs=epochs, batch_size=batch_size, sample_weight=sampleWeight, verbose=verbose)
elif balancedWeight == 'class':
classWeight = compute_class_weight('balanced', np.unique(labels_train), labels_train)
model.fit(trainList, labelVector_train, epochs=epochs, batch_size=batch_size, class_weight=classWeight, verbose=verbose)
else:
model.fit(trainList, labelVector_train, epochs=epochs, batch_size=batch_size, verbose=verbose)
model_json = model.to_json()
with open(resultName+'_model.json', 'w') as json_file:
json_file.write(model_json)
model.save_weights(resultName+'_model.h5')
    print('FINISHED')
if __name__ == '__main__':
#trainLSTM('long1.5', 'none', char=False)
#trainHybridLSTM('long1.5', 'long1.5', 'class', 'glove', char=False, histNum=5, epochs=26)
trainHybridAttLSTM('long1.5', 'long1.5', 'class', 'glove', char=False, histNum=5, epochs=14)
| mit |
bthirion/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and randomly extract 50 patches from it. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
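            # Reset the buffer so the next partial_fit call sees a fresh
            # chunk of roughly 500 patches (10 images x 50 patches).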
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
UNR-AERIAL/scikit-learn | examples/model_selection/plot_precision_recall.py | 249 | 6150 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
# Run classifier
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute the micro-averaged precision-recall curve and average precision
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall[0], precision[0], label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"],
label='micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i in range(n_classes):
plt.plot(recall[i], precision[i],
label='Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
Flaviolib/dx | dx/dx_portfolio.py | 5 | 14390 | #
# DX Analytics Portfolio
# dx_portfolio.py
#
# (c) Dr. Yves J. Hilpisch
# The Python Quants GmbH
# You are not allowed to copy or distribute the dx library.
# Rights are only granted for a limited period of time to
# test the library in connection with the Python Quant Platform.
# See also the terms and conditions under
# http://analytics.quant-platform.com/documents/PQP_Terms_Conditions_Trial.pdf
#
# DX Analytics (the "dx library") comes with no representations
# or warranties, to the extent permitted by applicable law.
#
from dx_frame import *
import pandas.io.data as web
import math
import scipy.optimize as sco
import scipy.interpolate as sci
class mean_variance_portfolio(object):
'''
Class to implement the mean variance portfolio theory of Markowitz
'''
def __init__(self, name, mar_env):
self.name = name
try:
self.symbols = mar_env.get_list("symbols")
self.start_date = mar_env.pricing_date
except:
raise ValueError("Error parsing market environment.")
self.number_of_assets = len(self.symbols)
try:
self.final_date = mar_env.get_constant("final date")
except:
self.final_date = dt.date.today()
try:
self.source = mar_env.get_constant("source")
except:
self.source = 'google'
try:
self.weights = mar_env.get_constant("weights")
except:
self.weights = np.ones(self.number_of_assets, 'float')
self.weights /= self.number_of_assets
try:
weights_sum = sum(self.weights)
except:
msg = "Weights must be an iterable of numbers."
raise TypeError(msg)
if round(weights_sum, 6) != 1:
raise ValueError("Sum of weights must be one.")
if len(self.weights) != self.number_of_assets:
msg = "Expected %s weights, got %s"
raise ValueError(msg % (self.number_of_assets,
len(self.weights)))
self.load_data()
self.make_raw_stats()
self.apply_weights()
def __str__(self):
string = "Portfolio %s \n" % self.name
string += len(string) * '-' + '\n'
string += "return %10.3f\n" % self.portfolio_return
string += "volatility %10.3f\n" % math.sqrt(self.variance)
string += "Sharpe ratio %10.3f\n" % (self.portfolio_return /
math.sqrt(self.variance))
string += "\n"
string += "Positions\n"
string += "symbol | weight | ret. con. \n"
string += "--------------------------- \n"
for i in range(len(self.symbols)):
string += "{:<6} | {:6.3f} | {:9.3f} \n".format(self.symbols[i],
self.weights[i],
self.mean_returns[i])
return string
def load_data(self):
'''
Loads asset values from the web.
'''
self.data = pd.DataFrame()
# if self.source == "yahoo" or self.source == "google":
for sym in self.symbols:
try:
self.data[sym] = web.DataReader(sym, self.source,
self.start_date,
self.final_date)['Close']
except:
print "Can not find data for source %s and symbol %s." \
% (self.source, sym)
print "Will try other source."
try:
if self.source == "yahoo":
source = "google"
if self.source == "google":
source = "yahoo"
self.data[sym] = web.DataReader(sym, source,
self.start_date,
self.final_date)['Close']
except:
msg = "Can not find data for source %s and symbol %s"
raise IOError(msg % (source, sym))
self.data.columns = self.symbols
# To do: add more sources
def make_raw_stats(self):
'''
Computes returns and variances
'''
self.raw_returns = np.log(self.data / self.data.shift(1))
self.mean_raw_return = self.raw_returns.mean()
self.raw_covariance = self.raw_returns.cov()
def apply_weights(self):
'''
Applies weights to the raw returns and covariances
'''
self.returns = self.raw_returns * self.weights
self.mean_returns = self.returns.mean() * 252
self.portfolio_return = np.sum(self.mean_returns)
self.variance = np.dot(self.weights.T,
np.dot(self.raw_covariance * 252, self.weights))
def test_weights(self, weights):
'''
Returns the theoretical portfolio return, portfolio volatility
and Sharpe ratio for given weights.
Please note:
        The method does not set the weights.
Parameter
---------
        weights: iterable,
the weights of the portfolio content.
'''
weights = np.array(weights)
portfolio_return = np.sum(self.raw_returns.mean() * weights) * 252
portfolio_vol = math.sqrt(np.dot(weights.T,
np.dot(self.raw_covariance * 252, weights)))
return np.array([portfolio_return, portfolio_vol,
portfolio_return / portfolio_vol])
def set_weights(self, weights):
'''
Sets new weights
Parameter
---------
        weights: iterable
new set of weights
'''
try:
weights = np.array(weights)
weights_sum = sum(weights).round(3)
except:
msg = "weights must be an interable of numbers"
raise TypeError(msg)
if weights_sum != 1:
raise ValueError("Sum of weights must be one")
if len(weights) != self.number_of_assets:
msg = "Expected %s weights, got %s"
raise ValueError(msg % (self.number_of_assets,
len(weights)))
self.weights = weights
self.apply_weights()
def get_weights(self):
'''
Returns a dictionary with entries symbol:weights
'''
d = dict()
for i in range(len(self.symbols)):
d[self.symbols[i]] = self.weights[i]
return d
def get_portfolio_return(self):
'''
Returns the average return of the weighted portfolio
'''
return self.portfolio_return
def get_portfolio_variance(self):
'''
Returns the average variance of the weighted portfolio
'''
return self.variance
def get_volatility(self):
'''
Returns the average volatility of the portfolio
'''
return math.sqrt(self.variance)
def optimize(self, target, constraint=None, constraint_type='Exact'):
'''
Optimize the weights of the portfolio according to the value of the
string 'target'
Parameters
==========
target: string
one of:
Sharpe: maximizes the ratio return/volatility
Vol: minimizes the expected volatility
Return: maximizes the expected return
constraint: number
only for target options 'Vol' and 'Return'.
For target option 'Return', the function tries to optimize
the expected return given the constraint on the volatility.
For target option 'Vol', the optimization returns the minimum
volatility given the constraint for the expected return.
If constraint is None (default), the optimization is made
without concerning the other value.
constraint_type: string, one of 'Exact' or 'Bound'
only relevant if constraint is not None.
For 'Exact' (default) the value of the constraint must be hit
(if possible), for 'Bound', constraint is only the upper/lower
bound of the volatility or return resp.
'''
weights = self.get_optimal_weights(target, constraint, constraint_type)
if weights is not False:
self.set_weights(weights)
else:
raise ValueError("Optimization failed.")
def get_capital_market_line(self, riskless_asset):
'''
Returns the capital market line as a lambda function and
        the coordinates of the intersection between the capital market
line and the efficient frontier
Parameters
==========
riskless_asset: float
the return of the riskless asset
'''
x, y = self.get_efficient_frontier(100)
if len(x) == 1:
raise ValueError("Efficient Frontier seems to be constant.")
f_eff = sci.UnivariateSpline(x, y, s=0)
f_eff_der = f_eff.derivative(1)
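        # Tangency condition: the line through (0, riskless_asset) touches the
        # spline-interpolated frontier where f_eff'(x) = (f_eff(x) - rl) / x,
        # which is the root of the expression returned by ``tangent`` below.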
def tangent(x, rl=riskless_asset):
return f_eff_der(x)*x/(f_eff(x)-rl)-1
left_start = x[0]
right_start = x[-1]
left, right = self.search_sign_changing(left_start, right_start,
tangent, right_start-left_start)
if left == 0 and right == 0:
raise ValueError("Can not find tangent.")
zero_x = sco.brentq(tangent, left, right)
opt_return = f_eff(zero_x)
cpl = lambda x: f_eff_der(zero_x)*x+riskless_asset
return cpl, zero_x, float(opt_return)
def get_efficient_frontier(self, n):
'''
Returns the efficient frontier in form of lists containing the x and y
coordinates of points of the frontier.
Parameters
==========
n : int >= 3
number of points
'''
if type(n) is not int:
raise TypeError("n must be an int")
if n < 3:
raise ValueError("n must be at least 3")
min_vol_weights = self.get_optimal_weights("Vol")
min_vol = self.test_weights(min_vol_weights)[1]
min_return_weights = self.get_optimal_weights("Return",
constraint=min_vol)
min_return = self.test_weights(min_return_weights)[0]
max_return_weights = self.get_optimal_weights("Return")
max_return = self.test_weights(max_return_weights)[0]
delta = (max_return-min_return)/(n-1)
if delta > 0:
returns = np.arange(min_return, max_return+delta, delta)
            vols = list()
rets = list()
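            # Sweep the target returns and find, for each, the portfolio of
            # minimum volatility that attains that return exactly.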
for r in returns:
w = self.get_optimal_weights('Vol', constraint=r,
constraint_type="Exact")
if w is not False:
result = self.test_weights(w)[:2]
rets.append(result[0])
vols.append(result[1])
else:
rets = [max_return, ]
vols = [min_vol, ]
return np.array(vols), np.array(rets)
def get_optimal_weights(self, target, constraint=None, constraint_type="Exact"):
        '''
        Computes the optimal weights for the given target ('Sharpe', 'Vol'
        or 'Return'), optionally subject to a constraint on the expected
        return or volatility (see ``optimize`` for the meaning of the
        parameters). Returns the rounded weight array on success and False
        if the optimization fails.
        '''
if target == "Sharpe":
def optimize_function(weights):
return -self.test_weights(weights)[2]
cons = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
elif target == "Vol":
def optimize_function(weights):
return self.test_weights(weights)[1]
cons = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1}, ]
if constraint is not None:
d = dict()
if constraint_type == "Exact":
d['type'] = 'eq'
d['fun'] = lambda x: self.test_weights(x)[0] - constraint
cons.append(d)
elif constraint_type == "Bound":
d['type'] = 'ineq'
d['fun'] = lambda x: self.test_weights(x)[0] - constraint
cons.append(d)
else:
msg = "Value for constraint_type must be either "
msg += "Exact or Bound, not %s" % constraint_type
raise ValueError(msg)
elif target == "Return":
def optimize_function(weights):
return -self.test_weights(weights)[0]
cons = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1}, ]
if constraint is not None:
d = dict()
if constraint_type == "Exact":
d['type'] = 'eq'
d['fun'] = lambda x: self.test_weights(x)[1] - constraint
cons.append(d)
elif constraint_type == "Bound":
d['type'] = 'ineq'
d['fun'] = lambda x: constraint - self.test_weights(x)[1]
cons.append(d)
else:
msg = "Value for constraint_type must be either "
msg += "Exact or Bound, not %s" % constraint_type
raise ValueError(msg)
else:
raise ValueError("Unknown target %s" % target)
bounds = tuple((0, 1) for x in range(self.number_of_assets))
start = self.number_of_assets * [1. / self.number_of_assets, ]
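        # Start from equal weights; the constraints above enforce that the
        # weights sum to one and the bounds keep each weight in [0, 1].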
result = sco.minimize(optimize_function, start,
method='SLSQP', bounds=bounds, constraints=cons)
if bool(result['success']) is True:
new_weights = result['x'].round(6)
return new_weights
else:
return False
def search_sign_changing(self, l, r, f, d):
if d < 0.000001:
return (0, 0)
for x in np.arange(l, r+d, d):
if f(l)*f(x) < 0:
                ret = (x-d, x)
return ret
ret = self.search_sign_changing(l, r, f, d/2.)
return ret
if __name__ == '__main__':
ma = market_environment("ma", dt.date(2010, 1, 1))
ma.add_constant("symbols", ['AAPL', 'GOOG', 'MSFT', 'DB'])
ma.add_constant("final date", dt.date(2014, 3, 1))
port = mean_variance_portfolio("My Portfolio", ma)
| agpl-3.0 |
zuku1985/scikit-learn | sklearn/utils/tests/test_metaestimators.py | 86 | 2304 | from sklearn.utils.testing import assert_true, assert_false
from sklearn.utils.metaestimators import if_delegate_has_method
class Prefix(object):
def func(self):
pass
class MockMetaEstimator(object):
"""This is a mock meta estimator"""
a_prefix = Prefix()
@if_delegate_has_method(delegate="a_prefix")
def func(self):
"""This is a mock delegated function"""
pass
def test_delegated_docstring():
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.__dict__['func'].__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.func.__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator().func.__doc__))
class MetaEst(object):
"""A mock meta estimator"""
def __init__(self, sub_est, better_sub_est=None):
self.sub_est = sub_est
self.better_sub_est = better_sub_est
@if_delegate_has_method(delegate='sub_est')
def predict(self):
pass
class MetaEstTestTuple(MetaEst):
"""A mock meta estimator to test passing a tuple of delegates"""
@if_delegate_has_method(delegate=('sub_est', 'better_sub_est'))
def predict(self):
pass
class MetaEstTestList(MetaEst):
"""A mock meta estimator to test passing a list of delegates"""
@if_delegate_has_method(delegate=['sub_est', 'better_sub_est'])
def predict(self):
pass
class HasPredict(object):
"""A mock sub-estimator with predict method"""
def predict(self):
pass
class HasNoPredict(object):
"""A mock sub-estimator with no predict method"""
pass
def test_if_delegate_has_method():
assert_true(hasattr(MetaEst(HasPredict()), 'predict'))
assert_false(hasattr(MetaEst(HasNoPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestTuple(HasNoPredict(), HasNoPredict()), 'predict'))
assert_true(
hasattr(MetaEstTestTuple(HasPredict(), HasNoPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestTuple(HasNoPredict(), HasPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestList(HasNoPredict(), HasPredict()), 'predict'))
assert_true(
hasattr(MetaEstTestList(HasPredict(), HasPredict()), 'predict'))
| bsd-3-clause |
jj-umn/tools-iuc | tools/vsnp/vsnp_add_zero_coverage.py | 12 | 6321 | #!/usr/bin/env python
import argparse
import os
import re
import shutil
import pandas
import pysam
from Bio import SeqIO
def get_sample_name(file_path):
base_file_name = os.path.basename(file_path)
if base_file_name.find(".") > 0:
# Eliminate the extension.
return os.path.splitext(base_file_name)[0]
return base_file_name
def get_coverage_df(bam_file):
# Create a coverage dictionary.
coverage_dict = {}
coverage_list = pysam.depth(bam_file, split_lines=True)
for line in coverage_list:
chrom, position, depth = line.split('\t')
coverage_dict["%s-%s" % (chrom, position)] = depth
# Convert it to a data frame.
coverage_df = pandas.DataFrame.from_dict(coverage_dict, orient='index', columns=["depth"])
return coverage_df
def get_zero_df(reference):
# Create a zero coverage dictionary.
zero_dict = {}
for record in SeqIO.parse(reference, "fasta"):
chrom = record.id
total_len = len(record.seq)
for pos in list(range(1, total_len + 1)):
zero_dict["%s-%s" % (str(chrom), str(pos))] = 0
    # Convert it to a data frame with a single zero-filled "depth" column,
    # indexed by "chrom-position" strings.
zero_df = pandas.DataFrame.from_dict(zero_dict, orient='index', columns=["depth"])
return zero_df
def output_zc_vcf_file(base_file_name, vcf_file, zero_df, total_zero_coverage, output_vcf):
column_names = ["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", "Sample"]
vcf_df = pandas.read_csv(vcf_file, sep='\t', header=None, names=column_names, comment='#')
good_snp_count = len(vcf_df[(vcf_df['ALT'].str.len() == 1) & (vcf_df['REF'].str.len() == 1) & (vcf_df['QUAL'] > 150)])
if total_zero_coverage > 0:
header_file = "%s_header.csv" % base_file_name
with open(header_file, 'w') as outfile:
with open(vcf_file) as infile:
for line in infile:
if re.search('^#', line):
outfile.write("%s" % line)
vcf_df_snp = vcf_df[vcf_df['REF'].str.len() == 1]
vcf_df_snp = vcf_df_snp[vcf_df_snp['ALT'].str.len() == 1]
vcf_df_snp['ABS_VALUE'] = vcf_df_snp['CHROM'].map(str) + "-" + vcf_df_snp['POS'].map(str)
vcf_df_snp = vcf_df_snp.set_index('ABS_VALUE')
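        # The SNP rows are now indexed by the same "chrom-position" key as the
        # zero-coverage frame, so the concat below aligns called SNPs with
        # uncovered positions.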
cat_df = pandas.concat([vcf_df_snp, zero_df], axis=1, sort=False)
cat_df = cat_df.drop(columns=['CHROM', 'POS', 'depth'])
cat_df[['ID', 'ALT', 'QUAL', 'FILTER', 'INFO']] = cat_df[['ID', 'ALT', 'QUAL', 'FILTER', 'INFO']].fillna('.')
cat_df['REF'] = cat_df['REF'].fillna('N')
cat_df['FORMAT'] = cat_df['FORMAT'].fillna('GT')
cat_df['Sample'] = cat_df['Sample'].fillna('./.')
cat_df['temp'] = cat_df.index.str.rsplit('-', n=1)
cat_df[['CHROM', 'POS']] = pandas.DataFrame(cat_df.temp.values.tolist(), index=cat_df.index)
cat_df = cat_df[['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', 'Sample']]
cat_df['POS'] = cat_df['POS'].astype(int)
cat_df = cat_df.sort_values(['CHROM', 'POS'])
body_file = "%s_body.csv" % base_file_name
cat_df.to_csv(body_file, sep='\t', header=False, index=False)
with open(output_vcf, "w") as outfile:
for cf in [header_file, body_file]:
with open(cf, "r") as infile:
for line in infile:
outfile.write("%s" % line)
else:
shutil.move(vcf_file, output_vcf)
return good_snp_count
def output_metrics_file(base_file_name, average_coverage, genome_coverage, good_snp_count, output_metrics):
bam_metrics = [base_file_name, "", "%4f" % average_coverage, genome_coverage]
vcf_metrics = [base_file_name, str(good_snp_count), "", ""]
metrics_columns = ["File", "Number of Good SNPs", "Average Coverage", "Genome Coverage"]
with open(output_metrics, "w") as fh:
fh.write("# %s\n" % "\t".join(metrics_columns))
fh.write("%s\n" % "\t".join(bam_metrics))
fh.write("%s\n" % "\t".join(vcf_metrics))
def output_files(vcf_file, total_zero_coverage, zero_df, output_vcf, average_coverage, genome_coverage, output_metrics):
base_file_name = get_sample_name(vcf_file)
good_snp_count = output_zc_vcf_file(base_file_name, vcf_file, zero_df, total_zero_coverage, output_vcf)
output_metrics_file(base_file_name, average_coverage, genome_coverage, good_snp_count, output_metrics)
def get_coverage_and_snp_count(bam_file, vcf_file, reference, output_metrics, output_vcf):
coverage_df = get_coverage_df(bam_file)
zero_df = get_zero_df(reference)
coverage_df = zero_df.merge(coverage_df, left_index=True, right_index=True, how='outer')
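    # The outer merge keeps every reference position, including positions
    # that pysam.depth never reported.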
# depth_x "0" column no longer needed.
coverage_df = coverage_df.drop(columns=['depth_x'])
coverage_df = coverage_df.rename(columns={'depth_y': 'depth'})
    # Convert the NaN to 0 coverage and get some metrics.
coverage_df = coverage_df.fillna(0)
coverage_df['depth'] = coverage_df['depth'].apply(int)
total_length = len(coverage_df)
average_coverage = coverage_df['depth'].mean()
zero_df = coverage_df[coverage_df['depth'] == 0]
total_zero_coverage = len(zero_df)
total_coverage = total_length - total_zero_coverage
genome_coverage = "{:.2%}".format(total_coverage / total_length)
    # Output a zero-coverage VCF file and the metrics file.
output_files(vcf_file, total_zero_coverage, zero_df, output_vcf, average_coverage, genome_coverage, output_metrics)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--bam_input', action='store', dest='bam_input', help='bam input file')
parser.add_argument('--output_metrics', action='store', dest='output_metrics', required=False, default=None, help='Output metrics text file')
parser.add_argument('--output_vcf', action='store', dest='output_vcf', required=False, default=None, help='Output VCF file')
parser.add_argument('--reference', action='store', dest='reference', help='Reference dataset')
parser.add_argument('--vcf_input', action='store', dest='vcf_input', help='vcf input file')
args = parser.parse_args()
get_coverage_and_snp_count(args.bam_input, args.vcf_input, args.reference, args.output_metrics, args.output_vcf)
| mit |
mkoledoye/mds_examples | experiments/evaluation.py | 2 | 1384 | import numpy as np
from matplotlib import pyplot as plt
COLORS = iter(['blue', 'red', 'green', 'magenta'])
def rmse(computed, real):
return np.sqrt(((computed - real)**2).mean())
def first_third_quartile_and_median(data):
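    # data is expected to hold one row per x-value and one column per run,
    # so percentiles and the median are taken across runs (axis=1).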
first_quartile = np.percentile(data, 25, axis=1)
third_quartile = np.percentile(data, 75, axis=1)
median = np.median(data, axis=1)
return first_quartile, median, third_quartile
def plot_rmse(first_quartile, median, third_quartile, x_axis=None):
plt.figure(1)
if x_axis is None:
x_axis = np.arange(median.shape[0])
color = next(COLORS)
plot1, = plt.plot(x_axis, median, 'k-', color=color)
plot2 = plt.fill_between(x_axis, first_quartile, third_quartile, color=color, alpha=0.3)
axes = plt.gca()
return plot1, plot2, axes
def plot_rmse_vs_noise(*args, **kwargs):
plot1, plot2, axes = plot_rmse(*args, **kwargs)
axes.set_xlabel('Measurement noise $\sigma$ [m]')
axes.set_ylabel('RMSE of computed configuration [m]')
return plot1, plot2
def plot_rmse_vs_anchors(*args, **kwargs):
plot1, plot2, axes = plot_rmse(*args, **kwargs)
axes.set_xlabel('No of anchors')
axes.set_ylabel('RMSE of computed configuration [m]')
return plot1, plot2
def plot_rmse_vs_ntags(*args, **kwargs):
plot1, plot2, axes = plot_rmse(*args, **kwargs)
axes.set_xlabel('No of tags')
axes.set_ylabel('RMSE of computed configuration [m]')
return plot1, plot2
| mit |
marktrovinger/Fremont-Bike-Data | jupyterworkflow/data.py | 1 | 1025 | import os
from urllib.request import urlretrieve
import pandas as pd
FREMONT_URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'
def get_fremont_data(filename='Fremont.csv', url=FREMONT_URL, force_download=False):
    '''Download and cache Fremont data
Parameters
-----------
filename : string (optional)
location to save the data
url : string (optional)
web location of the data
force_download : bool (optional)
if True, force redownload of the data
Returns
--------
data : pandas.DataFrame
The Fremont bridge data
'''
if force_download or not os.path.exists(filename):
        urlretrieve(url, filename)
    data = pd.read_csv(filename, index_col='Date')
try:
data.index = pd.to_datetime(data.index, format='%m/%d/%Y %I:%M:%S %p')
except TypeError:
data.index = pd.to_datetime(data.index)
data.columns = ['West', 'East']
data['Total'] = data['East'] + data['West']
return data | mit |
joakim-hove/ert | python/python/ert_gui/plottery/plots/histogram.py | 4 | 5535 | from math import sqrt, ceil, floor, log10
from matplotlib.patches import Rectangle
import numpy
from .plot_tools import PlotTools
import pandas as pd
def plotHistogram(plot_context):
""" @type plot_context: ert_gui.plottery.PlotContext """
ert = plot_context.ert()
key = plot_context.key()
config = plot_context.plotConfig()
case_list = plot_context.cases()
case_count = len(case_list)
plot_context.x_axis = plot_context.VALUE_AXIS
    plot_context.y_axis = plot_context.COUNT_AXIS
if config.xLabel() is None:
config.setXLabel("Value")
if config.yLabel() is None:
config.setYLabel("Count")
use_log_scale = False
if key.startswith("LOG10_"):
key = key[6:]
use_log_scale = True
data = {}
minimum = None
maximum = None
categories = set()
max_element_count = 0
categorical = False
for case in case_list:
data[case] = plot_context.dataGatherer().gatherData(ert, case, key)
if data[case].dtype == "object":
try:
data[case] = pd.to_numeric(data[case], errors='ignore')
except AttributeError:
data[case] = data[case].convert_objects(convert_numeric=True)
if data[case].dtype == "object":
categorical = True
if categorical:
categories = categories.union(set(data[case].unique()))
else:
if minimum is None:
minimum = data[case].min()
else:
minimum = min(minimum, data[case].min())
if maximum is None:
maximum = data[case].max()
else:
maximum = max(maximum, data[case].max())
max_element_count = max(max_element_count, len(data[case].index))
categories = sorted(categories)
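    # Square-root rule of thumb: the bin count grows with the square root of
    # the largest number of realizations seen across the cases.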
bin_count = int(ceil(sqrt(max_element_count)))
axes = {}
""":type: dict of (str, matplotlib.axes.Axes) """
for index, case in enumerate(case_list):
axes[case] = plot_context.figure().add_subplot(case_count, 1, index + 1)
axes[case].set_title("%s (%s)" % (config.title(), case))
if use_log_scale:
axes[case].set_xscale("log")
if not data[case].empty:
if categorical:
_plotCategoricalHistogram(axes[case], config, data[case], case, categories)
else:
_plotHistogram(axes[case], config, data[case], case, bin_count, use_log_scale, minimum, maximum)
config.nextColor()
PlotTools.showGrid(axes[case], plot_context)
min_count = 0
max_count = max([subplot.get_ylim()[1] for subplot in axes.values()])
custom_limits = plot_context.plotConfig().limits
if custom_limits.count_maximum is not None:
max_count = custom_limits.count_maximum
if custom_limits.count_minimum is not None:
min_count = custom_limits.count_minimum
for subplot in axes.values():
subplot.set_ylim(min_count, max_count)
subplot.set_xlim(custom_limits.value_minimum, custom_limits.value_maximum)
def _plotCategoricalHistogram(axes, plot_config, data, label, categories):
"""
@type axes: matplotlib.axes.Axes
@type plot_config: PlotConfig
@type data: DataFrame
@type label: str
@type categories: list of str
"""
axes.set_xlabel(plot_config.xLabel())
axes.set_ylabel(plot_config.yLabel())
style = plot_config.histogramStyle()
counts = data.value_counts()
freq = [counts[category] if category in counts else 0 for category in categories]
pos = numpy.arange(len(categories))
width = 1.0
axes.set_xticks(pos + (width / 2.0))
axes.set_xticklabels(categories)
axes.bar(pos, freq, alpha=style.alpha, color=style.color, width=width)
rectangle = Rectangle((0, 0), 1, 1, color=style.color) # creates rectangle patch for legend use.
plot_config.addLegendItem(label, rectangle)
def _plotHistogram(axes, plot_config, data, label, bin_count, use_log_scale=False, minimum=None, maximum=None):
"""
@type axes: matplotlib.axes.Axes
@type plot_config: PlotConfig
@type data: DataFrame
@type label: str
"""
axes.set_xlabel(plot_config.xLabel())
axes.set_ylabel(plot_config.yLabel())
style = plot_config.histogramStyle()
if minimum is not None and maximum is not None:
if use_log_scale:
bins = _histogramLogBins(bin_count, minimum, maximum)
else:
bins = numpy.linspace(minimum, maximum, bin_count)
else:
bins = bin_count
axes.hist(data.values, alpha=style.alpha, bins=bins, color=style.color)
if minimum == maximum:
minimum -= 0.5
maximum += 0.5
axes.set_xlim(minimum, maximum)
    rectangle = Rectangle((0, 0), 1, 1, color=style.color) # creates rectangle patch for legend use.
plot_config.addLegendItem(label, rectangle)
def _histogramLogBins(bin_count, minimum=None, maximum=None):
"""
@type data: pandas.DataFrame
@rtype: int
"""
minimum = log10(float(minimum))
maximum = log10(float(maximum))
min_value = int(floor(minimum))
max_value = int(ceil(maximum))
log_bin_count = max_value - min_value
if log_bin_count < bin_count:
next_bin_count = log_bin_count * 2
if bin_count - log_bin_count > next_bin_count - bin_count:
log_bin_count = next_bin_count
else:
log_bin_count = bin_count
return 10 ** numpy.linspace(minimum, maximum, log_bin_count)
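# Hedged usage note (added for illustration, not part of the original module):
# for example, _histogramLogBins(10, 1.0, 1000.0) returns 6 logarithmically
# spaced edges from 1.0 to 1000.0 (10 ** linspace(0, 3, 6)), because the
# whole-decade count (3) is doubled once rather than stretched to the
# requested 10 bins.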
| gpl-3.0 |
jlegendary/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher it is, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
mblondel/scikit-learn | examples/model_selection/randomized_search.py | 57 | 3208 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
iris = load_digits()
X, y = iris.data, iris.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
ccasotto/rmtk | rmtk/parsers/vulnerability_model_converter.py | 3 | 7101 | #!/usr/bin/env python
# LICENSE
#
# Copyright (c) 2014, GEM Foundation, Anirudh Rao
#
# The rmtk is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software rmtk provided herein is released as a prototype
# implementation on behalf of scientists and engineers working within the GEM
# Foundation (Global Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM's OpenQuake suite
# (http://www.globalquakemodel.org/openquake) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM's OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the risk scientific staff of the GEM Model Facility
# (risk@globalquakemodel.org).
#
# The nrml_converters is therefore distributed WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# The GEM Foundation, and the authors of the software, assume no liability for
# use of the software.
"""
Convert vulnerability model csv files to xml.
"""
import os
import argparse
import pandas as pd
from lxml import etree
NAMESPACE = 'http://openquake.org/xmlns/nrml/0.4'
GML_NAMESPACE = 'http://www.opengis.net/gml'
SERIALIZE_NS_MAP = {None: NAMESPACE, 'gml': GML_NAMESPACE}
def csv_to_xml(input_csv, output_xml):
"""
Converts the CSV vulnerability model file to the NRML format
"""
data = pd.io.parsers.read_csv(input_csv)
grouped_by_set = data.groupby(['vuln_set_id','vuln_func_id'])
vuln_model = {}
for (vuln_set_id, vuln_func_id), group in grouped_by_set:
if vuln_set_id not in vuln_model:
vuln_model[vuln_set_id] = {}
vuln_model[vuln_set_id]['asset_cat'] = group['asset_cat'].tolist()[0]
vuln_model[vuln_set_id]['loss_cat'] = group['loss_cat'].tolist()[0]
vuln_model[vuln_set_id]['imt'] = group['imt'].tolist()[0]
vuln_model[vuln_set_id]['iml'] = group['iml'].tolist()
vuln_model[vuln_set_id]['functions'] = []
vuln_func = {}
vuln_func['vuln_func_id'] = vuln_func_id
vuln_func['distr'] = group['distr'].tolist()[0]
vuln_func['mean_lr'] = group['mean_lr'].tolist()
vuln_func['stddev_lr'] = group['stddev_lr'].tolist()
vuln_model[vuln_set_id]['functions'].append(vuln_func)
with open(output_xml, "w") as f:
root = etree.Element('nrml', nsmap=SERIALIZE_NS_MAP)
node_vm = etree.SubElement(root, "vulnerabilityModel")
for vuln_set_id, vuln_func_set in vuln_model.iteritems():
node_dvs = etree.SubElement(node_vm, "discreteVulnerabilitySet")
node_dvs.set("vulnerabilitySetID", vuln_set_id)
node_dvs.set("assetCategory", vuln_func_set['asset_cat'])
node_dvs.set("lossCategory", vuln_func_set['loss_cat'])
node_iml = etree.SubElement(node_dvs, "IML")
node_iml.set("IMT", vuln_func_set['imt'])
node_iml.text = " ".join(map(str, vuln_func_set['iml']))
for vuln_func in vuln_func_set['functions']:
node_dvf = etree.SubElement(node_dvs, "discreteVulnerability")
node_dvf.set("vulnerabilityFunctionID", vuln_func['vuln_func_id'])
node_dvf.set("probabilisticDistribution", vuln_func['distr'])
node_lr = etree.SubElement(node_dvf, "lossRatio")
node_lr.text = " ".join(map(str, vuln_func['mean_lr']))
node_cv = etree.SubElement(node_dvf, "coefficientsVariation")
node_cv.text = " ".join(map(str, vuln_func['stddev_lr']))
f.write(etree.tostring(root, pretty_print=True, xml_declaration=True, encoding='UTF-8'))
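# Hedged usage note (added for illustration): the input CSV is expected to hold
# one row per intensity measure level with the columns read above (vuln_set_id,
# vuln_func_id, asset_cat, loss_cat, imt, iml, distr, mean_lr, stddev_lr), and
# the converter can be called directly, e.g.
#   csv_to_xml('vulnerability_model.csv', 'vulnerability_model.xml')
# where 'vulnerability_model.csv' is a hypothetical file name.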
def xml_to_csv (input_xml, output_csv):
"""
Converts the XML vulnerability model file to the CSV format
"""
print('This feature will be implemented in a future release.')
def set_up_arg_parser():
"""
Can run as executable. To do so, set up the command line parser
"""
description = ('Convert a Vulnerability Model from CSV to XML and '
'vice versa.\n\nTo convert from CSV to XML: '
'\npython vulnerability_model_converter.py '
'--input-csv-file PATH_TO_VULNERABILITY_MODEL_CSV_FILE '
'--output-xml-file PATH_TO_OUTPUT_XML_FILE'
'\n\nTo convert from XML to CSV type: '
'\npython vulnerability_model_converter.py '
'--input-xml-file PATH_TO_VULNERABILITY_MODEL_XML_FILE '
'--output-csv-file PATH_TO_OUTPUT_CSV_FILE')
parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter)
flags = parser.add_argument_group('flag arguments')
group_input = flags.add_mutually_exclusive_group(required=True)
group_input.add_argument('--input-xml-file',
help='path to vulnerability model XML file',
default=None)
group_input.add_argument('--input-csv-file',
help='path to vulnerability model CSV file',
default=None)
group_output = flags.add_mutually_exclusive_group()
group_output.add_argument('--output-xml-file',
help='path to output XML file',
default=None,
required=False)
group_output.add_argument('--output-csv-file',
help='path to output CSV file',
default=None,
required=False)
return parser
if __name__ == "__main__":
parser = set_up_arg_parser()
args = parser.parse_args()
if args.input_csv_file:
if args.output_xml_file:
output_file = args.output_xml_file
else:
(filename, ext) = os.path.splitext(args.input_csv_file)
output_file = filename + '.xml'
csv_to_xml(args.input_csv_file, output_file)
elif args.input_xml_file:
if args.output_csv_file:
output_file = args.output_csv_file
else:
(filename, ext) = os.path.splitext(args.input_xml_file)
output_file = filename + '.csv'
xml_to_csv(args.input_xml_file, output_file)
else:
parser.print_usage() | agpl-3.0 |
BlueBrain/NEST | topology/pynest/tests/test_plotting.py | 13 | 4111 | # -*- coding: utf-8 -*-
#
# test_plotting.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for basic topology hl_api functions.
NOTE: These tests only check whether the code runs; they do not check
whether the results produced are correct.
"""
import unittest
import nest
import nest.topology as topo
try:
import matplotlib.pyplot as plt
plt.figure() # make sure we can open a window; on Jenkins, DISPLAY is not set
PLOTTING_POSSIBLE = True
except:
PLOTTING_POSSIBLE = False
@unittest.skipIf(not PLOTTING_POSSIBLE, 'Plotting is impossible because matplotlib or display missing')
class PlottingTestCase(unittest.TestCase):
def test_PlotLayer(self):
"""Test plotting layer."""
ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns':3,
'extent': [2., 2.], 'edge_wrap': True}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.PlotLayer(l)
self.assertTrue(True)
def test_PlotTargets(self):
"""Test plotting targets."""
ldict = {'elements': ['iaf_neuron', 'iaf_psc_alpha'], 'rows': 3, 'columns':3,
'extent': [2., 2.], 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
'mask': {'grid': {'rows':2, 'columns':2}}}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
ian = [gid for gid in nest.GetLeaves(l)[0]
if nest.GetStatus([gid], 'model')[0] == 'iaf_neuron']
ipa = [gid for gid in nest.GetLeaves(l)[0]
if nest.GetStatus([gid], 'model')[0] == 'iaf_psc_alpha']
# connect ian -> all using static_synapse
cdict.update({'sources': {'model': 'iaf_neuron'},
'synapse_model': 'static_synapse'})
topo.ConnectLayers(l, l, cdict)
for k in ['sources', 'synapse_model']: cdict.pop(k)
# connect ipa -> ipa using stdp_synapse
cdict.update({'sources': {'model': 'iaf_psc_alpha'},
'targets': {'model': 'iaf_psc_alpha'},
'synapse_model': 'stdp_synapse'})
topo.ConnectLayers(l, l, cdict)
for k in ['sources', 'targets', 'synapse_model']: cdict.pop(k)
ctr = topo.FindCenterElement(l)
fig = topo.PlotTargets(ctr, l)
fig.gca().set_title('Plain call')
self.assertTrue(True)
def test_PlotKernel(self):
"""Test plotting kernels."""
ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns':3,
'extent': [2., 2.], 'edge_wrap': True}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
f = plt.figure()
a1 = f.add_subplot(221)
ctr = topo.FindCenterElement(l)
topo.PlotKernel(a1, ctr, {'circular': {'radius': 1.}}, {'gaussian': {'sigma':0.2}})
a2 = f.add_subplot(222)
topo.PlotKernel(a2, ctr, {'doughnut': {'inner_radius': 0.5, 'outer_radius':0.75}})
a3 = f.add_subplot(223)
topo.PlotKernel(a3, ctr, {'rectangular': {'lower_left': [-.5,-.5],
'upper_right':[0.5,0.5]}})
self.assertTrue(True)
def suite():
suite = unittest.makeSuite(PlottingTestCase,'test')
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
import matplotlib.pyplot as plt
plt.show()
| gpl-2.0 |
mirkix/ardupilot | Tools/scripts/tempcal_IMU.py | 16 | 20088 | #!/usr/bin/env python
'''
Create temperature calibration parameters for IMUs based on log data.
'''
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--outfile", default="tcal.parm", help='set output file')
parser.add_argument("--no-graph", action='store_true', default=False, help='disable graph display')
parser.add_argument("--log-parm", action='store_true', default=False, help='show corrections using coefficients from log file')
parser.add_argument("--online", action='store_true', default=False, help='use online polynomial fitting')
parser.add_argument("--tclr", action='store_true', default=False, help='use TCLR messages from log instead of IMU messages')
parser.add_argument("log", metavar="LOG")
args = parser.parse_args()
import sys
import math
import re
from pymavlink import mavutil
import numpy as np
import matplotlib.pyplot as pyplot
from scipy import signal
from pymavlink.rotmat import Vector3, Matrix3
# fit an order 3 polynomial
POLY_ORDER = 3
# we use a fixed reference temperature of 35C. This has the advantage that
# we don't need to know the final temperature when doing an online calibration
# which allows us to have a calibration timeout
TEMP_REF = 35.0
# we scale the parameters so the values work nicely in
# parameter editors and parameter files that don't
# use exponential notation
SCALE_FACTOR = 1.0e6
AXES = ['X','Y','Z']
AXEST = ['X','Y','Z','T','time']
class Coefficients:
'''class representing a set of coefficients'''
def __init__(self):
self.acoef = {}
self.gcoef = {}
self.enable = [0]*3
self.tmin = [-100]*3
self.tmax = [-100]*3
self.gtcal = {}
self.atcal = {}
self.gofs = {}
self.aofs = {}
def set_accel_poly(self, imu, axis, values):
if imu not in self.acoef:
self.acoef[imu] = {}
self.acoef[imu][axis] = values
def set_gyro_poly(self, imu, axis, values):
if imu not in self.gcoef:
self.gcoef[imu] = {}
self.gcoef[imu][axis] = values
def set_acoeff(self, imu, axis, order, value):
if imu not in self.acoef:
self.acoef[imu] = {}
if not axis in self.acoef[imu]:
self.acoef[imu][axis] = [0]*4
self.acoef[imu][axis][POLY_ORDER-order] = value
def set_gcoeff(self, imu, axis, order, value):
if imu not in self.gcoef:
self.gcoef[imu] = {}
if not axis in self.gcoef[imu]:
self.gcoef[imu][axis] = [0]*4
self.gcoef[imu][axis][POLY_ORDER-order] = value
def set_aoffset(self, imu, axis, value):
if imu not in self.aofs:
self.aofs[imu] = {}
self.aofs[imu][axis] = value
def set_goffset(self, imu, axis, value):
if imu not in self.gofs:
self.gofs[imu] = {}
self.gofs[imu][axis] = value
def set_tmin(self, imu, tmin):
self.tmin[imu] = tmin
def set_tmax(self, imu, tmax):
self.tmax[imu] = tmax
def set_gyro_tcal(self, imu, value):
self.gtcal[imu] = value
def set_accel_tcal(self, imu, value):
self.atcal[imu] = value
def set_enable(self, imu, value):
self.enable[imu] = value
def correction(self, coeff, imu, temperature, axis, cal_temp):
'''calculate correction from temperature calibration from log data using parameters'''
if self.enable[imu] != 1.0:
return 0.0
if cal_temp < -80:
return 0.0
if not axis in coeff:
return 0.0
temperature = constrain(temperature, self.tmin[imu], self.tmax[imu])
cal_temp = constrain(cal_temp, self.tmin[imu], self.tmax[imu])
poly = np.poly1d(coeff[axis])
return poly(cal_temp - TEMP_REF) - poly(temperature - TEMP_REF)
def correction_accel(self, imu, temperature):
'''calculate accel correction from temperature calibration from
log data using parameters'''
cal_temp = self.atcal.get(imu, TEMP_REF)
return Vector3(self.correction(self.acoef[imu], imu, temperature, 'X', cal_temp),
self.correction(self.acoef[imu], imu, temperature, 'Y', cal_temp),
self.correction(self.acoef[imu], imu, temperature, 'Z', cal_temp))
def correction_gyro(self, imu, temperature):
'''calculate gyro correction from temperature calibration from
log data using parameters'''
cal_temp = self.gtcal.get(imu, TEMP_REF)
return Vector3(self.correction(self.gcoef[imu], imu, temperature, 'X', cal_temp),
self.correction(self.gcoef[imu], imu, temperature, 'Y', cal_temp),
self.correction(self.gcoef[imu], imu, temperature, 'Z', cal_temp))
def param_string(self, imu):
params = ''
params += 'INS_TCAL%u_ENABLE 1\n' % (imu+1)
params += 'INS_TCAL%u_TMIN %.1f\n' % (imu+1, self.tmin[imu])
params += 'INS_TCAL%u_TMAX %.1f\n' % (imu+1, self.tmax[imu])
# note that we don't save the first term of the polynomial as that is a
# constant offset which is already handled by the accel/gyro constant
# offsets. We only same the temperature dependent part of the
# calibration
for p in range(POLY_ORDER):
for axis in AXES:
params += 'INS_TCAL%u_ACC%u_%s %.9f\n' % (imu+1, p+1, axis, self.acoef[imu][axis][POLY_ORDER-(p+1)]*SCALE_FACTOR)
for p in range(POLY_ORDER):
for axis in AXES:
params += 'INS_TCAL%u_GYR%u_%s %.9f\n' % (imu+1, p+1, axis, self.gcoef[imu][axis][POLY_ORDER-(p+1)]*SCALE_FACTOR)
return params
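# Hedged illustration (added, not produced by the original comments): for IMU
# index 0 the string built above begins with lines such as
#   INS_TCAL1_ENABLE 1
#   INS_TCAL1_TMIN 20.0
#   INS_TCAL1_TMAX 50.0
# (the temperature limits here are hypothetical), followed by the
# INS_TCAL1_ACC*/INS_TCAL1_GYR* polynomial terms scaled by SCALE_FACTOR.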
class OnlineIMUfit:
'''implement the online learning used in ArduPilot'''
def __init__(self):
pass
def update(self, x, y):
temp = 1.0
for i in range(2*(self.porder - 1), -1, -1):
k = 0 if (i < self.porder) else (i - self.porder + 1)
for j in range(i - k, k-1, -1):
self.mat[j][i-j] += temp
temp *= x
temp = 1.0
for i in range(self.porder-1, -1, -1):
self.vec[i] += y * temp
temp *= x
def get_polynomial(self):
inv_mat = np.linalg.inv(self.mat)
res = np.zeros(self.porder)
for i in range(self.porder):
for j in range(self.porder):
res[i] += inv_mat[i][j] * self.vec[j]
return res
def polyfit(self, x, y, order):
self.porder = order + 1
self.mat = np.zeros((self.porder, self.porder))
self.vec = np.zeros(self.porder)
for i in range(len(x)):
self.update(x[i], y[i])
return self.get_polynomial()
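# Hedged sanity check (added for illustration, never called by the script):
# OnlineIMUfit accumulates the least-squares normal equations sample by sample,
# so on clean synthetic data it should closely match numpy's batch polyfit.
def _check_online_fit_against_numpy():
    x = np.linspace(-20.0, 40.0, 50)
    y = 1.0e-3 * x ** 2 - 2.0e-2 * x + 0.5
    online = OnlineIMUfit().polyfit(x, y, POLY_ORDER)
    batch = np.polyfit(x, y, POLY_ORDER)
    # both are ordered with the highest-degree coefficient first
    return np.allclose(online, batch, atol=1.0e-6)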
class IMUData:
def __init__(self):
self.accel = {}
self.gyro = {}
def IMUs(self):
'''return list of IMUs'''
if len(self.accel.keys()) != len(self.gyro.keys()):
print("accel and gyro data doesn't match")
sys.exit(1)
return self.accel.keys()
def add_accel(self, imu, temperature, time, value):
if imu not in self.accel:
self.accel[imu] = {}
for axis in AXEST:
self.accel[imu][axis] = np.zeros(0,dtype=float)
self.accel[imu]['T'] = np.append(self.accel[imu]['T'], temperature)
self.accel[imu]['X'] = np.append(self.accel[imu]['X'], value.x)
self.accel[imu]['Y'] = np.append(self.accel[imu]['Y'], value.y)
self.accel[imu]['Z'] = np.append(self.accel[imu]['Z'], value.z)
self.accel[imu]['time'] = np.append(self.accel[imu]['time'], time)
def add_gyro(self, imu, temperature, time, value):
if imu not in self.gyro:
self.gyro[imu] = {}
for axis in AXEST:
self.gyro[imu][axis] = np.zeros(0,dtype=float)
self.gyro[imu]['T'] = np.append(self.gyro[imu]['T'], temperature)
self.gyro[imu]['X'] = np.append(self.gyro[imu]['X'], value.x)
self.gyro[imu]['Y'] = np.append(self.gyro[imu]['Y'], value.y)
self.gyro[imu]['Z'] = np.append(self.gyro[imu]['Z'], value.z)
self.gyro[imu]['time'] = np.append(self.gyro[imu]['time'], time)
def moving_average(self, data, w):
'''apply a moving average filter over a window of width w'''
ret = np.cumsum(data)
ret[w:] = ret[w:] - ret[:-w]
return ret[w - 1:] / w
def FilterArray(self, data, width_s):
'''apply moving average filter of width width_s seconds'''
nseconds = data['time'][-1] - data['time'][0]
nsamples = len(data['time'])
window = int(nsamples / nseconds) * width_s
if window > 1:
for axis in AXEST:
data[axis] = self.moving_average(data[axis], window)
return data
def Filter(self, width_s):
'''apply moving average filter of width width_s seconds'''
for imu in self.IMUs():
self.accel[imu] = self.FilterArray(self.accel[imu], width_s)
self.gyro[imu] = self.FilterArray(self.gyro[imu], width_s)
def accel_at_temp(self, imu, axis, temperature):
'''return the accel value closest to the given temperature'''
if temperature < self.accel[imu]['T'][0]:
return self.accel[imu][axis][0]
for i in range(len(self.accel[imu]['T'])-1):
if temperature >= self.accel[imu]['T'][i] and temperature <= self.accel[imu]['T'][i+1]:
v1 = self.accel[imu][axis][i]
v2 = self.accel[imu][axis][i+1]
p = (temperature - self.accel[imu]['T'][i]) / (self.accel[imu]['T'][i+1]-self.accel[imu]['T'][i])
return v1 + (v2-v1) * p
return self.accel[imu][axis][-1]
def gyro_at_temp(self, imu, axis, temperature):
'''return the gyro value closest to the given temperature'''
if temperature < self.gyro[imu]['T'][0]:
return self.gyro[imu][axis][0]
for i in range(len(self.gyro[imu]['T'])-1):
if temperature >= self.gyro[imu]['T'][i] and temperature <= self.gyro[imu]['T'][i+1]:
v1 = self.gyro[imu][axis][i]
v2 = self.gyro[imu][axis][i+1]
p = (temperature - self.gyro[imu]['T'][i]) / (self.gyro[imu]['T'][i+1]-self.gyro[imu]['T'][i])
return v1 + (v2-v1) * p
return self.gyro[imu][axis][-1]
def constrain(value, minv, maxv):
"""Constrain a value to a range."""
if value < minv:
value = minv
if value > maxv:
value = maxv
return value
def IMUfit(logfile):
'''find IMU calibration parameters from a log file'''
print("Processing log %s" % logfile)
mlog = mavutil.mavlink_connection(logfile)
data = IMUData()
c = Coefficients()
orientation = 0
stop_capture = [ False ] * 3
if args.tclr:
messages = ['PARM','TCLR']
else:
messages = ['PARM','IMU']
while True:
msg = mlog.recv_match(type=messages)
if msg is None:
break
if msg.get_type() == 'PARM':
# build up the old coefficients so we can remove the impact of
# existing coefficients from the data
m = re.match("^INS_TCAL(\d)_ENABLE$", msg.Name)
if m:
imu = int(m.group(1))-1
if stop_capture[imu]:
continue
if msg.Value == 1 and c.enable[imu] == 2:
print("TCAL[%u] enabled" % imu)
stop_capture[imu] = True
continue
if msg.Value == 0 and c.enable[imu] == 1:
print("TCAL[%u] disabled" % imu)
stop_capture[imu] = True
continue
c.set_enable(imu, msg.Value)
m = re.match("^INS_TCAL(\d)_(ACC|GYR)([1-3])_([XYZ])$", msg.Name)
if m:
imu = int(m.group(1))-1
stype = m.group(2)
p = int(m.group(3))
axis = m.group(4)
if stop_capture[imu]:
continue
if stype == 'ACC':
c.set_acoeff(imu, axis, p, msg.Value/SCALE_FACTOR)
if stype == 'GYR':
c.set_gcoeff(imu, axis, p, msg.Value/SCALE_FACTOR)
m = re.match("^INS_TCAL(\d)_TMIN$", msg.Name)
if m:
imu = int(m.group(1))-1
if stop_capture[imu]:
continue
c.set_tmin(imu, msg.Value)
m = re.match("^INS_TCAL(\d)_TMAX", msg.Name)
if m:
imu = int(m.group(1))-1
if stop_capture[imu]:
continue
c.set_tmax(imu, msg.Value)
m = re.match("^INS_GYR(\d)_CALTEMP", msg.Name)
if m:
imu = int(m.group(1))-1
if stop_capture[imu]:
continue
c.set_gyro_tcal(imu, msg.Value)
m = re.match("^INS_ACC(\d)_CALTEMP", msg.Name)
if m:
imu = int(m.group(1))-1
if stop_capture[imu]:
continue
c.set_accel_tcal(imu, msg.Value)
m = re.match("^INS_(ACC|GYR)(\d?)OFFS_([XYZ])$", msg.Name)
if m:
stype = m.group(1)
if m.group(2) == "":
imu = 0
else:
imu = int(m.group(2))-1
axis = m.group(3)
if stop_capture[imu]:
continue
if stype == 'ACC':
c.set_aoffset(imu, axis, msg.Value)
if stype == 'GYR':
c.set_goffset(imu, axis, msg.Value)
if msg.Name == 'AHRS_ORIENTATION':
orientation = int(msg.Value)
print("Using orientation %d" % orientation)
if msg.get_type() == 'TCLR' and args.tclr:
imu = msg.I
T = msg.Temp
if msg.SType == 0:
# accel
acc = Vector3(msg.X, msg.Y, msg.Z)
time = msg.TimeUS*1.0e-6
data.add_accel(imu, T, time, acc)
elif msg.SType == 1:
# gyro
gyr = Vector3(msg.X, msg.Y, msg.Z)
time = msg.TimeUS*1.0e-6
data.add_gyro(imu, T, time, gyr)
if msg.get_type() == 'IMU' and not args.tclr:
imu = msg.I
if stop_capture[imu]:
continue
T = msg.T
acc = Vector3(msg.AccX, msg.AccY, msg.AccZ)
gyr = Vector3(msg.GyrX, msg.GyrY, msg.GyrZ)
# invert the board orientation rotation. Corrections are in sensor frame
if orientation != 0:
acc = acc.rotate_by_inverse_id(orientation)
gyr = gyr.rotate_by_inverse_id(orientation)
if acc is None or gyr is None:
print("Invalid AHRS_ORIENTATION %u" % orientation)
sys.exit(1)
if c.enable[imu] == 1:
acc -= c.correction_accel(imu, T)
gyr -= c.correction_gyro(imu, T)
time = msg.TimeUS*1.0e-6
data.add_accel(imu, T, time, acc)
data.add_gyro (imu, T, time, gyr)
if len(data.IMUs()) == 0:
print("No data found")
sys.exit(1)
print("Loaded %u accel and %u gyro samples" % (len(data.accel[0]['T']),len(data.gyro[0]['T'])))
if not args.tclr:
# apply moving average filter with 2s width
data.Filter(2)
clog = c
c = Coefficients()
calfile = open(args.outfile, "w")
for imu in data.IMUs():
tmin = np.amin(data.accel[imu]['T'])
tmax = np.amax(data.accel[imu]['T'])
c.set_tmin(imu, tmin)
c.set_tmax(imu, tmax)
for axis in AXES:
if args.online:
fit = OnlineIMUfit()
trel = data.accel[imu]['T'] - TEMP_REF
ofs = data.accel_at_temp(imu, axis, clog.atcal[imu])
c.set_accel_poly(imu, axis, fit.polyfit(trel, data.accel[imu][axis] - ofs, POLY_ORDER))
trel = data.gyro[imu]['T'] - TEMP_REF
c.set_gyro_poly(imu, axis, fit.polyfit(trel, data.gyro[imu][axis], POLY_ORDER))
else:
trel = data.accel[imu]['T'] - TEMP_REF
if imu in clog.atcal:
ofs = data.accel_at_temp(imu, axis, clog.atcal[imu])
else:
ofs = np.mean(data.accel[imu][axis])
c.set_accel_poly(imu, axis, np.polyfit(trel, data.accel[imu][axis] - ofs, POLY_ORDER))
trel = data.gyro[imu]['T'] - TEMP_REF
c.set_gyro_poly(imu, axis, np.polyfit(trel, data.gyro[imu][axis], POLY_ORDER))
params = c.param_string(imu)
print(params)
calfile.write(params)
calfile.close()
print("Calibration written to %s" % args.outfile)
if args.no_graph:
return
fig, axs = pyplot.subplots(len(data.IMUs()), 1, sharex=True)
num_imus = len(data.IMUs())
if num_imus == 1:
axs = [axs]
for imu in data.IMUs():
scale = math.degrees(1)
for axis in AXES:
axs[imu].plot(data.gyro[imu]['time'], data.gyro[imu][axis]*scale, label='Uncorrected %s' % axis)
for axis in AXES:
poly = np.poly1d(c.gcoef[imu][axis])
trel = data.gyro[imu]['T'] - TEMP_REF
correction = poly(trel)
axs[imu].plot(data.gyro[imu]['time'], (data.gyro[imu][axis] - correction)*scale, label='Corrected %s' % axis)
if args.log_parm:
for axis in AXES:
if clog.enable[imu] == 0.0:
print("IMU[%u] disabled in log parms" % imu)
continue
poly = np.poly1d(clog.gcoef[imu][axis])
correction = poly(data.gyro[imu]['T'] - TEMP_REF) - poly(clog.gtcal[imu] - TEMP_REF) + clog.gofs[imu][axis]
axs[imu].plot(data.gyro[imu]['time'], (data.gyro[imu][axis] - correction)*scale, label='Corrected %s (log parm)' % axis)
ax2 = axs[imu].twinx()
ax2.plot(data.gyro[imu]['time'], data.gyro[imu]['T'], label='Temperature(C)', color='black')
ax2.legend(loc='upper right')
axs[imu].legend(loc='upper left')
axs[imu].set_title('IMU[%u] Gyro (deg/s)' % imu)
fig, axs = pyplot.subplots(num_imus, 1, sharex=True)
if num_imus == 1:
axs = [axs]
for imu in data.IMUs():
for axis in AXES:
ofs = data.accel_at_temp(imu, axis, clog.atcal.get(imu, TEMP_REF))
axs[imu].plot(data.accel[imu]['time'], data.accel[imu][axis] - ofs, label='Uncorrected %s' % axis)
for axis in AXES:
poly = np.poly1d(c.acoef[imu][axis])
trel = data.accel[imu]['T'] - TEMP_REF
correction = poly(trel)
ofs = data.accel_at_temp(imu, axis, clog.atcal.get(imu, TEMP_REF))
axs[imu].plot(data.accel[imu]['time'], (data.accel[imu][axis] - ofs) - correction, label='Corrected %s' % axis)
if args.log_parm:
for axis in AXES:
if clog.enable[imu] == 0.0:
print("IMU[%u] disabled in log parms" % imu)
continue
poly = np.poly1d(clog.acoef[imu][axis])
ofs = data.accel_at_temp(imu, axis, clog.atcal[imu])
correction = poly(data.accel[imu]['T'] - TEMP_REF) - poly(clog.atcal[imu] - TEMP_REF)
axs[imu].plot(data.accel[imu]['time'], (data.accel[imu][axis] - ofs) - correction, label='Corrected %s (log parm)' % axis)
ax2 = axs[imu].twinx()
ax2.plot(data.accel[imu]['time'], data.accel[imu]['T'], label='Temperature(C)', color='black')
ax2.legend(loc='upper right')
axs[imu].legend(loc='upper left')
axs[imu].set_title('IMU[%u] Accel (m/s^2)' % imu)
pyplot.show()
IMUfit(args.log)
| gpl-3.0 |
yonglehou/scikit-learn | examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py | 252 | 3490 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing allowing for
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative with
respect to the remaining uncertainty in the prediction. The red and blue lines
corresponds to the 95% confidence interval on the prediction of the zero level
set.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
phi = stats.distributions.norm().pdf
PHI = stats.distributions.norm().cdf
PHIinv = stats.distributions.norm().ppf
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instantiate and fit Gaussian Process Model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
k = PHIinv(.975)
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
| bsd-3-clause |
ocefpaf/python-oceans | oceans/sw_extras/sw_extras.py | 2 | 29692 | from copy import copy
import numpy as np
import seawater as sw
from seawater.constants import OMEGA, earth_radius
def sigma_t(s, t, p):
"""
:math:`\\sigma_{t}` is the remainder of subtracting 1000 kg m :sup:`-3`
from the density of a sea water sample at atmospheric pressure.
Parameters
----------
s(p) : array_like
salinity [psu (PSS-78)]
t(p) : array_like
temperature [:math:`^\\circ` C (ITS-90)]
p : array_like
pressure [db]
Returns
-------
sgmt : array_like
density [kg m :sup:`3`]
Notes
-----
Density of Sea Water using UNESCO 1983 (EOS 80) polynomial.
Examples
--------
>>> # Data from UNESCO Tech. Paper in Marine Sci. No. 44, p22.
>>> from seawater.library import T90conv
>>> import oceans.sw_extras.sw_extras as swe
>>> s = [0, 0, 0, 0, 35, 35, 35, 35]
>>> t = T90conv([0, 0, 30, 30, 0, 0, 30, 30])
>>> p = [0, 10000, 0, 10000, 0, 10000, 0, 10000]
>>> swe.sigma_t(s, t, p)
array([-0.157406 , 45.33710972, -4.34886626, 36.03148891, 28.10633141,
70.95838408, 21.72863949, 60.55058771])
References
----------
Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
computation of fundamental properties of seawater. UNESCO Tech. Pap. in
Mar. Sci., No. 44, 53 pp. Eqn.(31) p.39.
http://www.scor-int.org/Publications.htm
Millero, F.J., Chen, C.T., Bradshaw, A., and Schleicher, K. A new
high pressure equation of state for seawater. Deap-Sea Research., 1980,
Vol27A, pp255-264. doi:10.1016/0198-0149(80)90016-3
"""
s, t, p = list(map(np.asanyarray, (s, t, p)))
return sw.dens(s, t, p) - 1000.0
def sigmatheta(s, t, p, pr=0):
"""
:math:`\\sigma_{\\theta}` is a measure of the density of ocean water
where the quantity :math:`\\sigma_{t}` is calculated using the potential
temperature (:math:`\\theta`) rather than the in situ temperature and
potential density of water mass relative to the specified reference
pressure.
Parameters
----------
s(p) : array_like
salinity [psu (PSS-78)]
t(p) : array_like
temperature [:math:`^\\circ` C (ITS-90)]
p : array_like
pressure [db]
pr : number
reference pressure [db], default = 0
Returns
-------
sgmte : array_like
density [kg m :sup:`3`]
Examples
--------
>>> # Data from UNESCO Tech. Paper in Marine Sci. No. 44, p22.
>>> from seawater.library import T90conv
>>> import oceans.sw_extras.sw_extras as swe
>>> s = [0, 0, 0, 0, 35, 35, 35, 35]
>>> t = T90conv([0, 0, 30, 30, 0, 0, 30, 30])
>>> p = [0, 10000, 0, 10000, 0, 10000, 0, 10000]
>>> swe.sigmatheta(s, t, p)
array([-0.157406 , -0.20476006, -4.34886626, -3.63884068, 28.10633141,
28.15738545, 21.72863949, 22.59634627])
References
----------
Fofonoff, P. and Millard, R.C. Jr UNESCO 1983. Algorithms for
computation of fundamental properties of seawater. UNESCO Tech. Pap. in
Mar. Sci., No. 44, 53 pp. Eqn.(31) p.39.
http://www.scor-int.org/Publications.htm
Millero, F.J., Chen, C.T., Bradshaw, A., and Schleicher, K. A new
high pressure equation of state for seawater. Deap-Sea Research., 1980,
Vol27A, pp255-264. doi:10.1016/0198-0149(80)90016-3
"""
s, t, p, pr = list(map(np.asanyarray, (s, t, p, pr)))
return sw.pden(s, t, p, pr) - 1000.0
def N(bvfr2):
"""
Buoyancy frequency is the frequency with which a parcel or particle of
fluid displaced a small vertical distance from its equilibrium position in
a stable environment will oscillate. It will oscillate in simple harmonic
motion with an angular frequency defined by
.. math:: N = \\left(\\frac{-g}{\\sigma_{\\theta}}
\\frac{d\\sigma_{\\theta}}{dz}\\right)^{2}
Parameters
----------
n2 : array_like
Brünt-Väisälä Frequency squared [s :sup:`-2`]
Returns
-------
n : array_like
Brünt-Väisälä Frequency not-squared [s :sup:`-1`]
Examples
--------
>>> import numpy as np
>>> import oceans.sw_extras.sw_extras as swe
>>> s = np.array([[0, 0, 0], [15, 15, 15], [30, 30, 30],[35,35,35]])
>>> t = np.repeat(15, s.size).reshape(s.shape)
>>> p = [[0], [250], [500], [1000]]
>>> lat = [30,32,35]
>>> swe.N(sw.bfrq(s, t, p, lat)[0])
array([[0.02124956, 0.02125302, 0.02125843],
[0.02110919, 0.02111263, 0.02111801],
[0.00860812, 0.00860952, 0.00861171]])
References
----------
A.E. Gill 1982. p.54 eqn 3.7.15 "Atmosphere-Ocean Dynamics"
Academic Press: New York. ISBN: 0-12-283522-0
Jackett, David R., Trevor J. Mcdougall, 1995: Minimal Adjustment of
Hydrographic Profiles to Achieve Static Stability. J. Atmos. Oceanic
Technol., 12, 381-389. doi: 10.1175/1520-0426(1995)012<0381:MAOHPT>2.0.CO;2
"""
bvfr2 = np.asanyarray(bvfr2)
return np.sqrt(np.abs(bvfr2)) * np.sign(bvfr2)
def cph(bvfr2):
"""
Buoyancy frequency in Cycles Per Hour.
Parameters
----------
n2 : array_like
Brünt-Väisälä Frequency squared [s :sup:`-2`]
Returns
-------
cph : array_like
Brünt-Väisälä Frequency [ cylcles hour :sup:`-1`]
Examples
--------
>>> import numpy as np
>>> import oceans.sw_extras.sw_extras as swe
>>> s = np.array([[0, 0, 0], [15, 15, 15], [30, 30, 30],[35,35,35]])
>>> t = np.repeat(15, s.size).reshape(s.shape)
>>> p = [[0], [250], [500], [1000]]
>>> lat = [30,32,35]
>>> swe.cph(sw.bfrq(s, t, p, lat)[0])
array([[12.17509899, 12.17708145, 12.18018192],
[12.09467754, 12.09664676, 12.09972655],
[ 4.93208775, 4.9328907 , 4.93414649]])
References
----------
A.E. Gill 1982. p.54 eqn 3.7.15 "Atmosphere-Ocean Dynamics"
Academic Press: New York. ISBN: 0-12-283522-0
"""
bvfr2 = np.asanyarray(bvfr2)
# Root squared preserving the sign.
bvfr = np.sqrt(np.abs(bvfr2)) * np.sign(bvfr2)
return bvfr * 60.0 * 60.0 / (2.0 * np.pi)
def shear(z, u, v=0):
r"""
Calculates the vertical shear for u, v velocity section.
.. math::
\\textrm{shear} = \\frac{\\partial (u^2 + v^2)^{0.5}}{\partial z}
Parameters
----------
z : array_like
depth [m]
u(z) : array_like
Eastward velocity [m s :sup:`-1`]
v(z) : array_like
Northward velocity [m s :sup:`-1`]
Returns
-------
shr : array_like
frequency [s :sup:`-1`]
z_ave : array_like
depth between z grid (M-1xN) [m]
Examples
--------
>>> import oceans.sw_extras.sw_extras as swe
>>> z = [[0], [250], [500], [1000]]
>>> u = [[0.5, 0.5, 0.5], [0.15, 0.15, 0.15],
... [0.03, 0.03, .03], [0.,0.,0.]]
>>> swe.shear(z, u)[0]
array([[-1.4e-03, -1.4e-03, -1.4e-03],
[-4.8e-04, -4.8e-04, -4.8e-04],
[-6.0e-05, -6.0e-05, -6.0e-05]])
"""
z, u, v = list(map(np.asanyarray, (z, u, v)))
z, u, v = np.broadcast_arrays(z, u, v)
m, n = z.shape
iup = np.arange(0, m - 1)
ilo = np.arange(1, m)
z_ave = (z[iup, :] + z[ilo, :]) / 2.0
vel = np.sqrt(u ** 2 + v ** 2)
diff_vel = np.diff(vel, axis=0)
diff_z = np.diff(z, axis=0)
shr = diff_vel / diff_z
return shr, z_ave
def richnumb(bvfr2, S2):
r"""
Calculates the ratio of buoyancy to inertial forces which measures the
stability of a fluid layer. this functions computes the gradient Richardson
number in the form of:
.. math::
Ri = \\frac{N^2}{S^2}
Representing a dimensionless number that expresses the ratio of the energy
extracted by buoyancy forces to the energy gained from the shear of the
large-scale velocity field.
Parameters
----------
bvfr2 : array_like
Brünt-Väisälä Frequency squared (M-1xN) [rad\ :sup:`-2` s\ :sup:`-2`]
S2 : array_like
shear squared [s :sup:`-2`]
Returns
-------
ri : array_like
non-dimensional
Examples
--------
TODO: check the example and add real values
>>> import numpy as np
>>> import seawater as sw
>>> import oceans.sw_extras.sw_extras as swe
>>> s = np.array([[0, 0, 0], [15, 15, 15], [30, 30, 30],[ 35, 35, 35]])
>>> t = np.repeat(15, s.size).reshape(s.shape)
>>> p = [[0], [250], [500], [1000]]
>>> lat = [30, 32, 35]
>>> bvfr2 = sw.bfrq(s, t, p, lat)[0]
>>> vel = [[0.5, 0.5, 0.5], [0.15, 0.15, 0.15],
... [0.03, 0.03, .03], [0.,0.,0.]]
>>> S2 = swe.shear(p, vel)[0] ** 2
>>> swe.richnumb(bvfr2, S2)
array([[ 230.37941215, 230.45444299, 230.57181258],
[ 1934.01949759, 1934.64933431, 1935.63457818],
[20583.24410868, 20589.94661835, 20600.43125069]])
"""
bvfr2, S2 = list(map(np.asanyarray, (bvfr2, S2)))
# FIXME: check this for correctness.
return bvfr2 / S2
def cor_beta(lat):
"""
Calculates the Coriolis :math:`\\beta` factor defined by:
.. math::
beta = 2 \\Omega \\cos(lat)
where:
.. math::
\\Omega = \\frac{2 \\pi}{\\textrm{sidereal day}} = 7.292e^{-5}
\\textrm{ radians sec}^{-1}
Parameters
----------
lat : array_like
latitude in decimal degrees north [-90..+90].
Returns
-------
beta : array_like
Beta Coriolis [s :sup:`-1`]
Examples
--------
>>> import oceans.sw_extras.sw_extras as swe
>>> swe.cor_beta(0)
2.2891225867210798e-11
References
----------
S. Pond & G.Pickard 2nd Edition 1986 Introductory Dynamical
Oceanography Pergamon Press Sydney. ISBN 0-08-028728-X
A.E. Gill 1982. p.54 eqn 3.7.15 "Atmosphere-Ocean Dynamics"
Academic Press: New York. ISBN: 0-12-283522-0
"""
lat = np.asanyarray(lat)
return 2 * OMEGA * np.cos(lat) / earth_radius
def inertial_period(lat):
"""
Calculate the inertial period as:
.. math::
Ti = \\frac{2\\pi}{f} = \\frac{T_{sd}}{2\\sin\\phi}
Parameters
----------
lat : array_like
latitude in decimal degrees north [-90..+90]
Returns
-------
Ti : array_like
period in seconds
Examples
--------
>>> import oceans.sw_extras.sw_extras as swe
>>> lat = 30.
>>> swe.inertial_period(lat)/3600
23.93484986278565
"""
lat = np.asanyarray(lat)
return 2 * np.pi / sw.f(lat)
def strat_period(N):
"""
Stratification period is the inverse of the Buoyancy frequency and it
is defined by:
.. math:: Tn = \\frac{2\\pi}{N}
Parameters
----------
N : array_like
Brünt-Väisälä Frequency [s :sup:`-1`]
Returns
-------
Tn : array_like
Brünt-Väisälä Period [s]
Examples
--------
>>> import numpy as np
>>> import seawater as sw
>>> import oceans.sw_extras.sw_extras as swe
>>> s = np.array([[0, 0, 0], [15, 15, 15], [30, 30, 30],[35,35,35]])
>>> t = np.repeat(15, s.size).reshape(s.shape)
>>> p = [[0], [250], [500], [1000]]
>>> lat = [30,32,35]
>>> swe.strat_period(swe.N( sw.bfrq(s, t, p, lat)[0]))
array([[295.68548089, 295.63734267, 295.56208791],
[297.6515901 , 297.60313502, 297.52738493],
[729.91402019, 729.79520847, 729.60946944]])
"""
N = np.asanyarray(N)
return 2 * np.pi / N
def visc(s, t, p):
"""
Calculates kinematic viscosity of sea-water. Based on Dan Kelley's fit
to Knauss's TABLE II-8.
Parameters
----------
s : array_like
salinity [psu (PSS-78)]
t : array_like
temperature [℃ (ITS-90)] # FIXME: [degree C (IPTS-68)]
p : array_like
pressure [db]
Returns
-------
visc : kinematic viscosity of sea-water [m^2/s]
Notes
-----
From matlab airsea
Examples
--------
>>> import oceans.sw_extras.sw_extras as swe
>>> swe.visc(40., 40., 1000.)
8.200192496633804e-07
Modifications: Original 1998/01/19 - Ayal Anis 1998
"""
s, t, p = np.broadcast_arrays(s, t, p)
visc = 1e-4 * (17.91 - 0.5381 * t + 0.00694 * t ** 2 + 0.02305 * s)
visc /= sw.dens(s, t, p)
return visc
def tcond(s, t, p):
"""
Calculates thermal conductivity of sea-water.
Parameters
----------
s(p) : array_like
salinity [psu (PSS-78)]
t(p) : array_like
temperature [:math:`^\\circ` C (ITS-90)]
p : array_like
pressure [db]
Returns
-------
therm : array_like
thermal conductivity [W m :sup: `-1` K :sup: `-1`]
Notes
-----
From matlab airsea
Examples
--------
>>> import oceans.sw_extras.sw_extras as swe
>>> swe.tcond(35, 20, 0)
0.5972445569999999
References
----------
Caldwell's DSR 21:131-137 (1974) eq. 9
Catelli et al.'s DSR 21:311-3179(1974) eq. 5
Modifications: Original 1998/01/19 - Ayal Anis 1998
"""
s, t, p = list(map(np.asanyarray, (s, t, p)))
if False: # Castelli's option.
therm = 100.0 * (
5.5286e-3 + 3.4025e-8 * p + 1.8364e-5 * t - 3.3058e-9 * t ** 3
) # [W/m/K]
# 1) Caldwell's option # 2 - simplified formula, accurate to 0.5% (eqn. 9)
# in [cal/cm/C/sec]
therm = 0.001365 * (
1.0 + 0.003 * t - 1.025e-5 * t ** 2 + 0.0653 * (1e-4 * p) - 0.00029 * s
)
return therm * 418.4 # [cal/cm/C/sec] ->[ W/m/K]
def spice(s, t, p):
r"""
Compute sea spiciness as defined by Flament (2002).
    .. math:: \pi(\theta,s) = \sum^5_{i=0} \sum^4_{j=0} b_{ij}\theta^i(s-35)^j
Parameters
----------
s(p) : array_like
salinity [psu (PSS-78)]
t(p) : array_like
temperature [:math:`^\\circ` C (ITS-90)]
p : array_like
pressure [db]
Returns
-------
sp : array_like
:math:`\pi` [kg m :sup:`3`]
See Also
--------
pressure is not used... should the input be theta instead of t?
Go read the paper!
Notes
-----
Spiciness, just like potential density, is only useful over limited
vertical excursions near the pressure to which they are referenced; for
large vertical ranges, the slope of the isopycnals and spiciness isopleths
vary significantly with pressure, and generalization of the polynomial
expansion to include a reference pressure dependence is needed.
Examples
--------
>>> import oceans.sw_extras.sw_extras as swe
>>> swe.spice(33, 15, 0)
array(0.54458641)
References
----------
A state variable for characterizing water masses and their
diffusive stability: spiciness. Prog. in Oceanography Volume 54, 2002,
Pages 493-501.
http://www.satlab.hawaii.edu/spice/spice.m
"""
s, t, p = list(map(np.asanyarray, (s, t, p)))
# FIXME: I'm not sure about this next step.
pt = sw.ptmp(s, t, p)
B = np.zeros((6, 5))
B[0, 0] = 0.0
B[0, 1] = 7.7442e-001
B[0, 2] = -5.85e-003
B[0, 3] = -9.84e-004
B[0, 4] = -2.06e-004
B[1, 0] = 5.1655e-002
B[1, 1] = 2.034e-003
B[1, 2] = -2.742e-004
B[1, 3] = -8.5e-006
B[1, 4] = 1.36e-005
B[2, 0] = 6.64783e-003
B[2, 1] = -2.4681e-004
B[2, 2] = -1.428e-005
B[2, 3] = 3.337e-005
B[2, 4] = 7.894e-006
B[3, 0] = -5.4023e-005
B[3, 1] = 7.326e-006
B[3, 2] = 7.0036e-006
B[3, 3] = -3.0412e-006
B[3, 4] = -1.0853e-006
B[4, 0] = 3.949e-007
B[4, 1] = -3.029e-008
B[4, 2] = -3.8209e-007
B[4, 3] = 1.0012e-007
B[4, 4] = 4.7133e-008
B[5, 0] = -6.36e-010
B[5, 1] = -1.309e-009
B[5, 2] = 6.048e-009
B[5, 3] = -1.1409e-009
B[5, 4] = -6.676e-010
sp = np.zeros_like(pt)
T = np.ones_like(pt)
s = s - 35
r, c = B.shape
for i in range(r):
S = np.ones_like(pt)
for j in range(c):
sp += B[i, j] * T * S
S *= s
T *= pt
return sp
def psu2ppt(psu):
"""
Converts salinity from PSU units to PPT
http://stommel.tamu.edu/~baum/paleo/ocean/node31.html#PracticalSalinityScale
"""
    # PSS-78 expansion coefficients a0..a5 (zero-based indexing here).
    a = [0.008, -0.1692, 25.3851, 14.0941, -7.0261, 2.7081]
    return (
        a[0]
        + a[1] * psu ** 0.5
        + a[2] * psu
        + a[3] * psu ** 1.5
        + a[4] * psu ** 2
        + a[5] * psu ** 2.5
    )
def soundspeed(S, T, D, equation="mackenzie"):
"""
Various sound-speed equations.
1) soundspeed(s, t, d) returns the sound speed (m/sec) given vectors
of salinity (ppt), temperature (deg C) and DEPTH (m) using
the formula of Mackenzie: Mackenzie, K.V. "Nine-term Equation for
Sound Speed in the Oceans", J. Acoust. Soc. Am. 70 (1981), 807-812.
2) soundspeed(s, t, p, 'del_grosso') returns the sound speed (m/sec)given
vectors of salinity (ppt), temperature (deg C), and PRESSURE (dbar)
using the Del Grosso equation: Del Grosso, "A New Equation for the
speed of sound in Natural Waters", J. Acoust. Soc. Am. 56#4 (1974).
3) soundspeed(s, t, p, 'chen') returns the sound speed (m/sec) given
vectors of salinity (ppt), temperature (deg C), and PRESSURE (dbar)
using the Chen and Millero equation: Chen and Millero, "The Sound
Speed in Seawater", J. Acoust. Soc. Am. 62 (1977), 1129-1135.
4) soundspeed(s, t, p, 'state') returns the sound speed (m/sec) given
vectors of salinity (ppt), temperature (deg C), and PRESSURE (dbar) by
using derivatives of the EOS80 equation of state for seawater and the
    adiabatic lapse rate. (Note: the 'state' option is not implemented below
    and will raise a TypeError.)
Notes: RP (WHOI) 3/dec/91
Added state equation ss
"""
if equation == "mackenzie":
c = 1.44896e3
t = 4.591e0
t2 = -5.304e-2
t3 = 2.374e-4
s = 1.340e0
d = 1.630e-2
d2 = 1.675e-7
ts = -1.025e-2
td3 = -7.139e-13
ssp = (
c
+ t * T
+ t2 * T * T
+ t3 * T * T * T
+ s * (S - 35.0)
+ d * D
+ d2 * D * D
+ ts * T * (S - 35.0)
+ td3 * T * D * D * D
)
elif equation == "del_grosso":
# Del grosso uses pressure in kg/cm^2. To get to this from dbars
# we must divide by "g". From the UNESCO algorithms (referring to
# ANON (1970) BULLETIN GEODESIQUE) we have this formula for g as a
# function of latitude and pressure. We set latitude to 45 degrees
# for convenience!
XX = np.sin(45 * np.pi / 180)
GR = 9.780318 * (1.0 + (5.2788e-3 + 2.36e-5 * XX) * XX) + 1.092e-6 * D
P = D / GR
# This is from VSOUND.f.
C000 = 1402.392
DCT = (0.501109398873e1 - (0.550946843172e-1 - 0.221535969240e-3 * T) * T) * T
DCS = (0.132952290781e1 + 0.128955756844e-3 * S) * S
DCP = (0.156059257041e0 + (0.244998688441e-4 - 0.883392332513e-8 * P) * P) * P
DCSTP = (
(
-0.127562783426e-1 * T * S
+ 0.635191613389e-2 * T * P
+ 0.265484716608e-7 * T * T * P * P
- 0.159349479045e-5 * T * P * P
+ 0.522116437235e-9 * T * P * P * P
- 0.438031096213e-6 * T * T * T * P
)
- 0.161674495909e-8 * S * S * P * P
+ 0.968403156410e-4 * T * T * S
+ 0.485639620015e-5 * T * S * S * P
- 0.340597039004e-3 * T * S * P
)
ssp = C000 + DCT + DCS + DCP + DCSTP
elif equation == "chen":
P0 = D
# This is copied directly from the UNESCO algorithms.
# CHECKVALUE: SVEL=1731.995 M/S, S=40 (IPSS-78),T=40 DEG C,P=10000 DBAR
# SCALE PRESSURE TO BARS
P = P0 / 10.0
SR = np.sqrt(np.abs(S))
# S**2 TERM.
D = 1.727e-3 - 7.9836e-6 * P
# S**3/2 TERM.
B1 = 7.3637e-5 + 1.7945e-7 * T
B0 = -1.922e-2 - 4.42e-5 * T
B = B0 + B1 * P
# S**1 TERM.
A3 = (-3.389e-13 * T + 6.649e-12) * T + 1.100e-10
A2 = ((7.988e-12 * T - 1.6002e-10) * T + 9.1041e-9) * T - 3.9064e-7
A1 = (
((-2.0122e-10 * T + 1.0507e-8) * T - 6.4885e-8) * T - 1.2580e-5
) * T + 9.4742e-5
A0 = (((-3.21e-8 * T + 2.006e-6) * T + 7.164e-5) * T - 1.262e-2) * T + 1.389
A = ((A3 * P + A2) * P + A1) * P + A0
# S**0 TERM.
C3 = (-2.3643e-12 * T + 3.8504e-10) * T - 9.7729e-9
C2 = (
((1.0405e-12 * T - 2.5335e-10) * T + 2.5974e-8) * T - 1.7107e-6
) * T + 3.1260e-5
C1 = (
((-6.1185e-10 * T + 1.3621e-7) * T - 8.1788e-6) * T + 6.8982e-4
) * T + 0.153563
C0 = (
(((3.1464e-9 * T - 1.47800e-6) * T + 3.3420e-4) * T - 5.80852e-2) * T
+ 5.03711
) * T + 1402.388
C = ((C3 * P + C2) * P + C1) * P + C0
# SOUND SPEED RETURN.
ssp = C + (A + B * SR + D * S) * S
else:
raise TypeError(f"Unrecognizable equation specified: {equation}")
return ssp
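# Hedged usage sketch (added for illustration): for typical open-ocean values
# the formulations agree to within a fraction of a m/s, e.g.
#   soundspeed(35.0, 10.0, 100.0, equation='mackenzie')
#   soundspeed(35.0, 10.0, 100.0, equation='chen')
# both come out near 1490 m/s.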
def photic_depth(z, par):
"""
Computes photic depth, based on 1% of surface PAR (Photosynthetically
Available Radiation).
Parameters
----------
z : array_like
depth in meters.
par : array_like
float values of PAR
Returns
-------
photic_depth : array_like
Array of depth in which light is available.
photic_ix : array_like
Index of available `par` data from surface to critical depth
"""
photic_ix = np.where(par >= par[0] / 100.0)[0]
photic_depth = z[photic_ix]
return photic_depth, photic_ix
def cr_depth(z, par):
"""
Computes Critical depth. Depth where 1% of surface PAR (Photosynthetically
Available Radiation).
Parameters
----------
z : array_like
depth in meters.
par : array_like
float values of PAR
Returns
-------
crdepth : int
Critical depth. Depth where 1% of surface PAR is available.
"""
ix = photic_depth(z, par)[1]
crdepth = z[ix][-1]
return crdepth
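# Hedged sketch (added for illustration, not called anywhere): with an ideal
# exponentially decaying profile the critical depth is simply where PAR falls
# to 1% of its shallowest value.
def _cr_depth_synthetic_example():
    z = np.arange(1.0, 101.0)
    par = 500.0 * np.exp(-0.05 * z)
    # 1% of the 1 m value is reached about 92 m deeper, so roughly 93 m here
    return cr_depth(z, par)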
def kdpar(z, par, boundary):
"""
Compute Kd value, since light extinction coefficient can be computed
from depth and Photossintetically Available Radiation (PAR).
It will compute a linear regression through out following depths from
boundary and them will be regressed to the upper depth to boundary
limits.
Parameters
----------
z : array_like
depth in meters of respective PAR values
par : array_like
PAR values
boundary : np.float
First good upper limit of downcast, when PAR data has stabilized
Return
------
kd : float
Light extinction coefficient.
par_surface : float
Surface PAR, modeled from first meters data.
References
----------
Smith RC, Baker KS (1978) Optical classification of natural waters.
Limnol Ocenogr 23:260-267.
"""
# First linear regression. Returns fit parameters to be used on
# reconstruction of surface PAR.
b = np.int32(boundary)
i_b = np.where(z <= b)[0]
par_b = par[i_b]
z_b = z[i_b]
z_light = photic_depth(z_b, par_b)[1]
par_z = par_b[z_light]
z_z = z_b[z_light]
xp = np.polyfit(z_z, np.log(par_z), 1)
# Linear regression based on surface PAR, obtained from linear fitting.
# z = 0
# PAR_surface = a(z) + b
par_surface = np.exp(xp[1])
par = np.r_[par_surface, par]
z = np.r_[0, z]
z_par = photic_depth(z, par)[1]
kd = (np.log(par[0]) - np.log(par[b])) / z_par[b]
return kd, par_surface
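# Illustrative usage sketch (added for clarity; not part of the original
# library). For a clean exponential profile PAR = 100 * exp(-0.05 * z) the
# recovered kd should be close to the prescribed 0.05 m^-1 and the modeled
# surface PAR close to 100; the 20 m boundary is an arbitrary choice.
def _example_kdpar():
    z = np.arange(1.0, 51.0)
    par = 100.0 * np.exp(-0.05 * z)
    kd, par_surface = kdpar(z, par, boundary=20.0)
    return kd, par_surface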
def zmld_so(s, t, p, threshold=0.05, smooth=None):
"""
Computes mixed layer depth of Southern Ocean waters.
Parameters
----------
s : array_like
salinity [psu (PSS-78)]
t : array_like
temperature [℃ (ITS-90)]
p : array_like
pressure [db].
smooth : int
size of running mean window, to smooth data.
References
----------
    Mitchell B. G., Holm-Hansen, O., 1991. Observations and modeling of the
    Antarctic phytoplankton crop in relation to mixing depth. Deep Sea
Research, 38(89):981-1007. doi:10.1016/0198-0149(91)90093-U
"""
    from pandas import Series
sigma_t = sigmatheta(s, t, p)
depth = copy(p)
if smooth is not None:
        sigma_t = Series(sigma_t).rolling(smooth, min_periods=1).mean().values
sublayer = np.where(depth[(depth >= 5) & (depth <= 10)])[0]
sigma_x = np.nanmean(sigma_t[sublayer])
nan_sigma = np.where(sigma_t < sigma_x + threshold)[0]
sigma_t[nan_sigma] = np.nan
der = np.divide(np.diff(sigma_t), np.diff(depth))
mld = np.where(der == np.nanmax(der))[0]
zmld = depth[mld]
return zmld
def zmld_boyer(s, t, p):
"""
Computes mixed layer depth, based on de Boyer Montégut et al., 2004.
Parameters
----------
s : array_like
salinity [psu (PSS-78)]
t : array_like
temperature [℃ (ITS-90)]
p : array_like
pressure [db].
Notes
-----
Based on density with fixed threshold criteria
de Boyer Montégut et al., 2004. Mixed layer depth over the global ocean:
An examination of profile data and a profile-based climatology.
doi:10.1029/2004JC002378
dataset for test and more explanation can be found at:
http://www.ifremer.fr/cerweb/deboyer/mld/Surface_Mixed_Layer_Depth.php
Codes based on : http://mixedlayer.ucsd.edu/
"""
m = len(np.nonzero(~np.isnan(s))[0])
if m <= 1:
mldepthdens_mldindex = 0
mldepthptemp_mldindex = 0
return mldepthdens_mldindex, mldepthptemp_mldindex
else:
# starti = min(find((pres-10).^2==min((pres-10).^2)));
starti = np.min(np.where((p - 10.0) ** 2 == np.min((p - 10.0) ** 2))[0])
starti = 0
pres = p[starti:m]
sal = s[starti:m]
temp = t[starti:m]
pden = sw.dens0(sal, temp) - 1000
mldepthdens_mldindex = m - 1
for i, pp in enumerate(pden):
if np.abs(pden[starti] - pp) > 0.03:
mldepthdens_mldindex = i
break
# Interpolate to exactly match the potential density threshold.
presseg = [pres[mldepthdens_mldindex - 1], pres[mldepthdens_mldindex]]
pdenseg = [
pden[starti] - pden[mldepthdens_mldindex - 1],
pden[starti] - pden[mldepthdens_mldindex],
]
P = np.polyfit(presseg, pdenseg, 1)
presinterp = np.linspace(presseg[0], presseg[1], 3)
pdenthreshold = np.polyval(P, presinterp)
# The potential density threshold MLD value:
ix = np.max(np.where(np.abs(pdenthreshold) < 0.03)[0])
mldepthdens_mldindex = presinterp[ix]
# Search for the first level that exceeds the temperature threshold.
mldepthptmp_mldindex = m - 1
for i, tt in enumerate(temp):
if np.abs(temp[starti] - tt) > 0.2:
mldepthptmp_mldindex = i
break
# Interpolate to exactly match the temperature threshold.
presseg = [pres[mldepthptmp_mldindex - 1], pres[mldepthptmp_mldindex]]
tempseg = [
temp[starti] - temp[mldepthptmp_mldindex - 1],
temp[starti] - temp[mldepthptmp_mldindex],
]
P = np.polyfit(presseg, tempseg, 1)
presinterp = np.linspace(presseg[0], presseg[1], 3)
tempthreshold = np.polyval(P, presinterp)
# The temperature threshold MLD value:
ix = np.max(np.where(np.abs(tempthreshold) < 0.2)[0])
mldepthptemp_mldindex = presinterp[ix]
return mldepthdens_mldindex, mldepthptemp_mldindex
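# Illustrative usage sketch (added for clarity; not part of the original
# library). The profile is synthetic (constant salinity, slowly cooling
# temperature) and assumes the `seawater` package used as `sw` in this module
# is available; the exact MLD values depend on the chosen gradient.
def _example_zmld_boyer():
    p = np.arange(1.0, 101.0)     # pressure [dbar]
    t = 20.0 - 0.03 * p           # temperature cooling with depth [degC]
    s = np.full_like(p, 35.0)     # constant salinity [psu]
    mld_dens, mld_temp = zmld_boyer(s, t, p)
    # Both threshold-based estimates land a few dbar below the surface here.
    return mld_dens, mld_temp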
def o2sol_SP_pt_benson_krause_84(SP, pt):
"""
Calculates the oxygen, O2, concentration expected at equilibrium with air
at an Absolute Pressure of 101325 Pa (sea pressure of 0 dbar) including
saturated water vapor.
This function uses the solubility coefficients derived from the data of
Benson and Krause 1984, as fitted by Garcia and Gordon 1992.
    Valid in the range:
    tF <= t <= 40 degC
    0 <= SP <= 42 %o.
Parameters
----------
SP : array_like
Practical Salinity
pt : array_like
Potential temperature [℃ (ITS-90)]
Examples
--------
>>> SP = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> pt = [28.8099, 28.4392, 22.7862, 10.2262, 6.8272, 4.3236]
>>> o2sol = o2sol_SP_pt_benson_krause_84(SP, pt)
>>> expected = [194.68254317, 195.61350628, 214.65593602, 273.56528327, 295.15807614, 312.95987166]
>>> np.testing.assert_almost_equal(expected, o2sol)
    References
    ----------
    https://aslopubs.onlinelibrary.wiley.com/doi/pdf/10.4319/lo.1992.37.6.1307
"""
SP, pt = list(map(np.asanyarray, (SP, pt)))
S = SP # rename to make eq. identical to the paper and increase readability.
pt68 = pt * 1.00024 # IPTS-68 potential temperature in degC.
Ts = np.log((298.15 - pt68) / (273.15 + pt68))
    # The coefficients for Benson and Krause 1984
# from the table 1 of Garcia and Gordon (1992).
A = [5.80871, 3.20291, 4.17887, 5.10006, -9.86643e-2, 3.80369]
B = [-7.01577e-3, -7.70028e-3, -1.13864e-2, -9.51519e-3]
C0 = -2.75915e-7
    # Equation 8 from Garcia and Gordon 1992 according to Pilson.
lnCo = (
A[0]
+ A[1] * Ts
+ A[2] * Ts ** 2
+ A[3] * Ts ** 3
+ A[4] * Ts ** 4
+ A[5] * Ts ** 5
+ S * (B[0] + B[1] * Ts + B[2] * Ts ** 2 + B[3] * Ts ** 3)
+ C0 * S ** 2
)
return np.exp(lnCo)
| bsd-3-clause |
aewhatley/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
    Note on the lookup process: depending on the type of name_or_id, this
    function chooses between an integer id lookup and a metadata name lookup,
    by inspecting the unzipped archives and their metadata files.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
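# Illustrative usage sketch (added for clarity; not part of the original
# module). It assumes MLCOMP_DATASETS_HOME points to a folder of unzipped
# MLComp datasets and that a dataset named '20news-18828' has been
# downloaded; both the dataset name and the splits are examples only.
def _example_load_mlcomp():
    news_train = load_mlcomp('20news-18828', set_='train')
    news_test = load_mlcomp('20news-18828', set_='test')
    return news_train.target_names, news_test.filenames[:3]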
| bsd-3-clause |
trachelr/mne-python | mne/inverse_sparse/mxne_optim.py | 13 | 37011 | from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Daniel Strohmeier <daniel.strohmeier@gmail.com>
#
# License: Simplified BSD
from copy import deepcopy
import warnings
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from .mxne_debiasing import compute_bias
from ..utils import logger, verbose, sum_squared
from ..time_frequency.stft import stft_norm2, stft, istft
from ..externals.six.moves import xrange as range
def groups_norm2(A, n_orient):
"""compute squared L2 norms of groups inplace"""
n_positions = A.shape[0] // n_orient
return np.sum(np.power(A, 2, A).reshape(n_positions, -1), axis=1)
def norm_l2inf(A, n_orient, copy=True):
"""L2-inf norm"""
if A.size == 0:
return 0.0
if copy:
A = A.copy()
return sqrt(np.max(groups_norm2(A, n_orient)))
def norm_l21(A, n_orient, copy=True):
"""L21 norm"""
if A.size == 0:
return 0.0
if copy:
A = A.copy()
return np.sum(np.sqrt(groups_norm2(A, n_orient)))
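# Illustrative sketch (added for clarity; not part of the original module).
# With n_orient=2 the rows of A are grouped in pairs, so the squared group
# norms below are 25 and 5: norm_l2inf returns sqrt(25) = 5 and norm_l21
# returns sqrt(25) + sqrt(5).
def _example_group_norms():
    A = np.array([[3., 4.], [0., 0.], [1., 0.], [2., 0.]])
    return norm_l2inf(A, n_orient=2), norm_l21(A, n_orient=2)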
def prox_l21(Y, alpha, n_orient, shape=None, is_stft=False):
"""proximity operator for l21 norm
L2 over columns and L1 over rows => groups contain n_orient rows.
It can eventually take into account the negative frequencies
when a complex value is passed and is_stft=True.
Example
-------
>>> Y = np.tile(np.array([0, 4, 3, 0, 0], dtype=np.float), (2, 1))
>>> Y = np.r_[Y, np.zeros_like(Y)]
>>> print(Y)
[[ 0. 4. 3. 0. 0.]
[ 0. 4. 3. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]]
>>> Yp, active_set = prox_l21(Y, 2, 2)
>>> print(Yp)
[[ 0. 2.86862915 2.15147186 0. 0. ]
[ 0. 2.86862915 2.15147186 0. 0. ]]
>>> print(active_set)
[ True True False False]
"""
if len(Y) == 0:
return np.zeros_like(Y), np.zeros((0,), dtype=np.bool)
if shape is not None:
shape_init = Y.shape
Y = Y.reshape(*shape)
n_positions = Y.shape[0] // n_orient
if is_stft:
rows_norm = np.sqrt(stft_norm2(Y).reshape(n_positions, -1).sum(axis=1))
else:
rows_norm = np.sqrt((Y * Y.conj()).real.reshape(n_positions,
-1).sum(axis=1))
# Ensure shrink is >= 0 while avoiding any division by zero
shrink = np.maximum(1.0 - alpha / np.maximum(rows_norm, alpha), 0.0)
active_set = shrink > 0.0
if n_orient > 1:
active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
shrink = np.tile(shrink[:, None], [1, n_orient]).ravel()
Y = Y[active_set]
if shape is None:
Y *= shrink[active_set][:, np.newaxis]
else:
Y *= shrink[active_set][:, np.newaxis, np.newaxis]
Y = Y.reshape(-1, *shape_init[1:])
return Y, active_set
def prox_l1(Y, alpha, n_orient):
"""proximity operator for l1 norm with multiple orientation support
L2 over orientation and L1 over position (space + time)
Example
-------
>>> Y = np.tile(np.array([1, 2, 3, 2, 0], dtype=np.float), (2, 1))
>>> Y = np.r_[Y, np.zeros_like(Y)]
>>> print(Y)
[[ 1. 2. 3. 2. 0.]
[ 1. 2. 3. 2. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]]
>>> Yp, active_set = prox_l1(Y, 2, 2)
>>> print(Yp)
[[ 0. 0.58578644 1.58578644 0.58578644 0. ]
[ 0. 0.58578644 1.58578644 0.58578644 0. ]]
>>> print(active_set)
[ True True False False]
"""
n_positions = Y.shape[0] // n_orient
norms = np.sqrt((Y * Y.conj()).real.T.reshape(-1, n_orient).sum(axis=1))
# Ensure shrink is >= 0 while avoiding any division by zero
shrink = np.maximum(1.0 - alpha / np.maximum(norms, alpha), 0.0)
shrink = shrink.reshape(-1, n_positions).T
active_set = np.any(shrink > 0.0, axis=1)
shrink = shrink[active_set]
if n_orient > 1:
active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
Y = Y[active_set]
if len(Y) > 0:
for o in range(n_orient):
Y[o::n_orient] *= shrink
return Y, active_set
def dgap_l21(M, G, X, active_set, alpha, n_orient):
"""Duality gaps for the mixed norm inverse problem
For details see:
Gramfort A., Kowalski M. and Hamalainen, M,
Mixed-norm estimates for the M/EEG inverse problem using accelerated
gradient methods, Physics in Medicine and Biology, 2012
http://dx.doi.org/10.1088/0031-9155/57/7/1937
Parameters
----------
M : array, shape (n_sensors, n_times)
The data.
G : array, shape (n_sensors, n_active)
The gain matrix a.k.a. lead field.
X : array, shape (n_active, n_times)
Sources
active_set : array of bool
Mask of active sources
alpha : float
Regularization parameter
n_orient : int
Number of dipoles per locations (typically 1 or 3)
Returns
-------
gap : float
Dual gap
pobj : float
Primal cost
dobj : float
Dual cost. gap = pobj - dobj
R : array, shape (n_sensors, n_times)
Current residual of M - G * X
"""
GX = np.dot(G[:, active_set], X)
R = M - GX
penalty = norm_l21(X, n_orient, copy=True)
nR2 = sum_squared(R)
pobj = 0.5 * nR2 + alpha * penalty
dual_norm = norm_l2inf(np.dot(G.T, R), n_orient, copy=False)
scaling = alpha / dual_norm
scaling = min(scaling, 1.0)
dobj = 0.5 * (scaling ** 2) * nR2 + scaling * np.sum(R * GX)
gap = pobj - dobj
return gap, pobj, dobj, R
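# Illustrative sketch (added for clarity; not part of the original module).
# For any candidate solution the gap is non-negative and equals pobj - dobj,
# which is what makes it usable as a stopping criterion; the sizes and alpha
# below are arbitrary.
def _example_dgap_l21():
    rng = np.random.RandomState(0)
    n_sensors, n_sources, n_times = 10, 30, 5
    G = rng.randn(n_sensors, n_sources)
    M = rng.randn(n_sensors, n_times)
    active_set = np.zeros(n_sources, dtype=np.bool)
    active_set[:3] = True
    X = rng.randn(3, n_times)
    gap, pobj, dobj, _ = dgap_l21(M, G, X, active_set, alpha=1.0, n_orient=1)
    return gap, pobj, dobj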
@verbose
def _mixed_norm_solver_prox(M, G, alpha, lipschitz_constant, maxit=200,
tol=1e-8, verbose=None, init=None, n_orient=1):
"""Solves L21 inverse problem with proximal iterations and FISTA"""
n_sensors, n_times = M.shape
n_sensors, n_sources = G.shape
if n_sources < n_sensors:
gram = np.dot(G.T, G)
GTM = np.dot(G.T, M)
else:
gram = None
if init is None:
X = 0.0
R = M.copy()
if gram is not None:
R = np.dot(G.T, R)
else:
X = init
if gram is None:
R = M - np.dot(G, X)
else:
R = GTM - np.dot(gram, X)
t = 1.0
Y = np.zeros((n_sources, n_times)) # FISTA aux variable
E = [] # track cost function
active_set = np.ones(n_sources, dtype=np.bool) # start with full AS
for i in range(maxit):
X0, active_set_0 = X, active_set # store previous values
if gram is None:
Y += np.dot(G.T, R) / lipschitz_constant # ISTA step
else:
Y += R / lipschitz_constant # ISTA step
X, active_set = prox_l21(Y, alpha / lipschitz_constant, n_orient)
t0 = t
t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2))
Y.fill(0.0)
dt = ((t0 - 1.0) / t)
Y[active_set] = (1.0 + dt) * X
Y[active_set_0] -= dt * X0
Y_as = active_set_0 | active_set
if gram is None:
R = M - np.dot(G[:, Y_as], Y[Y_as])
else:
R = GTM - np.dot(gram[:, Y_as], Y[Y_as])
gap, pobj, dobj, _ = dgap_l21(M, G, X, active_set, alpha, n_orient)
E.append(pobj)
logger.debug("pobj : %s -- gap : %s" % (pobj, gap))
if gap < tol:
logger.debug('Convergence reached ! (gap: %s < %s)' % (gap, tol))
break
return X, active_set, E
@verbose
def _mixed_norm_solver_cd(M, G, alpha, lipschitz_constant, maxit=10000,
tol=1e-8, verbose=None, init=None, n_orient=1):
"""Solves L21 inverse problem with coordinate descent"""
from sklearn.linear_model.coordinate_descent import MultiTaskLasso
n_sensors, n_times = M.shape
n_sensors, n_sources = G.shape
if init is not None:
init = init.T
clf = MultiTaskLasso(alpha=alpha / len(M), tol=tol, normalize=False,
fit_intercept=False, max_iter=maxit,
warm_start=True)
clf.coef_ = init
clf.fit(G, M)
X = clf.coef_.T
active_set = np.any(X, axis=1)
X = X[active_set]
gap, pobj, dobj, _ = dgap_l21(M, G, X, active_set, alpha, n_orient)
return X, active_set, pobj
@verbose
def _mixed_norm_solver_bcd(M, G, alpha, lipschitz_constant, maxit=200,
tol=1e-8, verbose=None, init=None, n_orient=1):
"""Solves L21 inverse problem with block coordinate descent"""
# First make G fortran for faster access to blocks of columns
G = np.asfortranarray(G)
n_sensors, n_times = M.shape
n_sensors, n_sources = G.shape
n_positions = n_sources // n_orient
if init is None:
X = np.zeros((n_sources, n_times))
R = M.copy()
else:
X = init
R = M - np.dot(G, X)
E = [] # track cost function
    active_set = np.zeros(n_sources, dtype=np.bool)  # start with empty AS
alpha_lc = alpha / lipschitz_constant
for i in range(maxit):
for j in range(n_positions):
idx = slice(j * n_orient, (j + 1) * n_orient)
G_j = G[:, idx]
X_j = X[idx]
X_j_new = np.dot(G_j.T, R) / lipschitz_constant[j]
was_non_zero = np.any(X_j)
if was_non_zero:
R += np.dot(G_j, X_j)
X_j_new += X_j
block_norm = linalg.norm(X_j_new, 'fro')
if block_norm <= alpha_lc[j]:
X_j.fill(0.)
active_set[idx] = False
else:
shrink = np.maximum(1.0 - alpha_lc[j] / block_norm, 0.0)
X_j_new *= shrink
R -= np.dot(G_j, X_j_new)
X_j[:] = X_j_new
active_set[idx] = True
gap, pobj, dobj, _ = dgap_l21(M, G, X[active_set], active_set, alpha,
n_orient)
E.append(pobj)
logger.debug("Iteration %d :: pobj %f :: dgap %f :: n_active %d" % (
i + 1, pobj, gap, np.sum(active_set) / n_orient))
if gap < tol:
logger.debug('Convergence reached ! (gap: %s < %s)' % (gap, tol))
break
X = X[active_set]
return X, active_set, E
@verbose
def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
active_set_size=50, debias=True, n_orient=1,
solver='auto'):
"""Solves L1/L2 mixed-norm inverse problem with active set strategy
Algorithm is detailed in:
Gramfort A., Kowalski M. and Hamalainen, M,
Mixed-norm estimates for the M/EEG inverse problem using accelerated
gradient methods, Physics in Medicine and Biology, 2012
http://dx.doi.org/10.1088/0031-9155/57/7/1937
Parameters
----------
M : array, shape (n_sensors, n_times)
The data.
G : array, shape (n_sensors, n_dipoles)
The gain matrix a.k.a. lead field.
alpha : float
The regularization parameter. It should be between 0 and 100.
A value of 100 will lead to an empty active set (no active source).
maxit : int
The number of iterations.
tol : float
Tolerance on dual gap for convergence checking.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
active_set_size : int
Size of active set increase at each iteration.
debias : bool
Debias source estimates.
n_orient : int
        The number of orientations (1: fixed or 3: free or loose).
solver : 'prox' | 'cd' | 'bcd' | 'auto'
The algorithm to use for the optimization.
Returns
-------
X : array, shape (n_active, n_times)
The source estimates.
active_set : array
The mask of active sources.
E : list
The value of the objective function over the iterations.
"""
n_dipoles = G.shape[1]
n_positions = n_dipoles // n_orient
n_sensors, n_times = M.shape
alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False)
logger.info("-- ALPHA MAX : %s" % alpha_max)
alpha = float(alpha)
has_sklearn = True
try:
from sklearn.linear_model.coordinate_descent import MultiTaskLasso # noqa
except ImportError:
has_sklearn = False
if solver == 'auto':
if has_sklearn and (n_orient == 1):
solver = 'cd'
else:
solver = 'bcd'
if solver == 'cd':
if n_orient == 1 and not has_sklearn:
warnings.warn("Scikit-learn >= 0.12 cannot be found. "
"Using block coordinate descent instead of "
"coordinate descent.")
solver = 'bcd'
if n_orient > 1:
warnings.warn("Coordinate descent is only available for fixed "
"orientation. Using block coordinate descent "
"instead of coordinate descent")
solver = 'bcd'
if solver == 'cd':
logger.info("Using coordinate descent")
l21_solver = _mixed_norm_solver_cd
lc = None
elif solver == 'bcd':
logger.info("Using block coordinate descent")
l21_solver = _mixed_norm_solver_bcd
G = np.asfortranarray(G)
if n_orient == 1:
lc = np.sum(G * G, axis=0)
else:
lc = np.empty(n_positions)
for j in range(n_positions):
G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
else:
logger.info("Using proximal iterations")
l21_solver = _mixed_norm_solver_prox
lc = 1.01 * linalg.norm(G, ord=2) ** 2
if active_set_size is not None:
E = list()
X_init = None
active_set = np.zeros(n_dipoles, dtype=np.bool)
idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, M), n_orient))
new_active_idx = idx_large_corr[-active_set_size:]
if n_orient > 1:
new_active_idx = (n_orient * new_active_idx[:, None] +
np.arange(n_orient)[None, :]).ravel()
active_set[new_active_idx] = True
as_size = np.sum(active_set)
for k in range(maxit):
if solver == 'bcd':
lc_tmp = lc[active_set[::n_orient]]
elif solver == 'cd':
lc_tmp = None
else:
lc_tmp = 1.01 * linalg.norm(G[:, active_set], ord=2) ** 2
X, as_, _ = l21_solver(M, G[:, active_set], alpha, lc_tmp,
maxit=maxit, tol=tol, init=X_init,
n_orient=n_orient)
active_set[active_set] = as_.copy()
idx_old_active_set = np.where(active_set)[0]
gap, pobj, dobj, R = dgap_l21(M, G, X, active_set, alpha,
n_orient)
E.append(pobj)
logger.info("Iteration %d :: pobj %f :: dgap %f ::"
"n_active_start %d :: n_active_end %d" % (
k + 1, pobj, gap, as_size // n_orient,
np.sum(active_set) // n_orient))
if gap < tol:
logger.info('Convergence reached ! (gap: %s < %s)'
% (gap, tol))
break
# add sources if not last iteration
if k < (maxit - 1):
idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, R),
n_orient))
new_active_idx = idx_large_corr[-active_set_size:]
if n_orient > 1:
new_active_idx = (n_orient * new_active_idx[:, None] +
np.arange(n_orient)[None, :])
new_active_idx = new_active_idx.ravel()
active_set[new_active_idx] = True
idx_active_set = np.where(active_set)[0]
as_size = np.sum(active_set)
X_init = np.zeros((as_size, n_times), dtype=X.dtype)
idx = np.searchsorted(idx_active_set, idx_old_active_set)
X_init[idx] = X
else:
logger.warning('Did NOT converge ! (gap: %s > %s)' % (gap, tol))
else:
X, active_set, E = l21_solver(M, G, alpha, lc, maxit=maxit,
tol=tol, n_orient=n_orient, init=None)
if np.any(active_set) and debias:
bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
X *= bias[:, np.newaxis]
logger.info('Final active set size: %s' % (np.sum(active_set) // n_orient))
return X, active_set, E
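# Illustrative sketch (added for clarity; not part of the original module):
# recovering a small simulated sparse source configuration. The
# regularization is expressed as a fraction of alpha_max so the example does
# not rely on any particular scaling convention, and solver='bcd' avoids the
# optional sklearn dependency. Sizes and fractions are arbitrary choices.
def _example_mixed_norm_solver():
    rng = np.random.RandomState(42)
    n_sensors, n_sources, n_times = 20, 60, 10
    G = rng.randn(n_sensors, n_sources)
    X_true = np.zeros((n_sources, n_times))
    X_true[:3] = rng.randn(3, n_times)
    M = np.dot(G, X_true)
    alpha = 0.2 * norm_l2inf(np.dot(G.T, M), 1)
    X, active_set, E = mixed_norm_solver(M, G, alpha, maxit=1000, tol=1e-6,
                                         active_set_size=10, n_orient=1,
                                         solver='bcd')
    # The recovered active set should concentrate on the first few sources.
    return X, active_set, E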
@verbose
def iterative_mixed_norm_solver(M, G, alpha, n_mxne_iter, maxit=3000,
tol=1e-8, verbose=None, active_set_size=50,
debias=True, n_orient=1, solver='auto'):
"""Solves L0.5/L2 mixed-norm inverse problem with active set strategy
Algorithm is detailed in:
Strohmeier D., Haueisen J., and Gramfort A.:
Improved MEG/EEG source localization with reweighted mixed-norms,
4th International Workshop on Pattern Recognition in Neuroimaging,
Tuebingen, 2014
Parameters
----------
M : array, shape (n_sensors, n_times)
The data.
G : array, shape (n_sensors, n_dipoles)
The gain matrix a.k.a. lead field.
alpha : float
The regularization parameter. It should be between 0 and 100.
A value of 100 will lead to an empty active set (no active source).
n_mxne_iter : int
The number of MxNE iterations. If > 1, iterative reweighting
is applied.
maxit : int
The number of iterations.
tol : float
Tolerance on dual gap for convergence checking.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
active_set_size : int
Size of active set increase at each iteration.
debias : bool
Debias source estimates.
n_orient : int
        The number of orientations (1: fixed or 3: free or loose).
solver : 'prox' | 'cd' | 'bcd' | 'auto'
The algorithm to use for the optimization.
Returns
-------
X : array, shape (n_active, n_times)
The source estimates.
active_set : array
The mask of active sources.
E : list
The value of the objective function over the iterations.
"""
def g(w):
return np.sqrt(np.sqrt(groups_norm2(w.copy(), n_orient)))
def gprime(w):
return 2. * np.repeat(g(w), n_orient).ravel()
E = list()
active_set = np.ones(G.shape[1], dtype=np.bool)
weights = np.ones(G.shape[1])
X = np.zeros((G.shape[1], M.shape[1]))
for k in range(n_mxne_iter):
X0 = X.copy()
active_set_0 = active_set.copy()
G_tmp = G[:, active_set] * weights[np.newaxis, :]
if active_set_size is not None:
if np.sum(active_set) > (active_set_size * n_orient):
X, _active_set, _ = mixed_norm_solver(
M, G_tmp, alpha, debias=False, n_orient=n_orient,
maxit=maxit, tol=tol, active_set_size=active_set_size,
solver=solver, verbose=verbose)
else:
X, _active_set, _ = mixed_norm_solver(
M, G_tmp, alpha, debias=False, n_orient=n_orient,
maxit=maxit, tol=tol, active_set_size=None, solver=solver,
verbose=verbose)
else:
X, _active_set, _ = mixed_norm_solver(
M, G_tmp, alpha, debias=False, n_orient=n_orient,
maxit=maxit, tol=tol, active_set_size=None, solver=solver,
verbose=verbose)
logger.info('active set size %d' % (_active_set.sum() / n_orient))
if _active_set.sum() > 0:
active_set[active_set] = _active_set
# Reapply weights to have correct unit
X *= weights[_active_set][:, np.newaxis]
weights = gprime(X)
p_obj = 0.5 * linalg.norm(M - np.dot(G[:, active_set], X),
'fro') ** 2. + alpha * np.sum(g(X))
E.append(p_obj)
# Check convergence
if ((k >= 1) and np.all(active_set == active_set_0) and
np.all(np.abs(X - X0) < tol)):
print('Convergence reached after %d reweightings!' % k)
break
else:
active_set = np.zeros_like(active_set)
p_obj = 0.5 * linalg.norm(M) ** 2.
E.append(p_obj)
break
if np.any(active_set) and debias:
bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
X *= bias[:, np.newaxis]
return X, active_set, E
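# Illustrative sketch (added for clarity; not part of the original module).
# The reweighted (L0.5/L2) variant is called like mixed_norm_solver with the
# extra n_mxne_iter argument; the simulated data and the fraction of
# alpha_max below are arbitrary.
def _example_iterative_mixed_norm_solver():
    rng = np.random.RandomState(0)
    G = rng.randn(20, 60)
    X_true = np.zeros((60, 10))
    X_true[:3] = rng.randn(3, 10)
    M = np.dot(G, X_true)
    alpha = 0.2 * norm_l2inf(np.dot(G.T, M), 1)
    return iterative_mixed_norm_solver(M, G, alpha, n_mxne_iter=3,
                                       maxit=1000, tol=1e-6, n_orient=1,
                                       solver='bcd')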
###############################################################################
# TF-MxNE
@verbose
def tf_lipschitz_constant(M, G, phi, phiT, tol=1e-3, verbose=None):
"""Compute lipschitz constant for FISTA
It uses a power iteration method.
"""
n_times = M.shape[1]
n_points = G.shape[1]
iv = np.ones((n_points, n_times), dtype=np.float)
v = phi(iv)
L = 1e100
for it in range(100):
L_old = L
logger.info('Lipschitz estimation: iteration = %d' % it)
iv = np.real(phiT(v))
Gv = np.dot(G, iv)
GtGv = np.dot(G.T, Gv)
w = phi(GtGv)
L = np.max(np.abs(w)) # l_inf norm
v = w / L
if abs((L - L_old) / L_old) < tol:
break
return L
def safe_max_abs(A, ia):
"""Compute np.max(np.abs(A[ia])) possible with empty A"""
if np.sum(ia): # ia is not empty
return np.max(np.abs(A[ia]))
else:
return 0.
def safe_max_abs_diff(A, ia, B, ib):
"""Compute np.max(np.abs(A)) possible with empty A"""
A = A[ia] if np.sum(ia) else 0.0
B = B[ib] if np.sum(ia) else 0.0
return np.max(np.abs(A - B))
class _Phi(object):
"""Util class to have phi stft as callable without using
a lambda that does not pickle"""
def __init__(self, wsize, tstep, n_coefs):
self.wsize = wsize
self.tstep = tstep
self.n_coefs = n_coefs
def __call__(self, x):
return stft(x, self.wsize, self.tstep,
verbose=False).reshape(-1, self.n_coefs)
class _PhiT(object):
"""Util class to have phi.T istft as callable without using
a lambda that does not pickle"""
def __init__(self, tstep, n_freq, n_step, n_times):
self.tstep = tstep
self.n_freq = n_freq
self.n_step = n_step
self.n_times = n_times
def __call__(self, z):
return istft(z.reshape(-1, self.n_freq, self.n_step), self.tstep,
self.n_times)
def norm_l21_tf(Z, shape, n_orient):
if Z.shape[0]:
Z2 = Z.reshape(*shape)
l21_norm = np.sqrt(stft_norm2(Z2).reshape(-1, n_orient).sum(axis=1))
l21_norm = l21_norm.sum()
else:
l21_norm = 0.
return l21_norm
def norm_l1_tf(Z, shape, n_orient):
if Z.shape[0]:
n_positions = Z.shape[0] // n_orient
Z_ = np.sqrt(np.sum((np.abs(Z) ** 2.).reshape((n_orient, -1),
order='F'), axis=0))
Z_ = Z_.reshape((n_positions, -1), order='F').reshape(*shape)
l1_norm = (2. * Z_.sum(axis=2).sum(axis=1) - np.sum(Z_[:, 0, :],
axis=1) - np.sum(Z_[:, -1, :], axis=1))
l1_norm = l1_norm.sum()
else:
l1_norm = 0.
return l1_norm
@verbose
def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, alpha_space, alpha_time,
lipschitz_constant, phi, phiT,
wsize=64, tstep=4, n_orient=1,
maxit=200, tol=1e-8, log_objective=True,
perc=None, verbose=None):
# First make G fortran for faster access to blocks of columns
G = np.asfortranarray(G)
n_sensors, n_times = M.shape
n_sources = G.shape[1]
n_positions = n_sources // n_orient
n_step = int(ceil(n_times / float(tstep)))
n_freq = wsize // 2 + 1
shape = (-1, n_freq, n_step)
G = dict(zip(np.arange(n_positions), np.hsplit(G, n_positions)))
R = M.copy() # residual
active = np.where(active_set)[0][::n_orient] // n_orient
for idx in active:
R -= np.dot(G[idx], phiT(Z[idx]))
E = [] # track cost function
alpha_time_lc = alpha_time / lipschitz_constant
alpha_space_lc = alpha_space / lipschitz_constant
converged = False
for i in range(maxit):
val_norm_l21_tf = 0.0
val_norm_l1_tf = 0.0
max_diff = 0.0
active_set_0 = active_set.copy()
for j in range(n_positions):
ids = j * n_orient
ide = ids + n_orient
G_j = G[j]
Z_j = Z[j]
active_set_j = active_set[ids:ide]
Z0 = deepcopy(Z_j)
was_active = np.any(active_set_j)
# gradient step
GTR = np.dot(G_j.T, R) / lipschitz_constant[j]
X_j_new = GTR.copy()
if was_active:
X_j = phiT(Z_j)
R += np.dot(G_j, X_j)
X_j_new += X_j
rows_norm = linalg.norm(X_j_new, 'fro')
if rows_norm <= alpha_space_lc[j]:
if was_active:
Z[j] = 0.0
active_set_j[:] = False
else:
if was_active:
Z_j_new = Z_j + phi(GTR)
else:
Z_j_new = phi(GTR)
col_norm = np.sqrt(np.sum(np.abs(Z_j_new) ** 2, axis=0))
if np.all(col_norm <= alpha_time_lc[j]):
Z[j] = 0.0
active_set_j[:] = False
else:
# l1
shrink = np.maximum(1.0 - alpha_time_lc[j] / np.maximum(
col_norm, alpha_time_lc[j]), 0.0)
Z_j_new *= shrink[np.newaxis, :]
# l21
shape_init = Z_j_new.shape
Z_j_new = Z_j_new.reshape(*shape)
row_norm = np.sqrt(stft_norm2(Z_j_new).sum())
if row_norm <= alpha_space_lc[j]:
Z[j] = 0.0
active_set_j[:] = False
else:
shrink = np.maximum(1.0 - alpha_space_lc[j] /
np.maximum(row_norm,
alpha_space_lc[j]), 0.0)
Z_j_new *= shrink
Z[j] = Z_j_new.reshape(-1, *shape_init[1:]).copy()
active_set_j[:] = True
R -= np.dot(G_j, phiT(Z[j]))
if log_objective:
val_norm_l21_tf += norm_l21_tf(
Z[j], shape, n_orient)
val_norm_l1_tf += norm_l1_tf(
Z[j], shape, n_orient)
max_diff = np.maximum(max_diff, np.max(np.abs(Z[j] - Z0)))
if log_objective: # log cost function value
pobj = (0.5 * (R ** 2.).sum() + alpha_space * val_norm_l21_tf +
alpha_time * val_norm_l1_tf)
E.append(pobj)
logger.info("Iteration %d :: pobj %f :: n_active %d" % (i + 1,
pobj, np.sum(active_set) / n_orient))
else:
logger.info("Iteration %d" % (i + 1))
if perc is not None:
if np.sum(active_set) / float(n_orient) <= perc * n_positions:
break
if np.array_equal(active_set, active_set_0):
if max_diff < tol:
logger.info("Convergence reached !")
converged = True
break
return Z, active_set, E, converged
@verbose
def _tf_mixed_norm_solver_bcd_active_set(
M, G, alpha_space, alpha_time, lipschitz_constant, phi, phiT,
Z_init=None, wsize=64, tstep=4, n_orient=1, maxit=200, tol=1e-8,
log_objective=True, perc=None, verbose=None):
"""Solves TF L21+L1 inverse solver with BCD and active set approach
Algorithm is detailed in:
Strohmeier D., Gramfort A., and Haueisen J.:
MEG/EEG source imaging with a non-convex penalty in the time-
frequency domain,
5th International Workshop on Pattern Recognition in Neuroimaging,
Stanford University, 2015
Parameters
----------
M : array
The data.
G : array
The forward operator.
alpha_space : float in [0, 100]
Regularization parameter for spatial sparsity. If larger than 100,
then no source will be active.
alpha_time : float in [0, 100]
        Regularization parameter for temporal sparsity. If set to 0,
        no temporal regularization is applied. In this case, TF-MxNE is
        equivalent to MxNE with the L21 norm.
lipschitz_constant : float
The lipschitz constant of the spatio temporal linear operator.
phi : instance of _Phi
The TF operator.
phiT : instance of _PhiT
The transpose of the TF operator.
Z_init : None | array
The initialization of the TF coefficient matrix. If None, zeros
will be used for all coefficients.
wsize: int
length of the STFT window in samples (must be a multiple of 4).
tstep: int
step between successive windows in samples (must be a multiple of 2,
a divider of wsize and smaller than wsize/2) (default: wsize/2).
n_orient : int
        The number of orientations (1: fixed or 3: free or loose).
maxit : int
The number of iterations.
tol : float
If absolute difference between estimates at 2 successive iterations
is lower than tol, the convergence is reached.
log_objective : bool
If True, the value of the minimized objective function is computed
and stored at every iteration.
perc : None | float in [0, 1]
The early stopping parameter used for BCD with active set approach.
If the active set size is smaller than perc * n_sources, the
subproblem limited to the active set is stopped. If None, full
convergence will be achieved.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
X : array
The source estimates.
active_set : array
The mask of active sources.
E : list
The value of the objective function at each iteration. If log_objective
is False, it will be empty.
"""
n_sources = G.shape[1]
n_positions = n_sources // n_orient
if Z_init is None:
Z = dict.fromkeys(range(n_positions), 0.0)
active_set = np.zeros(n_sources, dtype=np.bool)
else:
active_set = np.zeros(n_sources, dtype=np.bool)
active = list()
for i in range(n_positions):
if np.any(Z_init[i * n_orient:(i + 1) * n_orient]):
active_set[i * n_orient:(i + 1) * n_orient] = True
active.append(i)
Z = dict.fromkeys(range(n_positions), 0.0)
if len(active):
Z.update(dict(zip(active, np.vsplit(Z_init[active_set],
len(active)))))
Z, active_set, E, _ = _tf_mixed_norm_solver_bcd_(
M, G, Z, active_set, alpha_space, alpha_time, lipschitz_constant,
phi, phiT, wsize=wsize, tstep=tstep, n_orient=n_orient, maxit=1,
tol=tol, log_objective=log_objective, perc=None, verbose=verbose)
while active_set.sum():
active = np.where(active_set)[0][::n_orient] // n_orient
Z_init = dict(zip(range(len(active)), [Z[idx] for idx in active]))
Z, as_, E_tmp, converged = _tf_mixed_norm_solver_bcd_(
M, G[:, active_set], Z_init,
np.ones(len(active) * n_orient, dtype=np.bool),
alpha_space, alpha_time,
lipschitz_constant[active_set[::n_orient]],
phi, phiT, wsize=wsize, tstep=tstep, n_orient=n_orient,
maxit=maxit, tol=tol, log_objective=log_objective,
perc=0.5, verbose=verbose)
E += E_tmp
active = np.where(active_set)[0][::n_orient] // n_orient
Z_init = dict.fromkeys(range(n_positions), 0.0)
Z_init.update(dict(zip(active, Z.values())))
active_set[active_set] = as_
active_set_0 = active_set.copy()
Z, active_set, E_tmp, _ = _tf_mixed_norm_solver_bcd_(
M, G, Z_init, active_set, alpha_space, alpha_time,
lipschitz_constant, phi, phiT, wsize=wsize, tstep=tstep,
n_orient=n_orient, maxit=1, tol=tol, log_objective=log_objective,
perc=None, verbose=verbose)
E += E_tmp
if converged:
if np.array_equal(active_set_0, active_set):
break
if active_set.sum():
Z = np.vstack([Z_ for Z_ in list(Z.values()) if np.any(Z_)])
X = phiT(Z)
else:
n_sensors, n_times = M.shape
n_step = int(ceil(n_times / float(tstep)))
n_freq = wsize // 2 + 1
Z = np.zeros((0, n_step * n_freq), dtype=np.complex)
X = np.zeros((0, n_times))
return X, Z, active_set, E
@verbose
def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
n_orient=1, maxit=200, tol=1e-8, log_objective=True,
debias=True, verbose=None):
"""Solves TF L21+L1 inverse solver with BCD and active set approach
Algorithm is detailed in:
A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
non-stationary source activations
Neuroimage, Volume 70, 15 April 2013, Pages 410-422, ISSN 1053-8119,
DOI: 10.1016/j.neuroimage.2012.12.051.
Functional Brain Imaging with M/EEG Using Structured Sparsity in
Time-Frequency Dictionaries
Gramfort A., Strohmeier D., Haueisen J., Hamalainen M. and Kowalski M.
INFORMATION PROCESSING IN MEDICAL IMAGING
Lecture Notes in Computer Science, 2011, Volume 6801/2011,
600-611, DOI: 10.1007/978-3-642-22092-0_49
http://dx.doi.org/10.1007/978-3-642-22092-0_49
Parameters
----------
M : array, shape (n_sensors, n_times)
The data.
G : array, shape (n_sensors, n_dipoles)
The gain matrix a.k.a. lead field.
alpha_space : float
The spatial regularization parameter. It should be between 0 and 100.
alpha_time : float
        The temporal regularization parameter. The higher it is, the smoother
        the estimated time series will be.
wsize: int
length of the STFT window in samples (must be a multiple of 4).
tstep: int
step between successive windows in samples (must be a multiple of 2,
a divider of wsize and smaller than wsize/2) (default: wsize/2).
n_orient : int
        The number of orientations (1: fixed or 3: free or loose).
maxit : int
The number of iterations.
tol : float
If absolute difference between estimates at 2 successive iterations
is lower than tol, the convergence is reached.
log_objective : bool
If True, the value of the minimized objective function is computed
and stored at every iteration.
debias : bool
Debias source estimates.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
X : array, shape (n_active, n_times)
The source estimates.
active_set : array
The mask of active sources.
E : list
The value of the objective function at each iteration. If log_objective
is False, it will be empty.
"""
n_sensors, n_times = M.shape
n_sensors, n_sources = G.shape
n_positions = n_sources // n_orient
n_step = int(ceil(n_times / float(tstep)))
n_freq = wsize // 2 + 1
n_coefs = n_step * n_freq
phi = _Phi(wsize, tstep, n_coefs)
phiT = _PhiT(tstep, n_freq, n_step, n_times)
if n_orient == 1:
lc = np.sum(G * G, axis=0)
else:
lc = np.empty(n_positions)
for j in range(n_positions):
G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
lc[j] = linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
logger.info("Using block coordinate descent and active set approach")
X, Z, active_set, E = _tf_mixed_norm_solver_bcd_active_set(
M, G, alpha_space, alpha_time, lc, phi, phiT, Z_init=None,
wsize=wsize, tstep=tstep, n_orient=n_orient, maxit=maxit, tol=tol,
log_objective=log_objective, verbose=None)
if np.any(active_set) and debias:
bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
X *= bias[:, np.newaxis]
return X, active_set, E
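# Illustrative sketch (added for clarity; not part of the original module),
# showing only the call signature on a small simulated problem. wsize/tstep
# respect the constraints documented above; the alpha_space/alpha_time
# values are arbitrary and not a tuned reconstruction.
def _example_tf_mixed_norm_solver():
    rng = np.random.RandomState(0)
    n_sensors, n_sources, n_times = 20, 30, 64
    G = rng.randn(n_sensors, n_sources)
    X_true = np.zeros((n_sources, n_times))
    X_true[:2] = np.sin(2. * np.pi * 5. * np.arange(n_times) / n_times)
    M = np.dot(G, X_true)
    alpha_max = norm_l2inf(np.dot(G.T, M), 1)
    return tf_mixed_norm_solver(M, G, alpha_space=0.3 * alpha_max,
                                alpha_time=0.01 * alpha_max, wsize=16,
                                tstep=4, n_orient=1, maxit=200, tol=1e-6)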
| bsd-3-clause |
zhengfaxiang/Runge-Kutta-Fehlberg | src/hill_surf.py | 1 | 4678 | #!/usr/bin/env python
"""
Script to plot Hill surface.
"""
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
def Hill_Surf(n, Miu, Cj):
"""Implicit equation of Hill surface."""
def hill_surf(x, y, z):
r1 = ((x + Miu)**2 + y**2 + z**2)**0.5
r2 = ((x + Miu - 1)**2 + y**2 + z**2)**0.5
return 0.5 * n**2 * (x**2 + y**2) + (1 - Miu)/r1 + Miu/r2 - 0.5 * Cj
return hill_surf
def Hill_Surf_Cj_xy(n, Miu):
"""Cj function when z==0."""
def func(x, y):
r1 = ((x + Miu)**2 + y**2)**0.5
r2 = ((x + Miu - 1)**2 + y**2)**0.5
return n**2 * (x**2 + y**2) + 2 * (1 - Miu) / r1 + 2 * Miu / r2
return func
def plot_implicit(ax, fn, bbox=(-2.5, 2.5)):
""" create a plot of an implicit function
fn ...implicit function (plot where fn==0)
bbox ..the x,y,and z limits of plotted interval"""
xmin, xmax, ymin, ymax, zmin, zmax = bbox*3
A = np.linspace(xmin, xmax, 100) # resolution of the contour
B = np.linspace(xmin, xmax, 15) # number of slices
A1, A2 = np.meshgrid(A, A) # grid on which the contour is plotted
for z in B: # plot contours in the XY plane
X, Y = A1, A2
Z = fn(X, Y, z)
ax.contour(X, Y, Z+z, [z], zdir='z')
# [z] defines the only level to plot for this contour for
# this value of z
for y in B: # plot contours in the XZ plane
X, Z = A1, A2
Y = fn(X, y, Z)
ax.contour(X, Y+y, Z, [y], zdir='y')
for x in B: # plot contours in the YZ plane
Y, Z = A1, A2
X = fn(x, Y, Z)
ax.contour(X+x, Y, Z, [x], zdir='x')
ax.tick_params(labelsize=6) # smaller label size
# must set plot limits because the contour will likely extend
# way beyond the displayed level. Otherwise matplotlib extends
# the plot limits to encompass all values in the contour.
ax.set_zlim3d(zmin, zmax)
ax.set_xlim3d(xmin, xmax)
ax.set_ylim3d(ymin, ymax)
def plot_contour(ax, f, bbox=(-2.5, 2.5), levels=[0]):
"""Plot contour."""
A = np.linspace(bbox[0], bbox[1], 300) # resolution of the contour
A1, A2 = np.meshgrid(A, A) # grid on which the contour is plotted
z = f(A1, A2)
if len(levels) > 1:
ax.contour(A1, A2, z, levels, linewidths=0.5, colors='k')
CF = ax.contourf(A1, A2, z, levels, cmap=plt.cm.jet)
cbar = plt.colorbar(CF, shrink=0.8)
# cbar.set_label('$C_{J}$', size=14)
cbar.ax.tick_params(labelsize=8)
else:
ax.contour(A1, A2, z, levels)
ax.tick_params(labelsize=8)
# parameters of Hill surface
Hill_Surf_Miu = 0.1
Hill_Surf_n = 1.0
Hill_Surf_Cj = np.array([3.5970, 3.4667, 3.0996])
# use serif font
plt.rc('font', family='serif')
# plot 3D Hill surface
fig1 = plt.figure(figsize=(8, 10))
ax1 = []
ax1.append(fig1.add_subplot(321, projection='3d'))
ax1.append(fig1.add_subplot(322))
ax1.append(fig1.add_subplot(323, projection='3d'))
ax1.append(fig1.add_subplot(324))
ax1.append(fig1.add_subplot(325, projection='3d'))
ax1.append(fig1.add_subplot(326))
for i in range(3):
plot_implicit(ax1[2 * i], Hill_Surf(Hill_Surf_n, Hill_Surf_Miu,
Hill_Surf_Cj[i]), bbox=(-2.0, 2.0))
ax1[2 * i].set_title('$C_{L_{%d}}$=%.4f' % (i + 1, Hill_Surf_Cj[i]),
fontsize=10)
plot_contour(ax1[2 * i + 1], Hill_Surf_Cj_xy(Hill_Surf_n, Hill_Surf_Miu),
bbox=(-2.0, 2.0), levels=[Hill_Surf_Cj[i]])
fig1.suptitle('Hill Surface, $\mu{}=0.1$', fontsize=16)
fig1.savefig('hill_surf.png')
# plot C_{J} contour
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
steps = 12
levels = np.linspace(2.9, 4.0, steps)
plot_contour(ax2, Hill_Surf_Cj_xy(Hill_Surf_n, Hill_Surf_Miu),
bbox=(-2.0, 2.0), levels=levels)
ax2.set_xlabel('$x$', fontsize=14)
ax2.set_ylabel('$y$', fontsize=14)
ax2.set_title('$C_{J}$ Contour, $\mu{}=0.1$', fontsize=16)
fig2.savefig('cj_contour.png')
# plot C_{J} surface
fig3 = plt.figure()
ax3 = fig3.add_subplot(111, projection='3d')
bbox = (-2.0, 2.0)
A = np.linspace(bbox[0], bbox[1], 200)
X, Y = np.meshgrid(A, A)
Z = np.log10((Hill_Surf_Cj_xy(Hill_Surf_n, Hill_Surf_Miu))(X, Y))
surf = ax3.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=plt.cm.coolwarm,
linewidth=0, antialiased=False)
cbar = plt.colorbar(surf, shrink=0.5, aspect=8)
cbar.ax.tick_params(labelsize=8)
ax3.set_xlabel('$x$')
ax3.set_ylabel('$y$')
ax3.set_zlabel('$log(C_{J})$')
ax3.tick_params(labelsize=8)
ax3.set_title('$C_{J}$ Surface, $\mu{}=0.1, z=0$', fontsize=16)
ax3.dist = 12
fig3.savefig('cj_surface.png')
# show figures
plt.show()
| mit |
gpospelov/BornAgain | Examples/varia/MaterialProfileWithParticles.py | 1 | 1734 | """
Example for producing a profile of SLD of a multilayer with particles
and slicing.
"""
import bornagain as ba
from bornagain import deg, angstrom, nm
import numpy as np
import matplotlib.pyplot as plt
def get_sample():
"""
Defines sample and returns it
"""
# creating materials
m_ambient = ba.MaterialBySLD("Ambient", 0.0, 0.0)
m_ti = ba.MaterialBySLD("Ti", -1.9493e-06, 0.0)
m_ni = ba.MaterialBySLD("Ni", 9.4245e-06, 0.0)
m_particle = ba.MaterialBySLD("Particle", 5e-6, 0.0)
m_substrate = ba.MaterialBySLD("SiSubstrate", 2.0704e-06, 0.0)
# creating layers
ambient_layer = ba.Layer(m_ambient)
ti_layer = ba.Layer(m_ti, 30*angstrom)
ni_layer = ba.Layer(m_ni, 70*angstrom)
substrate_layer = ba.Layer(m_substrate)
# create roughness
roughness = ba.LayerRoughness(5*angstrom, 0.5, 10*angstrom)
# create particle layout
ff = ba.FormFactorCone(5*nm, 10*nm, 75*deg)
particle = ba.Particle(m_particle, ff)
layout = ba.ParticleLayout()
layout.addParticle(particle)
iff = ba.InterferenceFunction2DLattice(ba.SquareLattice2D(10*nm, 0))
layout.setInterferenceFunction(iff)
ambient_layer.addLayout(layout)
ambient_layer.setNumberOfSlices(20)
# creating multilayer
multi_layer = ba.MultiLayer()
multi_layer.addLayer(ambient_layer)
for i in range(2):
multi_layer.addLayerWithTopRoughness(ti_layer, roughness)
multi_layer.addLayerWithTopRoughness(ni_layer, roughness)
multi_layer.addLayer(substrate_layer)
return multi_layer
if __name__ == '__main__':
sample = get_sample()
zpoints, slds = ba.materialProfile(sample)
plt.figure()
plt.plot(zpoints, np.real(slds))
plt.show()
| gpl-3.0 |
dennisobrien/bokeh | sphinx/source/conf.py | 3 | 9540 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from os.path import abspath, dirname, join
#
# Bokeh documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 12 23:43:03 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.7'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'bokeh.sphinxext.bokeh_autodoc',
'bokeh.sphinxext.bokeh_color',
'bokeh.sphinxext.bokeh_enum',
'bokeh.sphinxext.bokeh_gallery',
'bokeh.sphinxext.bokeh_github',
'bokeh.sphinxext.bokeh_jinja',
'bokeh.sphinxext.bokeh_model',
'bokeh.sphinxext.bokeh_options',
'bokeh.sphinxext.bokeh_palette',
'bokeh.sphinxext.bokeh_palette_group',
'bokeh.sphinxext.bokeh_plot',
'bokeh.sphinxext.bokeh_prop',
'bokeh.sphinxext.bokeh_releases',
'bokeh.sphinxext.bokeh_sitemap',
'bokeh.sphinxext.collapsible_code_block',
]
napoleon_include_init_with_doc = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Bokeh'
copyright = '© Copyright 2015-2018, Anaconda and Bokeh Contributors.'
# Get the standard computed Bokeh version string to use for |version|
# and |release|
from bokeh import __version__
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# Check for version override (e.g. when re-deploying a previously released
# docs, or when pushing test docs that do not have a corresponding BokehJS
# available on CDN)
from bokeh.settings import settings
if settings.docs_version():
version = release = settings.docs_version()
# get all the versions that will appear in the version dropdown
f = open(join(dirname(abspath(__file__)), "all_versions.txt"))
all_versions = [x.strip() for x in reversed(f.readlines())]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
#
# NOTE: in these docs all .py script are assumed to be bokeh plot scripts!
# with bokeh_plot_pyfile_include_dirs set desired folder to look for .py files
bokeh_plot_pyfile_include_dirs = ['docs']
# Whether to allow builds to succeed if a Google API key is not defined and plots
# containing "GOOGLE_API_KEY" are processed
bokeh_missing_google_api_key_ok = False
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Sort members by type
autodoc_member_order = 'groupwise'
# patterns to exclude
exclude_patterns = ['docs/releases/*']
# This would more properly be done with rst_epilog but something about
# the combination of this with the bokeh-gallery directive breaks the build
rst_prolog = """
.. |Color| replace:: :py:class:`~bokeh.core.properties.Color`
.. |DataSpec| replace:: :py:class:`~bokeh.core.properties.DataSpec`
.. |Document| replace:: :py:class:`~bokeh.document.Document`
.. |HasProps| replace:: :py:class:`~bokeh.core.has_props.HasProps`
.. |Model| replace:: :py:class:`~bokeh.model.Model`
.. |Property| replace:: :py:class:`~bokeh.core.property.bases.Property`
.. |PropertyDescriptor| replace:: :py:class:`~bokeh.core.property.descriptor.PropertyDescriptor`
.. |PropertyContainer| replace:: :py:class:`~bokeh.core.property.wrappers.PropertyContainer`
.. |UnitsSpec| replace:: :py:class:`~bokeh.core.properties.UnitsSpec`
.. |field| replace:: :py:func:`~bokeh.core.properties.field`
.. |value| replace:: :py:func:`~bokeh.core.properties.value`
"""
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bokeh_theme'
html_theme_path = ['.']
html_context = {
'SITEMAP_BASE_URL': 'https://bokeh.pydata.org/en/', # Trailing slash is needed
'DESCRIPTION': 'Bokeh visualization library, documentation site.',
'AUTHOR': 'Bokeh contributors',
'VERSION': version,
'NAV': (
('Github', '//github.com/bokeh/bokeh'),
),
'ABOUT': (
('Vision and Work', 'vision'),
('Team', 'team'),
('Citation', 'citation'),
('Contact', 'contact'),
),
'SOCIAL': (
('Contribute', 'contribute'),
('Mailing list', '//groups.google.com/a/anaconda.com/forum/#!forum/bokeh'),
('Github', '//github.com/bokeh/bokeh'),
('Twitter', '//twitter.com/BokehPlots'),
),
'NAV_DOCS': (
('Installation', 'installation'),
('User Guide', 'user_guide'),
('Gallery', 'gallery'),
('Tutorial', 'https://mybinder.org/v2/gh/bokeh/bokeh-notebooks/master?filepath=tutorial%2F00%20-%20Introduction%20and%20Setup.ipynb'),
('Reference', 'reference'),
('Releases', 'releases'),
('Developer Guide', 'dev_guide'),
),
'ALL_VERSIONS': all_versions,
}
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bokehdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Bokeh.tex', u'Bokeh Documentation', u'Anaconda', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bokeh', u'Bokeh Documentation',
[u'Anaconda'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Bokeh', u'Bokeh Documentation', u'Anaconda', 'Bokeh', 'Interactive Web Plotting for Python', 'Graphics'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# intersphinx settings
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None)
}
| bsd-3-clause |
datapythonista/pandas | pandas/tests/frame/indexing/test_insert.py | 3 | 2888 | """
test_insert is specifically for the DataFrame.insert method; not to be
confused with tests with "insert" in their names that are really testing
__setitem__.
"""
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
from pandas import (
DataFrame,
Index,
)
import pandas._testing as tm
class TestDataFrameInsert:
def test_insert(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
tm.assert_index_equal(df.columns, Index(["foo", "c", "b", "a"]))
tm.assert_series_equal(df["a"], df["foo"], check_names=False)
df.insert(2, "bar", df["c"])
tm.assert_index_equal(df.columns, Index(["foo", "c", "bar", "b", "a"]))
tm.assert_almost_equal(df["c"], df["bar"], check_names=False)
with pytest.raises(ValueError, match="already exists"):
df.insert(1, "a", df["b"])
msg = "cannot insert c, already exists"
with pytest.raises(ValueError, match=msg):
df.insert(1, "c", df["b"])
df.columns.name = "some_name"
# preserve columns name field
df.insert(0, "baz", df["c"])
assert df.columns.name == "some_name"
def test_insert_column_bug_4032(self):
# GH#4032, inserting a column and renaming causing errors
df = DataFrame({"b": [1.1, 2.2]})
df = df.rename(columns={})
df.insert(0, "a", [1, 2])
result = df.rename(columns={})
str(result)
expected = DataFrame([[1, 1.1], [2, 2.2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
df.insert(0, "c", [1.3, 2.3])
result = df.rename(columns={})
str(result)
expected = DataFrame([[1.3, 1, 1.1], [2.3, 2, 2.2]], columns=["c", "a", "b"])
tm.assert_frame_equal(result, expected)
def test_insert_with_columns_dups(self):
# GH#14291
df = DataFrame()
df.insert(0, "A", ["g", "h", "i"], allow_duplicates=True)
df.insert(0, "A", ["d", "e", "f"], allow_duplicates=True)
df.insert(0, "A", ["a", "b", "c"], allow_duplicates=True)
exp = DataFrame(
[["a", "d", "g"], ["b", "e", "h"], ["c", "f", "i"]], columns=["A", "A", "A"]
)
tm.assert_frame_equal(df, exp)
def test_insert_item_cache(self, using_array_manager):
df = DataFrame(np.random.randn(4, 3))
ser = df[0]
if using_array_manager:
expected_warning = None
else:
# with BlockManager warn about high fragmentation of single dtype
expected_warning = PerformanceWarning
with tm.assert_produces_warning(expected_warning):
for n in range(100):
df[n + 3] = df[1] * n
ser.values[0] = 99
assert df.iloc[0, 0] == df[0][0]
| bsd-3-clause |
jklenzing/pysat | pysat/instruments/pysat_testing_xarray.py | 2 | 8837 | # -*- coding: utf-8 -*-
"""
Produces fake instrument data for testing.
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import numpy as np
import pandas as pds
import xarray
import pysat
from pysat.instruments.methods import testing as test
# pysat required parameters
platform = 'pysat'
name = 'testing_xarray'
# dictionary of data 'tags' and corresponding description
tags = {'': 'Regular testing data set'}
# dictionary of satellite IDs, list of corresponding tags
sat_ids = {'': ['']}
_test_dates = {'': {'': pysat.datetime(2009, 1, 1)}}
pandas_format = False
def init(self):
self.new_thing = True
def load(fnames, tag=None, sat_id=None, sim_multi_file_right=False,
sim_multi_file_left=False, malformed_index=False,
**kwargs):
""" Loads the test files
Parameters
----------
fnames : (list)
List of filenames
    tag : (str or NoneType)
        Instrument tag (accepts '')
    sat_id : (str or NoneType)
        Instrument satellite ID (accepts '' or a number (i.e., '10'), which
        specifies the number of time samples to include in the test data)
sim_multi_file_right : (boolean)
Adjusts date range to be 12 hours in the future or twelve hours beyond
root_date (default=False)
sim_multi_file_left : (boolean)
Adjusts date range to be 12 hours in the past or twelve hours before
root_date (default=False)
malformed_index : (boolean)
If True, time index will be non-unique and non-monotonic.
kwargs : dict
Additional unspecified keywords supplied to pysat.Instrument upon instantiation
are passed here.
Returns
-------
data : (xr.Dataset)
Testing data
meta : (pysat.Meta)
Metadata
"""
# create an artificial satellite data set
parts = os.path.split(fnames[0])[-1].split('-')
yr = int(parts[0])
month = int(parts[1])
day = int(parts[2][0:2])
date = pysat.datetime(yr, month, day)
if sim_multi_file_right:
root_date = pysat.datetime(2009, 1, 1, 12)
data_date = date + pds.DateOffset(hours=12)
elif sim_multi_file_left:
root_date = pysat.datetime(2008, 12, 31, 12)
data_date = date - pds.DateOffset(hours=12)
else:
root_date = pysat.datetime(2009, 1, 1)
data_date = date
num = 86400 if sat_id == '' else int(sat_id)
num_array = np.arange(num)
index = pds.date_range(data_date,
data_date+pds.DateOffset(seconds=num-1),
freq='S')
if malformed_index:
index = index[0:num].tolist()
# nonmonotonic
index[0:3], index[3:6] = index[3:6], index[0:3]
# non unique
index[6:9] = [index[6]]*3
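# (blocks of three timestamps are swapped and three entries duplicated so
#  downstream code can be exercised against a broken time index)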
data = xarray.Dataset({'uts': (('time'), index)}, coords={'time':index})
# need to create simple orbits here. Have start of first orbit
# at 2009,1, 0 UT. 14.84 orbits per day
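# (an orbital period of 5820 s gives 86400 / 5820 ~ 14.85 orbits per day)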
time_delta = date - root_date
mlt = test.generate_fake_data(time_delta.total_seconds(), num_array,
period=5820, data_range=[0.0, 24.0])
data['mlt'] = (('time'), mlt)
# do slt, 20 second offset from mlt
slt = test.generate_fake_data(time_delta.total_seconds()+20, num_array,
period=5820, data_range=[0.0, 24.0])
data['slt'] = (('time'), slt)
# create a fake longitude, resets every 6240 seconds
# sat moves at 360/5820 deg/s, Earth rotates at 360/86400, takes extra time
# to go around full longitude
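# (relative rate = 360/5820 - 360/86400 deg/s, so the effective period is
#  1 / (1/5820 - 1/86400) ~ 6240 s)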
longitude = test.generate_fake_data(time_delta.total_seconds(), num_array,
period=6240, data_range=[0.0, 360.0])
data['longitude'] = (('time'), longitude)
# create latitude area for testing polar orbits
angle = test.generate_fake_data(time_delta.total_seconds(),
num_array, period=5820,
data_range=[0.0, 2.0*np.pi])
latitude = 90.0 * np.cos(angle)
data['latitude'] = (('time'), latitude)
# fake orbit number
fake_delta = date - pysat.datetime(2008, 1, 1)
orbit_num = test.generate_fake_data(fake_delta.total_seconds(),
num_array, period=5820,
cyclic=False)
data['orbit_num'] = (('time'), orbit_num)
# create some fake data to support testing of averaging routines
mlt_int = data['mlt'].astype(int)
long_int = (data['longitude'] / 15.).astype(int)
data['dummy1'] = (('time'), mlt_int)
data['dummy2'] = (('time'), long_int)
data['dummy3'] = (('time'), mlt_int + long_int * 1000.)
data['dummy4'] = (('time'), num_array)
data['string_dummy'] = (('time'), ['test'] * len(data.indexes['time']))
data['unicode_dummy'] = (('time'), [u'test'] * len(data.indexes['time']))
data['int8_dummy'] = (('time'), np.array([1] * len(data.indexes['time']),
dtype=np.int8))
data['int16_dummy'] = (('time'), np.array([1] * len(data.indexes['time']),
dtype=np.int16))
data['int32_dummy'] = (('time'), np.array([1] * len(data.indexes['time']),
dtype=np.int32))
data['int64_dummy'] = (('time'), np.array([1] * len(data.indexes['time']),
dtype=np.int64))
return data, meta.copy()
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Produce a fake list of files spanning a year"""
index = pds.date_range(pysat.datetime(2008, 1, 1),
pysat.datetime(2010, 12, 31))
names = [data_path+date.strftime('%Y-%m-%d')+'.nofile' for date in index]
return pysat.Series(names, index=index)
def download(date_array, tag, sat_id, data_path=None, user=None,
password=None):
pass
meta = pysat.Meta()
meta['uts'] = {'units': 's',
'long_name': 'Universal Time',
'custom': False}
meta['Epoch'] = {'units': 'Milliseconds since 1970-1-1',
'Bin_Location': 0.5,
'notes': 'UTC time at middle of geophysical measurement.',
'desc': 'UTC seconds', }
meta['mlt'] = {'units': 'hours',
'long_name': 'Magnetic Local Time',
'label': 'MLT',
'axis': 'MLT',
'desc': 'Magnetic Local Time',
'value_min': 0.,
'value_max': 24.,
'notes': ('Magnetic Local Time is the solar local time of the '
'field line at the location where the field crosses '
'the magnetic equator. In this case we just simulate '
'0-24 with a consistent orbital period and an offset '
'with SLT.'),
'fill': np.nan,
'scale': 'linear'}
meta['slt'] = {'units': 'hours',
'long_name': 'Solar Local Time',
'label': 'SLT',
'axis': 'SLT',
'desc': 'Solar Local Time',
'value_min': 0.,
'value_max': 24.,
'notes': ('Solar Local Time is the local time (zenith angle of '
'sun) of the given location. An overhead sun is noon; '
'+/- 90 degrees corresponds to 6 and 18 SLT.'),
'fill': np.nan,
'scale': 'linear'}
meta['orbit_num'] = {'units': '',
'long_name': 'Orbit Number',
'label': 'Orbit Number',
'axis': 'Orbit Number',
'desc': 'Orbit Number',
'value_min': 0.,
'value_max': 25000.,
'notes': ('Number of orbits since the start of the '
'mission. For this simulation we use the '
'number of 5820 second periods since the '
'start, 2008-01-01.'),
'fill': np.nan,
'scale': 'linear'}
meta['longitude'] = {'units': 'degrees', 'long_name': 'Longitude'}
meta['latitude'] = {'units': 'degrees', 'long_name': 'Latitude'}
meta['dummy1'] = {'units': '', 'long_name': 'dummy1'}
meta['dummy2'] = {'units': '', 'long_name': 'dummy2'}
meta['dummy3'] = {'units': '', 'long_name': 'dummy3'}
meta['dummy4'] = {'units': '', 'long_name': 'dummy4'}
meta['string_dummy'] = {'units': '', 'long_name': 'string_dummy'}
meta['unicode_dummy'] = {'units': '', 'long_name': 'unicode_dummy'}
meta['int8_dummy'] = {'units': '', 'long_name': 'int8_dummy'}
meta['int16_dummy'] = {'units': '', 'long_name': 'int16_dummy'}
meta['int32_dummy'] = {'units': '', 'long_name': 'int32_dummy'}
meta['int64_dummy'] = {'units': '', 'long_name': 'int64_dummy'}
| bsd-3-clause |
AlexanderFabisch/scikit-learn | sklearn/linear_model/tests/test_omp.py | 272 | 7752 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
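# G (the Gram matrix X.T X) and Xy (X.T y) are what the *_gram variants consume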
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
dchabot/bluesky | bluesky/testing/noseclasses.py | 4 | 4638 | ########################################################################
# This file contains code from numpy and matplotlib (noted in the code)#
# which is (c) the respective projects. #
# #
# Modifications and original code are (c) BNL/BSA, license below #
# #
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This module is for decorators related to testing.
Much of this code is inspired by the code in matplotlib. Exact copies
are noted.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
# copied from matplotlib
class KnownFailureDidNotFailTest(Exception):
'''Raise this exception to mark a test that should have failed but did not.'''
pass
# This code is copied from numpy
class KnownFailureTest(Exception):
'''Raise this exception to mark a test as a known failing test.'''
pass
# This code is copied from numpy
class KnownFailure(ErrorClassPlugin):
'''Plugin that installs a KNOWNFAIL error class for the
KnownFailureClass exception. When KnownFailureTest is raised,
the exception will be logged in the knownfail attribute of the
result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
exception will not be counted as an error or failure.
'''
enabled = True
knownfail = ErrorClass(KnownFailureTest,
label='KNOWNFAIL',
isfailure=False)
def options(self, parser, env=os.environ):
env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
parser.add_option('--no-knownfail', action='store_true',
dest='noKnownFail', default=env.get(env_opt, False),
help='Disable special handling of KnownFailureTest '
'exceptions')
def configure(self, options, conf):
if not self.can_configure:
return
self.conf = conf
disable = getattr(options, 'noKnownFail', False)
if disable:
self.enabled = False
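# Illustrative sketch (not part of the original module): a test marks itself
# as a known failure by raising KnownFailureTest, which this plugin reports
# as 'KNOWNFAIL' instead of counting it as an error or failure, e.g.:
#
#     def test_not_ready_yet():
#         raise KnownFailureTest("feature X is not implemented yet")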
| bsd-3-clause |
plissonf/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
cheral/orange3 | Orange/preprocess/score.py | 2 | 12309 | from collections import defaultdict
from itertools import chain
import numpy as np
from sklearn import feature_selection as skl_fss
from Orange.misc.wrapper_meta import WrapperMeta
from Orange.statistics import contingency, distribution
from Orange.data import Domain, Variable, DiscreteVariable, ContinuousVariable
from Orange.preprocess.preprocess import Discretize, Impute, RemoveNaNClasses
from Orange.preprocess.util import _RefuseDataInConstructor
from Orange.util import Reprable
__all__ = ["Chi2",
"ANOVA",
"UnivariateLinearRegression",
"InfoGain",
"GainRatio",
"Gini",
"ReliefF",
"RReliefF",
"FCBF"]
class Scorer(_RefuseDataInConstructor, Reprable):
feature_type = None
class_type = None
supports_sparse_data = None
preprocessors = [
RemoveNaNClasses()
]
def __call__(self, data, feature=None):
if not data.domain.class_var:
raise ValueError("Data with class labels required.")
if not isinstance(data.domain.class_var, self.class_type):
raise ValueError("Scoring method %s requires a class variable of type %s." %
(type(self).__name__, self.class_type.__name__))
if feature is not None:
f = data.domain[feature]
data = data.from_table(Domain([f], data.domain.class_vars), data)
for pp in self.preprocessors:
data = pp(data)
if any(not isinstance(a, self.feature_type)
for a in data.domain.attributes):
raise ValueError('Only %ss are supported' % self.feature_type)
return self.score_data(data, feature)
def score_data(self, data, feature):
raise NotImplementedError
class SklScorer(Scorer, metaclass=WrapperMeta):
supports_sparse_data = True
preprocessors = Scorer.preprocessors + [
Impute()
]
def score_data(self, data, feature):
score = self.score(data.X, data.Y)
if feature is not None:
return score[0]
return score
class Chi2(SklScorer):
"""
A wrapper for `${sklname}`. The following is the documentation
from `scikit-learn <http://scikit-learn.org>`_.
${skldoc}
"""
__wraps__ = skl_fss.chi2
feature_type = DiscreteVariable
class_type = DiscreteVariable
preprocessors = SklScorer.preprocessors + [
Discretize(remove_const=False)
]
def score(self, X, y):
f, p = skl_fss.chi2(X, y)
return f
class ANOVA(SklScorer):
"""
A wrapper for `${sklname}`. The following is the documentation
from `scikit-learn <http://scikit-learn.org>`_.
${skldoc}
"""
__wraps__ = skl_fss.f_classif
feature_type = ContinuousVariable
class_type = DiscreteVariable
def score(self, X, y):
f, p = skl_fss.f_classif(X, y)
return f
class UnivariateLinearRegression(SklScorer):
"""
A wrapper for `${sklname}`. The following is the documentation
from `scikit-learn <http://scikit-learn.org>`_.
${skldoc}
"""
__wraps__ = skl_fss.f_regression
feature_type = ContinuousVariable
class_type = ContinuousVariable
def score(self, X, y):
f, p = skl_fss.f_regression(X, y)
return f
class LearnerScorer(Scorer):
def score(self, data):
raise NotImplementedError
def score_data(self, data, feature=None):
scores = self.score(data)
def average_scores(scores):
scores_grouped = defaultdict(list)
for attr, score in zip(self.domain.attributes, scores):
# Go up the chain of preprocessors to obtain the original variable
while getattr(attr, 'compute_value', False):
attr = getattr(attr.compute_value, 'variable', attr)
scores_grouped[attr].append(score)
return [sum(scores_grouped[attr]) / len(scores_grouped[attr])
if attr in scores_grouped else 0
for attr in data.domain.attributes]
scores = np.atleast_2d(scores)
if data.domain != self.domain:
scores = np.array([average_scores(row) for row in scores])
return scores[:, data.domain.attributes.index(feature)] \
if feature else scores
class ClassificationScorer(Scorer):
"""
Base class for feature scores in a class-labeled data set.
Parameters
----------
feature : int, string, Orange.data.Variable
Feature id
data : Orange.data.Table
Data set
Attributes
----------
feature_type : Orange.data.Variable
Required type of features.
class_type : Orange.data.Variable
Required type of class variable.
"""
feature_type = DiscreteVariable
class_type = DiscreteVariable
supports_sparse_data = True
preprocessors = Scorer.preprocessors + [
Discretize(remove_const=False)
]
def score_data(self, data, feature):
instances_with_class = \
np.sum(distribution.Discrete(data, data.domain.class_var))
def score_from_contingency(f):
cont = contingency.Discrete(data, f)
return self.from_contingency(
cont, 1. - np.sum(cont.unknowns)/instances_with_class)
scores = [score_from_contingency(f) for f in data.domain.attributes]
if feature is not None:
return scores[0]
return scores
def _entropy(D):
"""Entropy of class-distribution matrix"""
P = D / np.sum(D, axis=0)
PC = np.clip(P, 1e-15, 1)
return np.sum(np.sum(- P * np.log2(PC), axis=0) * np.sum(D, axis=0) / np.sum(D))
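# i.e. the per-column class entropy -sum_j p_j * log2(p_j), averaged over the
# columns of D with weights proportional to the column totals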
def _gini(D):
"""Gini index of class-distribution matrix"""
P = np.asarray(D / np.sum(D, axis=0))
return np.sum((1 - np.sum(P ** 2, axis=0)) *
np.sum(D, axis=0) / np.sum(D))
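# i.e. the per-column Gini impurity 1 - sum_j p_j**2, weighted by column totals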
def _symmetrical_uncertainty(X, Y):
"""Symmetrical uncertainty, Press et al., 1988."""
from Orange.preprocess._relieff import contingency_table
X, Y = np.around(X), np.around(Y)
cont = contingency_table(X, Y)
ig = InfoGain().from_contingency(cont, 1)
return 2 * ig / (_entropy(cont.sum(0)) + _entropy(cont.sum(1)))
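# SU(X, Y) = 2 * IG(X, Y) / (H(X) + H(Y)), a gain normalized to lie in [0, 1]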
class FCBF(ClassificationScorer):
"""
Fast Correlation-Based Filter. Described in:
Yu, L., Liu, H.,
Feature selection for high-dimensional data: A fast correlation-based filter solution.
2003. http://www.aaai.org/Papers/ICML/2003/ICML03-111.pdf
"""
def score_data(self, data, feature=None):
S = []
for i, a in enumerate(data.X.T):
S.append((_symmetrical_uncertainty(a, data.Y), i))
S.sort()
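# S holds (SU(feature, class), feature index) sorted by increasing relevance;
# the loop below walks it from the most relevant feature down and discards any
# feature that is more strongly correlated (in SU terms) with an already kept
# feature than with the class, i.e. a redundant feature in the FCBF sense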
worst = []
p = 1
while True:
try: SUpc, Fp = S[-p]
except IndexError: break
q = p + 1
while True:
try: SUqc, Fq = S[-q]
except IndexError: break
# TODO: cache
if _symmetrical_uncertainty(data.X.T[Fp],
data.X.T[Fq]) >= SUqc:
del S[-q]
worst.append((1e-4*SUqc, Fq))
else:
q += 1
p += 1
best = S
scores = [i[0] for i in sorted(chain(best, worst), key=lambda i: i[1])]
return np.array(scores) if not feature else scores[0]
class InfoGain(ClassificationScorer):
"""
Information gain is the expected decrease of entropy. See `Wikipedia entry on information gain
<http://en.wikipedia.org/wiki/Information_gain_ratio>`_.
"""
def from_contingency(self, cont, nan_adjustment):
h_class = _entropy(np.sum(cont, axis=1))
h_residual = _entropy(np.compress(np.sum(cont, axis=0), cont, axis=1))
return nan_adjustment * (h_class - h_residual)
class GainRatio(ClassificationScorer):
"""
Information gain ratio is the ratio between information gain and
the entropy of the feature's
value distribution. The score was introduced in [Quinlan1986]_
to alleviate overestimation for multi-valued features. See `Wikipedia entry on gain ratio
<http://en.wikipedia.org/wiki/Information_gain_ratio>`_.
.. [Quinlan1986] J R Quinlan: Induction of Decision Trees, Machine Learning, 1986.
"""
def from_contingency(self, cont, nan_adjustment):
h_class = _entropy(np.sum(cont, axis=1))
h_residual = _entropy(np.compress(np.sum(cont, axis=0), cont, axis=1))
h_attribute = _entropy(np.sum(cont, axis=0))
if h_attribute == 0:
h_attribute = 1
return nan_adjustment * (h_class - h_residual) / h_attribute
class Gini(ClassificationScorer):
"""
Gini impurity is the probability that two randomly chosen instances will have different
classes. See `Wikipedia entry on Gini impurity
<https://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity>`_.
"""
def from_contingency(self, cont, nan_adjustment):
return (_gini(np.sum(cont, axis=1)) - _gini(cont)) * nan_adjustment
class ReliefF(Scorer):
"""
ReliefF algorithm. Contrary to most other scorers, Relief family of
algorithms is not as myopic but tends to give unreliable results with
datasets with lots (hundreds) of features.
Robnik-Šikonja, M., Kononenko, I.
Theoretical and empirical analysis of ReliefF and RReliefF.
2003. http://lkm.fri.uni-lj.si/rmarko/papers/robnik03-mlj.pdf
"""
feature_type = Variable
class_type = DiscreteVariable
supports_sparse_data = False
def __init__(self, n_iterations=50, k_nearest=10):
self.n_iterations = n_iterations
self.k_nearest = k_nearest
def score_data(self, data, feature):
if len(data.domain.class_vars) != 1:
raise ValueError('ReliefF requires one single class')
if not data.domain.class_var.is_discrete:
raise ValueError('ReliefF supports classification; use RReliefF '
'for regression')
if len(data.domain.class_var.values) == 1: # Single-class value non-problem
return 0 if feature else np.zeros(data.X.shape[1])
from Orange.preprocess._relieff import relieff
weights = np.asarray(relieff(data.X, data.Y,
self.n_iterations, self.k_nearest,
np.array([a.is_discrete for a in data.domain.attributes])))
if feature:
return weights[0]
return weights
class RReliefF(Scorer):
feature_type = Variable
class_type = ContinuousVariable
supports_sparse_data = False
def __init__(self, n_iterations=50, k_nearest=50):
self.n_iterations = n_iterations
self.k_nearest = k_nearest
def score_data(self, data, feature):
if len(data.domain.class_vars) != 1:
raise ValueError('RReliefF requires one single class')
if not data.domain.class_var.is_continuous:
raise ValueError('RReliefF supports regression; use ReliefF '
'for classification')
from Orange.preprocess._relieff import rrelieff
weights = np.asarray(rrelieff(data.X, data.Y,
self.n_iterations, self.k_nearest,
np.array([a.is_discrete for a in data.domain.attributes])))
if feature:
return weights[0]
return weights
if __name__ == '__main__':
from Orange.data import Table
X = np.random.random((500, 20))
X[np.random.random(X.shape) > .95] = np.nan
y_cls = np.zeros(X.shape[0])
y_cls[(X[:, 0] > .5) ^ (X[:, 1] > .6)] = 1
y_reg = np.nansum(X[:, 0:3], 1)
for relief, y in ((ReliefF(), y_cls),
(RReliefF(), y_reg)):
data = Table.from_numpy(None, X, y)
weights = relief.score_data(data, False)
print(relief.__class__.__name__)
print('Best =', weights.argsort()[::-1])
print('Weights =', weights[weights.argsort()[::-1]])
X *= 10
data = Table.from_numpy(None, X, y_cls)
weights = FCBF().score_data(data, False)
print('FCBF')
print('Best =', weights.argsort()[::-1])
print('Weights =', weights[weights.argsort()[::-1]])
| bsd-2-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/test_common.py | 7 | 5050 | # -*- coding: utf-8 -*-
import nose
import numpy as np
from pandas import Series, Timestamp
from pandas.compat import range, lmap
import pandas.core.common as com
import pandas.util.testing as tm
_multiprocess_can_split_ = True
def test_mut_exclusive():
msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
with tm.assertRaisesRegexp(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_get_callable_name():
from functools import partial
getname = com._get_callable_name
def fn(x):
return x
lambda_ = lambda x: x
part1 = partial(fn)
part2 = partial(part1)
class somecall(object):
def __call__(self):
return x # noqa
assert getname(fn) == 'fn'
assert getname(lambda_)
assert getname(part1) == 'fn'
assert getname(part2) == 'fn'
assert getname(somecall()) == 'somecall'
assert getname(1) is None
def test_any_none():
assert (com._any_none(1, 2, 3, None))
assert (not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert (com._all_not_none(1, 2, 3, 4))
assert (not com._all_not_none(1, 2, 3, None))
assert (not com._all_not_none(None, None, None, None))
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2), (2, 3), (3, 4)]
result = list(com.iterpairs(data))
assert (result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
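# e.g. _bin(5, 4) == '0101'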
def test_locs(mask):
nfalse = sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == len(mask)
# exhaustively test all possible mask sequences of length 8
ncols = 8
for i in range(2 ** ncols):
cols = lmap(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(len(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
def test_map_indices_py():
data = [4, 3, 2, 1]
expected = {4: 0, 3: 1, 2: 2, 1: 3}
result = com.map_indices_py(data)
assert (result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(com.union(a, b))
assert ((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.difference(b, a))
assert ([4, 5, 6] == inter)
def test_intersection():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.intersection(a, b))
assert (a == inter)
def test_groupby():
values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3']
expected = {'f': ['foo', 'foo3'],
'b': ['bar', 'baz', 'baz2'],
'q': ['qux']}
grouped = com.groupby(values, lambda x: x[0])
for k, v in grouped:
assert v == expected[k]
def test_random_state():
import numpy.random as npr
# Check with seed
state = com._random_state(5)
tm.assert_equal(state.uniform(), npr.RandomState(5).uniform())
# Check with random state object
state2 = npr.RandomState(10)
tm.assert_equal(
com._random_state(state2).uniform(), npr.RandomState(10).uniform())
# check with no arg random state
assert com._random_state() is np.random
# Error for floats or strings
with tm.assertRaises(ValueError):
com._random_state('test')
with tm.assertRaises(ValueError):
com._random_state(5.5)
def test_maybe_match_name():
matched = com._maybe_match_name(
Series([1], name='x'), Series(
[2], name='x'))
assert (matched == 'x')
matched = com._maybe_match_name(
Series([1], name='x'), Series(
[2], name='y'))
assert (matched is None)
matched = com._maybe_match_name(Series([1]), Series([2], name='x'))
assert (matched is None)
matched = com._maybe_match_name(Series([1], name='x'), Series([2]))
assert (matched is None)
matched = com._maybe_match_name(Series([1], name='x'), [2])
assert (matched == 'x')
matched = com._maybe_match_name([1], Series([2], name='y'))
assert (matched == 'y')
def test_dict_compat():
data_datetime64 = {np.datetime64('1990-03-15'): 1,
np.datetime64('2015-03-15'): 2}
data_unchanged = {1: 2, 3: 4, 5: 6}
expected = {Timestamp('1990-3-15'): 1, Timestamp('2015-03-15'): 2}
assert (com._dict_compat(data_datetime64) == expected)
assert (com._dict_compat(expected) == expected)
assert (com._dict_compat(data_unchanged) == data_unchanged)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
claesenm/HPOlib | HPOlib/Plotting/plotTraceWithStd_perTime.py | 4 | 9873 | #!/usr/bin/env python
##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from argparse import ArgumentParser
import cPickle
import itertools
import sys
from matplotlib.pyplot import tight_layout, figure, subplots_adjust, subplot, savefig, show
import matplotlib.gridspec
import numpy as np
from HPOlib.Plotting import plot_util
__authors__ = ["Katharina Eggensperger", "Matthias Feurer"]
__contact__ = "automl.org"
def plot_optimization_trace(trial_list, name_list, times_list, optimum=0, title="",
log=True, save="", y_max=0, y_min=0, scale_std=1):
markers = plot_util.get_plot_markers()
colors = plot_util.get_plot_colors()
linestyles = itertools.cycle(['-'])
size = 1
ratio = 5
gs = matplotlib.gridspec.GridSpec(ratio, 1)
fig = figure(1, dpi=100)
fig.suptitle(title, fontsize=16)
ax1 = subplot(gs[0:ratio, :])
ax1.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
min_val = sys.maxint
max_val = -sys.maxint
max_trials = 0
trial_list_means = list()
trial_list_std = list()
# One trialList represents all runs from one optimizer
for i in range(len(trial_list)):
if log:
trial_list_means.append(np.log10(np.mean(np.array(trial_list[i]), axis=0)))
else:
trial_list_means.append(np.mean(np.array(trial_list[i]), axis=0))
trial_list_std.append(np.std(np.array(trial_list[i]), axis=0)*scale_std)
times_list[i] = np.array(times_list[i])
fig.suptitle(title, fontsize=16)
# Plot the average error and std
for i in range(len(trial_list_means)):
x = times_list[i]
y = trial_list_means[i] - optimum
# m = markers.next()
c = colors.next()
l = linestyles.next()
std_up = y + trial_list_std[i]
std_down = y - trial_list_std[i]
ax1.fill_between(x, std_down, std_up,
facecolor=c, alpha=0.3, edgecolor=c)
ax1.plot(x, y, color=c, linewidth=size*2,
label=name_list[i][0] + "(" + str(len(trial_list[i])) + ")",
linestyle=l, marker="")
if min(std_down) < min_val:
min_val = min(std_down)
if max(y + std_up) > max_val:
max_val = max(std_up)
if max(times_list[i]) > max_trials:
max_trials = max(times_list[i])
# Maybe plot on logscale
if scale_std != 1:
ylabel = ", %s * std" % scale_std
else:
ylabel = ""
if log:
ax1.set_ylabel("log10(Minfunction value)" + ylabel)
else:
ax1.set_ylabel("Minfunction value" + ylabel)
# Describe and label the plot
leg = ax1.legend(loc='best', fancybox=True)
leg.get_frame().set_alpha(0.5)
ax1.set_xlabel("Duration [sec] ")
if y_max == y_min:
# Set axes limit
ax1.set_ylim([min_val-0.1*abs((max_val-min_val)), max_val+0.1*abs((max_val-min_val))])
else:
ax1.set_ylim([y_min, y_max])
ax1.set_xlim([0, max_trials])
tight_layout()
subplots_adjust(top=0.85)
if save != "":
savefig(save, dpi=100, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches="tight", pad_inches=0.1)
else:
show()
def fill_trajectories(trace_list, times_list):
""" Each trajectory need to has the exact same number of entries and timestamps"""
# We need to define the max value = what is measured before the first evaluation
max_value = np.max([np.max(ls) for ls in trace_list])
number_exp = len(trace_list)
new_trajectories = list()
new_times = list()
for i in range(number_exp):
new_trajectories.append(list())
new_times.append(list())
# noinspection PyUnusedLocal
counter = [1 for i in range(number_exp)]
finish = False
# We need to insert the max values in the beginning and the min values in the end
for i in range(number_exp):
trace_list[i].insert(0, max_value)
trace_list[i].append(np.min(trace_list[i]))
times_list[i].insert(0, 0)
times_list[i].append(sys.maxint)
# Add all possible time values
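# (a k-way merge over the runs' timestamps: repeatedly advance the run whose
#  next timestamp is smallest and record every run's current value there)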
while not finish:
min_idx = np.argmin([times_list[idx][counter[idx]] for idx in range(number_exp)])
counter[min_idx] += 1
for idx in range(number_exp):
new_times[idx].append(times_list[min_idx][counter[min_idx] - 1])
new_trajectories[idx].append(trace_list[idx][counter[idx] - 1])
# Check if we're finished
for i in range(number_exp):
finish = True
if counter[i] < len(trace_list[i]) - 1:
finish = False
break
times = new_times
trajectories = new_trajectories
tmp_times = list()
# Sanitize lists and delete double entries
for i in range(number_exp):
tmp_times = list()
tmp_traj = list()
for t in range(len(times[i]) - 1):
if times[i][t+1] != times[i][t] and not np.isnan(times[i][t]):
tmp_times.append(times[i][t])
tmp_traj.append(trajectories[i][t])
tmp_times.append(times[i][-1])
tmp_traj.append(trajectories[i][-1])
times[i] = tmp_times
trajectories[i] = tmp_traj
# We need only one list for all times
times = tmp_times
return trajectories, times
def main(pkl_list, name_list, autofill, optimum=0, save="", title="",
log=False, y_min=0, y_max=0, scale_std=1, cut=sys.maxint):
trial_list = list()
times_list = list()
for i in range(len(pkl_list)):
tmp_trial_list = list()
tmp_times_list = list()
for pkl in pkl_list[i]:
fh = open(pkl, "r")
trials = cPickle.load(fh)
fh.close()
trace = plot_util.extract_trajectory(trials)
times = plot_util.extract_runtime_timestamps(trials)
tmp_times_list.append(times)
tmp_trial_list.append(trace)
# We feed this function with two lists of lists and get one list of lists and one list
tmp_trial_list, tmp_times_list = fill_trajectories(tmp_trial_list, tmp_times_list)
trial_list.append(tmp_trial_list)
times_list.append(tmp_times_list)
for i in range(len(trial_list)):
max_len = max([len(ls) for ls in trial_list[i]])
for t in range(len(trial_list[i])):
if len(trial_list[i][t]) < max_len and autofill:
diff = max_len - len(trial_list[i][t])
# noinspection PyUnusedLocal
trial_list[i][t] = np.append(trial_list[i][t], [trial_list[i][t][-1] for x in range(diff)])
elif len(trial_list[i][t]) < max_len and not autofill:
raise ValueError("(%s != %s), Traces do not have the same length, please use -a" %
(str(max_len), str(len(trial_list[i][t]))))
plot_optimization_trace(trial_list, name_list, times_list, optimum, title=title, log=log,
save=save, y_min=y_min, y_max=y_max, scale_std=scale_std)
if save != "":
sys.stdout.write("Saved plot to " + save + "\n")
else:
sys.stdout.write("..Done\n")
if __name__ == "__main__":
prog = "python plotTraceWithStd.py WhatIsThis <oneOrMorePickles> [WhatIsThis <oneOrMorePickles>]"
description = "Plot a Trace with std for multiple experiments"
parser = ArgumentParser(description=description, prog=prog)
# Options for specific benchmarks
parser.add_argument("-o", "--optimum", type=float, dest="optimum",
default=0, help="If not set, the optimum is supposed to be zero")
# Options which are available only for this plot
parser.add_argument("-a", "--autofill", action="store_true", dest="autofill",
default=False, help="Fill trace automatically")
parser.add_argument("-c", "--scale", type=float, dest="scale",
default=1, help="Multiply std to get a nicer plot")
# General Options
parser.add_argument("-l", "--log", action="store_true", dest="log",
default=False, help="Plot on log scale")
parser.add_argument("--max", dest="max", type=float,
default=0, help="Maximum of the plot")
parser.add_argument("--min", dest="min", type=float,
default=0, help="Minimum of the plot")
parser.add_argument("-s", "--save", dest="save",
default="", help="Where to save plot instead of showing it?")
parser.add_argument("-t", "--title", dest="title",
default="", help="Optional supertitle for plot")
args, unknown = parser.parse_known_args()
sys.stdout.write("\nFound " + str(len(unknown)) + " arguments\n")
pkl_list_main, name_list_main = plot_util.get_pkl_and_name_list(unknown)
main(pkl_list_main, name_list_main, autofill=args.autofill, optimum=args.optimum, save=args.save,
title=args.title, log=args.log, y_min=args.min, y_max=args.max, scale_std=args.scale)
| gpl-3.0 |
kdebrab/pandas | pandas/tests/sparse/series/test_indexing.py | 4 | 3127 | import pytest
import numpy as np
from pandas import SparseSeries, Series
from pandas.util import testing as tm
pytestmark = pytest.mark.skip("Wrong SparseBlock initialization (GH 17386)")
@pytest.mark.parametrize('data', [
[1, 1, 2, 2, 3, 3, 4, 4, 0, 0],
[1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, np.nan, np.nan],
[
1.0, 1.0 + 1.0j,
2.0 + 2.0j, 2.0,
3.0, 3.0 + 3.0j,
4.0 + 4.0j, 4.0,
np.nan, np.nan
]
])
@pytest.mark.xfail(reason='Wrong SparseBlock initialization '
'(GH 17386)')
def test_where_with_numeric_data(data):
# GH 17386
lower_bound = 1.5
sparse = SparseSeries(data)
result = sparse.where(sparse > lower_bound)
dense = Series(data)
dense_expected = dense.where(dense > lower_bound)
sparse_expected = SparseSeries(dense_expected)
tm.assert_series_equal(result, dense_expected)
tm.assert_sp_series_equal(result, sparse_expected)
@pytest.mark.parametrize('data', [
[1, 1, 2, 2, 3, 3, 4, 4, 0, 0],
[1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, np.nan, np.nan],
[
1.0, 1.0 + 1.0j,
2.0 + 2.0j, 2.0,
3.0, 3.0 + 3.0j,
4.0 + 4.0j, 4.0,
np.nan, np.nan
]
])
@pytest.mark.parametrize('other', [
True,
-100,
0.1,
100.0 + 100.0j
])
@pytest.mark.skip(reason='Wrong SparseBlock initialization '
'(Segfault) '
'(GH 17386)')
def test_where_with_numeric_data_and_other(data, other):
# GH 17386
lower_bound = 1.5
sparse = SparseSeries(data)
result = sparse.where(sparse > lower_bound, other)
dense = Series(data)
dense_expected = dense.where(dense > lower_bound, other)
sparse_expected = SparseSeries(dense_expected, fill_value=other)
tm.assert_series_equal(result, dense_expected)
tm.assert_sp_series_equal(result, sparse_expected)
@pytest.mark.xfail(reason='Wrong SparseBlock initialization '
'(GH 17386)')
def test_where_with_bool_data():
# GH 17386
data = [False, False, True, True, False, False]
cond = True
sparse = SparseSeries(data)
result = sparse.where(sparse == cond)
dense = Series(data)
dense_expected = dense.where(dense == cond)
sparse_expected = SparseSeries(dense_expected)
tm.assert_series_equal(result, dense_expected)
tm.assert_sp_series_equal(result, sparse_expected)
@pytest.mark.parametrize('other', [
True,
0,
0.1,
100.0 + 100.0j
])
@pytest.mark.skip(reason='Wrong SparseBlock initialization '
'(Segfault) '
'(GH 17386)')
def test_where_with_bool_data_and_other(other):
# GH 17386
data = [False, False, True, True, False, False]
cond = True
sparse = SparseSeries(data)
result = sparse.where(sparse == cond, other)
dense = Series(data)
dense_expected = dense.where(dense == cond, other)
sparse_expected = SparseSeries(dense_expected, fill_value=other)
tm.assert_series_equal(result, dense_expected)
tm.assert_sp_series_equal(result, sparse_expected)
| bsd-3-clause |
Eric89GXL/scikit-learn | examples/applications/plot_hmm_stock_analysis.py | 12 | 2783 | """
==========================
Gaussian HMM of stock data
==========================
This script shows how to use Gaussian HMM.
It uses stock price data, which can be obtained from yahoo finance.
For more information on how to get stock prices with matplotlib, please refer
to date_demo1.py of matplotlib.
"""
from __future__ import print_function
import datetime
import numpy as np
import pylab as pl
from matplotlib.finance import quotes_historical_yahoo
from matplotlib.dates import YearLocator, MonthLocator, DateFormatter
from sklearn.hmm import GaussianHMM
print(__doc__)
###############################################################################
# Downloading the data
date1 = datetime.date(1995, 1, 1) # start date
date2 = datetime.date(2012, 1, 6) # end date
# get quotes from yahoo finance
quotes = quotes_historical_yahoo("INTC", date1, date2)
if len(quotes) == 0:
raise SystemExit
# unpack quotes
dates = np.array([q[0] for q in quotes], dtype=int)
close_v = np.array([q[2] for q in quotes])
volume = np.array([q[5] for q in quotes])[1:]
# take diff of close value
# this makes len(diff) = len(close_t) - 1
# therefore, others quantity also need to be shifted
diff = close_v[1:] - close_v[:-1]
dates = dates[1:]
close_v = close_v[1:]
# pack diff and volume for training
X = np.column_stack([diff, volume])
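# each observation pairs the close-price change with the traded volume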
###############################################################################
# Run Gaussian HMM
print("fitting to HMM and decoding ...", end='')
n_components = 5
# make an HMM instance and execute fit
model = GaussianHMM(n_components, covariance_type="diag", n_iter=1000)
model.fit([X])
# predict the optimal sequence of internal hidden state
hidden_states = model.predict(X)
print("done\n")
###############################################################################
# print trained parameters and plot
print("Transition matrix")
print(model.transmat_)
print()
print("means and vars of each hidden state")
for i in range(n_components):
print("%dth hidden state" % i)
print("mean = ", model.means_[i])
print("var = ", np.diag(model.covars_[i]))
print()
years = YearLocator() # every year
months = MonthLocator() # every month
yearsFmt = DateFormatter('%Y')
fig = pl.figure()
ax = fig.add_subplot(111)
for i in range(n_components):
# use fancy indexing to plot data in each state
idx = (hidden_states == i)
ax.plot_date(dates[idx], close_v[idx], 'o', label="%dth hidden state" % i)
ax.legend()
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
ax.autoscale_view()
# format the coords message box
ax.fmt_xdata = DateFormatter('%Y-%m-%d')
ax.fmt_ydata = lambda x: '$%1.2f' % x
ax.grid(True)
fig.autofmt_xdate()
pl.show()
| bsd-3-clause |
stonebig/winpython_afterdoc | docs/minesweeper.py | 2 | 7264 | """
Matplotlib Minesweeper
----------------------
A simple Minesweeper implementation in matplotlib.
Author: Jake Vanderplas <vanderplas@astro.washington.edu>, Dec. 2012
License: BSD
"""
import numpy as np
from itertools import product
from scipy.signal import convolve2d
import matplotlib.pyplot as plt
from matplotlib.patches import RegularPolygon
class MineSweeper(object):
covered_color = '#DDDDDD'
uncovered_color = '#AAAAAA'
edge_color = '#888888'
count_colors = ['none', 'blue', 'green', 'red', 'darkblue',
'darkred', 'darkgreen', 'black', 'black']
flag_vertices = np.array([[0.25, 0.2], [0.25, 0.8],
[0.75, 0.65], [0.25, 0.5]])
@classmethod
def beginner(cls):
return cls(8, 8, 10)
@classmethod
def intermediate(cls):
return cls(16, 16, 40)
@classmethod
def expert(cls):
return cls(30, 16, 99)
def __init__(self, width, height, nmines):
self.width, self.height, self.nmines = width, height, nmines
# Create the figure and axes
self.fig = plt.figure(figsize=((width + 2) / 3., (height + 2) / 3.))
self.ax = self.fig.add_axes((0.05, 0.05, 0.9, 0.9),
aspect='equal', frameon=False,
xlim=(-0.05, width + 0.05),
ylim=(-0.05, height + 0.05))
for axis in (self.ax.xaxis, self.ax.yaxis):
axis.set_major_formatter(plt.NullFormatter())
axis.set_major_locator(plt.NullLocator())
# Create the grid of squares
self.squares = np.array([[RegularPolygon((i + 0.5, j + 0.5),
numVertices=4,
radius=0.5 * np.sqrt(2),
orientation=np.pi / 4,
ec=self.edge_color,
fc=self.covered_color)
for j in range(height)]
for i in range(width)])
[self.ax.add_patch(sq) for sq in self.squares.flat]
# define internal state variables
self.mines = None
self.counts = None
self.clicked = np.zeros((self.width, self.height), dtype=bool)
self.flags = np.zeros((self.width, self.height), dtype=object)
self.game_over = False
# Create event hook for mouse clicks
self.fig.canvas.mpl_connect('button_press_event', self._button_press)
def _draw_mine(self, i, j):
self.ax.add_patch(plt.Circle((i + 0.5, j + 0.5), radius=0.25,
ec='black', fc='black'))
def _draw_red_X(self, i, j):
self.ax.text(i + 0.5, j + 0.5, 'X', color='r', fontsize=20,
ha='center', va='center')
def _toggle_mine_flag(self, i, j):
if self.clicked[i, j]:
pass
elif self.flags[i, j]:
self.ax.patches.remove(self.flags[i, j])
self.flags[i, j] = None
else:
self.flags[i, j] = plt.Polygon(self.flag_vertices + [i, j],
fc='red', ec='black', lw=2)
self.ax.add_patch(self.flags[i, j])
def _reveal_unmarked_mines(self):
for (i, j) in zip(*np.where(self.mines & ~self.flags.astype(bool))):
self._draw_mine(i, j)
def _cross_out_wrong_flags(self):
for (i, j) in zip(*np.where(~self.mines & self.flags.astype(bool))):
self._draw_red_X(i, j)
def _mark_remaining_mines(self):
for (i, j) in zip(*np.where(self.mines & ~self.flags.astype(bool))):
self._toggle_mine_flag(i, j)
def _setup_mines(self, i, j):
# randomly place mines on a grid, but not on space (i, j)
idx = np.concatenate([np.arange(i * self.height + j),
np.arange(i * self.height + j + 1,
self.width * self.height)])
np.random.shuffle(idx)
self.mines = np.zeros((self.width, self.height), dtype=bool)
self.mines.flat[idx[:self.nmines]] = 1
# count the number of mines bordering each square
self.counts = convolve2d(self.mines.astype(complex), np.ones((3, 3)),
mode='same').real.astype(int)
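# (the 3x3 all-ones kernel sums each cell's eight neighbours plus the cell
#  itself, so mine cells also count themselves)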
def _click_square(self, i, j):
# if this is the first click, then set up the mines
if self.mines is None:
self._setup_mines(i, j)
# if there is a flag or square is already clicked, do nothing
if self.flags[i, j] or self.clicked[i, j]:
return
self.clicked[i, j] = True
# hit a mine: game over
if self.mines[i, j]:
self.game_over = True
self._reveal_unmarked_mines()
self._draw_red_X(i, j)
self._cross_out_wrong_flags()
# square with no surrounding mines: clear out all adjacent squares
elif self.counts[i, j] == 0:
self.squares[i, j].set_facecolor(self.uncovered_color)
for ii in range(max(0, i - 1), min(self.width, i + 2)):
for jj in range(max(0, j - 1), min(self.height, j + 2)):
self._click_square(ii, jj)
# hit an empty square: reveal the number
else:
self.squares[i, j].set_facecolor(self.uncovered_color)
self.ax.text(i + 0.5, j + 0.5, str(self.counts[i, j]),
color=self.count_colors[self.counts[i, j]],
ha='center', va='center', fontsize=18,
fontweight='bold')
# if all remaining squares are mines, mark them and end game
if self.mines.sum() == (~self.clicked).sum():
self.game_over = True
self._mark_remaining_mines()
def _button_press(self, event):
if self.game_over or (event.xdata is None) or (event.ydata is None):
return
i, j = map(int, (event.xdata, event.ydata))
if (i < 0 or j < 0 or i >= self.width or j >= self.height):
return
# left mouse button: reveal square. If the square is already clicked
# and the correct # of mines are marked, then clear surrounding squares
if event.button == 1:
if (self.clicked[i, j]):
flag_count = self.flags[max(0, i - 1):i + 2,
max(0, j - 1):j + 2].astype(bool).sum()
if self.counts[i, j] == flag_count:
for ii, jj in product(range(max(0, i - 1),
min(self.width, i + 2)),
range(max(0, j - 1),
min(self.height, j + 2))):
self._click_square(ii, jj)
else:
self._click_square(i, j)
# right mouse button: mark/unmark flag
elif (event.button == 3) and (not self.clicked[i, j]):
self._toggle_mine_flag(i, j)
self.fig.canvas.draw()
if __name__ == '__main__':
ms = MineSweeper.intermediate()
plt.show()
| mit |
rohanp/scikit-learn | examples/gaussian_process/plot_gpc_iris.py | 81 | 2231 | """
=====================================================
Gaussian process classification (GPC) on iris dataset
=====================================================
This example illustrates the predicted probability of GPC for an isotropic
and anisotropic RBF kernel on a two-dimensional version for the iris-dataset.
The anisotropic RBF kernel obtains slightly higher log-marginal-likelihood by
assigning different length-scales to the two feature dimensions.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = np.array(iris.target, dtype=int)
h = .02 # step size in the mesh
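# isotropic RBF: a single length-scale shared by both features;
# anisotropic RBF (below): a separate length-scale learned per feature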
kernel = 1.0 * RBF([1.0])
gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
kernel = 1.0 * RBF([1.0, 1.0])
gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
titles = ["Isotropic RBF", "Anisotropic RBF"]
plt.figure(figsize=(10, 5))
for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)):
# Plot the predicted probabilities. For that, we will assign a color to
# each point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 2, i + 1)
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape((xx.shape[0], xx.shape[1], 3))
plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower")
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y])
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title("%s, LML: %.3f" %
(titles[i], clf.log_marginal_likelihood(clf.kernel_.theta)))
plt.tight_layout()
plt.show()
| bsd-3-clause |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/sklearn/tests/test_random_projection.py | 1 | 14003 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
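# Illustrative usage sketch (not part of the upstream test-suite): the helpers
# above round-trip between dense and sparse representations of the same data.
def _example_round_trip():
    dense, sparse = make_sparse_random_data(5, 20, 10)
    # densify() leaves the ndarray untouched and densifies the CSR matrix
    return np.allclose(densify(dense), densify(sparse))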
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
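# Numeric illustration (sketch, not an upstream test): for n_samples=1000 and
# eps=0.1 the Johnson-Lindenstrauss bound evaluates to 5920 components -- the
# same figure that appears in the expected error message of
# test_too_many_samples_to_find_a_safe_embedding below.
def _example_jl_bound():
    return johnson_lindenstrauss_min_dim(1000, 0.1)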
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
    # Every random matrix generator should produce a transformation matrix
    # with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
check_input_size_random_matrix(random_matrix)
check_size_generated(random_matrix)
check_zero_mean_and_unit_norm(random_matrix)
for random_matrix in all_sparse_random_matrix:
check_input_with_sparse_random_matrix(random_matrix)
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
check_zero_mean_and_unit_norm(random_matrix_dense)
def test_gaussian_random_matrix():
    # Check some statistical properties of the Gaussian random matrix.
    # Check that the random matrix follows the proper distribution:
    # each element a_{ij} of A is drawn from
    # a_ij ~ N(0.0, 1 / n_components).
    #
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
    # Check some statistical properties of the sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
        # Check that the random matrix follows the proper distribution:
        # each element a_{ij} of A is drawn from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [0, 1, 2])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| mit |
asteca/ASteCA | packages/out/make_A2_plot.py | 1 | 2412 |
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from os.path import join
from . import mp_cent_dens
from . import add_version_plot
from . import prep_plots
from . prep_plots import grid_x, grid_y, figsize_x, figsize_y
def main(npd, cld_i, pd, clp):
"""
Make A2 block plots.
"""
fig = plt.figure(figsize=(figsize_x, figsize_y))
gs = gridspec.GridSpec(grid_y, grid_x)
add_version_plot.main()
# Obtain plotting parameters and data.
x_min, x_max, y_min, y_max = prep_plots.frame_max_min(
cld_i['x'], cld_i['y'])
asp_ratio = prep_plots.aspect_ratio(x_min, x_max, y_min, y_max)
coord, x_name, y_name = "deg", "ra", "dec"
st_sizes_arr = prep_plots.star_size(cld_i['mags'][0])
_, y_ax = prep_plots.ax_names(pd['colors'][0], pd['filters'][0], 'mag')
# Structure plots.
arglist = [
# pl_full_frame: x,y finding chart of full frame.
[gs, fig, pd['project'], clp['x_offset'], clp['y_offset'], x_name,
y_name, coord, x_min, x_max, y_min, y_max, asp_ratio, clp['kde_cent'],
cld_i['x'], cld_i['y'], st_sizes_arr, clp['clust_rad']],
# pl_densmap: 2D Gaussian convolved histogram.
[gs, fig, asp_ratio, x_name, y_name, coord, clp['bw_list'],
clp['kde_cent'], clp['frame_kde_cent'], clp['fr_dens'],
clp['clust_rad']],
# pl_knn_dens
[gs, fig, pd['plot_style'], asp_ratio, x_min, x_max, y_min, y_max,
x_name, y_name, coord, clp['NN_dd'], clp['xy_filtered'],
clp['fr_dens'], clp['NN_dist'], pd['project'], clp['x_offset'],
clp['y_offset'], clp['kde_cent'], clp['clust_rad']],
# pl_field_dens
[gs, pd['plot_style'], coord, pd['fdens_method'], clp['xy_cent_dist'],
clp['fr_dens'], clp['fdens_min_d'], clp['fdens_lst'],
clp['fdens_std_lst'], clp['field_dens_d'], clp['field_dens'],
clp['field_dens_std']],
# pl_centdist_vs_mag
[gs, fig, pd['plot_style'], y_ax, coord, cld_i['x'], cld_i['y'],
cld_i['mags'][0], clp['kde_cent'], clp['clust_rad'],
clp['integ_dists'], clp['integ_mags']]
]
for n, args in enumerate(arglist):
mp_cent_dens.plot(n, *args)
fig.tight_layout()
fname = join(npd['output_subdir'], npd['clust_name'] + '_A2' + npd['ext'])
plt.savefig(fname)
# Close to release memory.
plt.clf()
plt.close("all")
| gpl-3.0 |
JohnGBaker/ptmcmc | python/corner_with_covar.py | 1 | 27157 | # -*- coding: utf-8 -*-
# This code is adapted from
# https://github.com/dfm/corner.py
# git hash 5c2cd63 on May 25
# Modifications by John Baker NASA-GSFC (2016-18)
#Copyright (c) 2013-2016 Daniel Foreman-Mackey
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#The views and conclusions contained in the software and documentation are those
#of the authors and should not be interpreted as representing official policies,
#either expressed or implied, of the FreeBSD Project.
from __future__ import print_function, absolute_import
import logging
import math
import numpy as np
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator
from matplotlib.colors import LinearSegmentedColormap, colorConverter
from matplotlib.ticker import ScalarFormatter
from matplotlib.patches import Ellipse
try:
from scipy.ndimage import gaussian_filter
except ImportError:
gaussian_filter = None
__all__ = ["corner", "hist2d", "quantile"]
def corner(xs, bins=20, range=None, weights=None, cov=None, color="k",
smooth=None, smooth1d=None,
labels=None, label_kwargs=None,
show_titles=False, title_fmt=".2f", title_kwargs=None,
truths=None, truth_color="#4682b4",
scale_hist=False, quantiles=None, verbose=False, fig=None,
max_n_ticks=5, top_ticks=False, use_math_text=False,
hist_kwargs=None, **hist2d_kwargs):
"""
Make a *sick* corner plot showing the projections of a data set in a
multi-dimensional space. kwargs are passed to hist2d() or used for
`matplotlib` styling.
Parameters
----------
xs : array_like[nsamples, ndim]
The samples. This should be a 1- or 2-dimensional array. For a 1-D
array this results in a simple histogram. For a 2-D array, the zeroth
axis is the list of samples and the next axis are the dimensions of
the space.
bins : int or array_like[ndim,]
The number of bins to use in histograms, either as a fixed value for
all dimensions, or as a list of integers for each dimension.
weights : array_like[nsamples,]
The weight of each sample. If `None` (default), samples are given
equal weight.
color : str
A ``matplotlib`` style color for all histograms.
smooth, smooth1d : float
The standard deviation for Gaussian kernel passed to
`scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
respectively. If `None` (default), no smoothing is applied.
labels : iterable (ndim,)
A list of names for the dimensions. If a ``xs`` is a
``pandas.DataFrame``, labels will default to column names.
label_kwargs : dict
Any extra keyword arguments to send to the `set_xlabel` and
`set_ylabel` methods.
show_titles : bool
Displays a title above each 1-D histogram showing the 0.5 quantile
with the upper and lower errors supplied by the quantiles argument.
title_fmt : string
The format string for the quantiles given in titles. If you explicitly
set ``show_titles=True`` and ``title_fmt=None``, the labels will be
shown as the titles. (default: ``.2f``)
title_kwargs : dict
Any extra keyword arguments to send to the `set_title` command.
range : iterable (ndim,)
A list where each element is either a length 2 tuple containing
lower and upper bounds or a float in range (0., 1.)
giving the fraction of samples to include in bounds, e.g.,
[(0.,10.), (1.,5), 0.999, etc.].
If a fraction, the bounds are chosen to be equal-tailed.
truths : iterable (ndim,)
A list of reference values to indicate on the plots. Individual
values can be omitted by using ``None``.
truth_color : str
        A ``matplotlib`` style color for the ``truths`` markers.
scale_hist : bool
Should the 1-D histograms be scaled in such a way that the zero line
is visible?
quantiles : iterable
A list of fractional quantiles to show on the 1-D histograms as
vertical dashed lines.
verbose : bool
If true, print the values of the computed quantiles.
plot_contours : bool
Draw contours for dense regions of the plot.
use_math_text : bool
If true, then axis tick labels for very large or small exponents will
be displayed as powers of 10 rather than using `e`.
max_n_ticks: int
Maximum number of ticks to try to use
top_ticks : bool
If true, label the top ticks of each axis
fig : matplotlib.Figure
Overplot onto the provided figure object.
hist_kwargs : dict
Any extra keyword arguments to send to the 1-D histogram plots.
**hist2d_kwargs
Any remaining keyword arguments are sent to `corner.hist2d` to generate
the 2-D histogram plots.
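    Example
    -------
    A minimal call might look like the following (illustrative sketch, not
    part of the upstream corner.py documentation; the sample values are
    made up):
    >>> samples = np.random.randn(5000, 3)   # stand-in for an MCMC chain
    >>> figure = corner(samples, labels=["a", "b", "c"],
    ...                 truths=[0., 0., 0.], show_titles=True)  # doctest: +SKIP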
"""
if quantiles is None:
quantiles = []
if title_kwargs is None:
title_kwargs = dict()
if label_kwargs is None:
label_kwargs = dict()
# Try filling in labels from pandas.DataFrame columns.
if labels is None:
try:
labels = xs.columns
except AttributeError:
pass
# Deal with 1D sample lists and cov-only calls.
covcolor='r'
if xs is not None:
xs = np.atleast_1d(xs)
if len(xs.shape) == 1:
xs = np.atleast_2d(xs)
else:
assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
xs = xs.T
assert xs.shape[0] <= xs.shape[1], "I don't believe that you want more " \
"dimensions than samples!"
else:
if cov is not None:
xs=np.array([None for x in cov])
covcolor=color
K = len(xs)
# Parse the weight array.
if weights is not None:
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("Weights must be 1-D")
if xs.shape[1] != weights.shape[0]:
raise ValueError("Lengths of weights must match number of samples")
# Parse the parameter ranges.
if range is None:
if "extents" in hist2d_kwargs:
logging.warn("Deprecated keyword argument 'extents'. "
"Use 'range' instead.")
range = hist2d_kwargs.pop("extents")
else:
if xs[0] is not None:
range = [[x.min(), x.max()] for x in xs]
# Check for parameters that never change.
m = np.array([e[0] == e[1] for e in range], dtype=bool)
if np.any(m):
raise ValueError(("It looks like the parameter(s) in "
"column(s) {0} have no dynamic range. "
"Please provide a `range` argument.")
.format(", ".join(map(
"{0}".format, np.arange(len(m))[m]))))
else: #infer range from covar
print("Inferring range from cov (beta). Perhaps provide a 'range' argument.")
                range = [[truths[i] - 3 * math.sqrt(cov[i, i]), truths[i] + 3 * math.sqrt(cov[i, i])]
                         for i in np.arange(K)]  # +/-3 sigma; the diagonal of cov is a variance
else:
# If any of the extents are percentiles, convert them to ranges.
# Also make sure it's a normal list.
range = list(range)
for i, _ in enumerate(range):
try:
emin, emax = range[i]
except TypeError:
q = [0.5 - 0.5*range[i], 0.5 + 0.5*range[i]]
range[i] = quantile(xs[i], q, weights=weights)
if len(range) != xs.shape[0]:
raise ValueError("Dimension mismatch between samples and range")
# Parse the bin specifications.
try:
bins = [int(bins) for _ in range]
except TypeError:
if len(bins) != len(range):
raise ValueError("Dimension mismatch between bins and range")
# Some magic numbers for pretty axis layout.
factor = 2.0 # size of one side of one panel
lbdim = 0.5 * factor # size of left/bottom margin
trdim = 0.2 * factor # size of top/right margin
whspace = 0.05 # w/hspace size
plotdim = factor * K + factor * (K - 1.) * whspace
dim = lbdim + plotdim + trdim
# Create a new figure if one wasn't provided.
if fig is None:
fig, axes = pl.subplots(K, K, figsize=(dim, dim))
else:
try:
axes = np.array(fig.axes).reshape((K, K))
except:
raise ValueError("Provided figure has {0} axes, but data has "
"dimensions K={1}".format(len(fig.axes), K))
#idea is to pass in covariance, otherwise concoct something from the 1-sigma range.
    if cov is not None and len(cov) == 0:
print("concocting covar elements from 1-sigma ranges")
cov=np.zeros((K,K))
for k in np.arange(K):
q_16, q_50, q_84 = quantile(xs[k], [0.16, 0.5, 0.84],weights=weights)
deltax=(q_84-q_16)/2.0
cov[k,k]=deltax**2
#print("cov=",cov)
# Format the figure.
lb = lbdim / dim
tr = (lbdim + plotdim) / dim
fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,
wspace=whspace, hspace=whspace)
# Set up the default histogram keywords.
if hist_kwargs is None:
hist_kwargs = dict()
hist_kwargs["color"] = hist_kwargs.get("color", color)
if smooth1d is None:
hist_kwargs["histtype"] = hist_kwargs.get("histtype", "step")
for i, x in enumerate(xs):
# Deal with masked arrays.
if hasattr(x, "compressed"):
x = x.compressed()
if np.shape(xs)[0] == 1:
ax = axes
else:
ax = axes[i, i]
if x is not None:
#This is to normalize the histogram so that different data can be compared
if(weights is None):
hist1d_wts=[1.0/len(x) for w in x]
else:
hist1d_wts=[w*1.0/len(x) for w in weights]
# Plot the histograms.
if smooth1d is None:
n, _, _ = ax.hist(x, bins=bins[i], weights=hist1d_wts,
range=np.sort(range[i]), **hist_kwargs)
else:
if gaussian_filter is None:
raise ImportError("Please install scipy for smoothing")
n, b = np.histogram(x, bins=bins[i], weights=hist1d_wts,
range=np.sort(range[i]))
n = gaussian_filter(n, smooth1d)
x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
y0 = np.array(list(zip(n, n))).flatten()
ax.plot(x0, y0, **hist_kwargs)
if truths is not None and truths[i] is not None:
ax.axvline(truths[i], color=truth_color)
# Plot quantiles if wanted.
if len(quantiles) > 0:
qvalues = quantile(x, quantiles, weights=weights)
for q in qvalues:
ax.axvline(q, ls="dashed", color=color)
if verbose:
print("Quantiles:")
print([item for item in zip(quantiles, qvalues)])
if show_titles:
title = None
if title_fmt is not None:
# Compute the quantiles for the title. This might redo
# unneeded computation but who cares.
q_16, q_50, q_84 = quantile(x, [0.16, 0.5, 0.84],
weights=weights)
q_m, q_p = q_50-q_16, q_84-q_50
# Format the quantile display.
fmt = "{{0:{0}}}".format(title_fmt).format
title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
title = title.format(fmt(q_50), fmt(q_m), fmt(q_p))
# Add in the column name if it's given.
if labels is not None:
title = "{0} = {1}".format(labels[i], title)
elif labels is not None:
title = "{0}".format(labels[i])
if title is not None:
ax.set_title(title, **title_kwargs)
# Set up the axes.
ax.set_xlim(range[i])
if scale_hist:
maxn = np.max(n)
ax.set_ylim(-0.1 * maxn, 1.1 * maxn)
else:
ax.set_ylim(0, 1.1 * np.max(n))
ax.set_yticklabels([])
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
if i < K - 1:
if top_ticks:
ax.xaxis.set_ticks_position("top")
[l.set_rotation(45) for l in ax.get_xticklabels()]
else:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
if labels is not None:
ax.set_xlabel(labels[i], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
# use MathText for axes ticks
ax.xaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
for j, y in enumerate(xs):
if np.shape(xs)[0] == 1:
ax = axes
else:
ax = axes[i, j]
if j > 0:
ax.set_yticklabels([])
else:
[l.set_rotation(45) for l in ax.get_yticklabels()]
if labels is not None:
ax.set_ylabel(labels[i], **label_kwargs)
ax.yaxis.set_label_coords(-0.3, 0.5)
# use MathText for axes ticks
ax.yaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
if j > i:
ax.set_frame_on(False)
ax.set_xticks([])
ax.set_yticks([])
continue
elif j == i:
continue
if x is not None and y is not None:
# Deal with masked arrays.
if hasattr(y, "compressed"):
y = y.compressed()
hist2d(y, x, ax=ax, range=[range[j], range[i]], weights=weights,
color=color, smooth=smooth, bins=[bins[j], bins[i]],
**hist2d_kwargs)
#add covariance ellipses
if(cov is not None):
#center
cx=truths[j]#need to add checking for availability of truths?
cy=truths[i]
#ang=math.acos(cov[0,1]/math.sqrt(cov[0,0]*cov[1,1]))*180/math.pi
#print (j,i,labels[j],labels[i],"center=",cx,cy)
#add an error ellipse
N_thetas=60
dtheta=2.0*math.pi/(N_thetas-1)
thetas=np.arange(0,(2.0*math.pi+dtheta),dtheta)
#Cplus=(cov[i,i]+cov[j,j])/2.0
#Cminus=(-cov[i,i]+cov[j,j])/2.0
#print("cov[ii],cov[ij],cov[jj],Cplus,Cminus:",cov[i,i],cov[i,j],cov[j,j],Cplus,Cminus)
ang=-math.pi/4.
root=cov[i,j]/math.sqrt(cov[i,i]*cov[j,j])
if(root>1):root=1
if(root<-1):root=-1
acoeff=math.sqrt(1-root)
bcoeff=math.sqrt(1+root)
xcoeff=math.sqrt(cov[j,j])
ycoeff=math.sqrt(cov[i,i])
#print("a2,b2",acoeff*acoeff,bcoeff*bcoeff)
#print("a,b,ang, xcoeff,ycoeff, root=",acoeff,bcoeff,ang,xcoeff,ycoeff,root)
if "levels" in hist2d_kwargs:
levels= hist2d_kwargs["levels"]
else:
                        levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
for xlev in levels:
                        # Convert the credibility level y of this contour to a
                        # "sigma" scale factor x for a 2-d normal error ellipse:
                        #   1 - exp(-x^2 / 2) = y  =>  x = sqrt(-2 * log(1 - y))
lev_fac = math.sqrt( -2 * math.log( 1 - xlev ) )
#print ("scales for quantile level = ",xlev," -> ",lev_fac,": (",xcoeff*lev_fac,",",ycoeff*lev_fac,")")
elxs=[cx+lev_fac*xcoeff*(acoeff*math.cos(th)*math.cos(ang)-bcoeff*math.sin(th)*math.sin(ang)) for th in thetas]
elys=[cy+lev_fac*ycoeff*(acoeff*math.cos(th)*math.sin(ang)+bcoeff*math.sin(th)*math.cos(ang)) for th in thetas]
ax.plot(elxs,elys,color=covcolor)
ax.grid()
if truths is not None:
if truths[i] is not None and truths[j] is not None:
ax.plot(truths[j], truths[i], "s", color=truth_color)
if truths[j] is not None:
ax.axvline(truths[j], color=truth_color)
if truths[i] is not None:
ax.axhline(truths[i], color=truth_color)
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
if i < K - 1:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
if labels is not None:
ax.set_xlabel(labels[j], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
# use MathText for axes ticks
ax.xaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
if j > 0:
ax.set_yticklabels([])
else:
[l.set_rotation(45) for l in ax.get_yticklabels()]
if labels is not None:
ax.set_ylabel(labels[i], **label_kwargs)
ax.yaxis.set_label_coords(-0.3, 0.5)
# use MathText for axes ticks
ax.yaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
return fig
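# Usage sketch for the covariance-overlay feature of this fork (illustrative
# only; the sample and covariance values below are made up): passing ``cov``
# together with ``truths`` draws Gaussian error ellipses derived from ``cov``
# on top of the sampled contours, at the same credibility levels.
def _example_corner_with_cov():
    samples = np.random.randn(2000, 2)
    cov = np.array([[1.0, 0.3], [0.3, 1.0]])
    return corner(samples, labels=["x", "y"], truths=[0.0, 0.0], cov=cov)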
def quantile(x, q, weights=None):
"""
Compute sample quantiles with support for weighted samples.
Note
----
When ``weights`` is ``None``, this method simply calls numpy's percentile
function with the values of ``q`` multiplied by 100.
Parameters
----------
x : array_like[nsamples,]
The samples.
q : array_like[nquantiles,]
The list of quantiles to compute. These should all be in the range
``[0, 1]``.
weights : Optional[array_like[nsamples,]]
        An optional weight corresponding to each sample. These should have
        the same length as ``x``.
Returns
-------
quantiles : array_like[nquantiles,]
The sample quantiles computed at ``q``.
Raises
------
ValueError
For invalid quantiles; ``q`` not in ``[0, 1]`` or dimension mismatch
between ``x`` and ``weights``.
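    Example
    -------
    Illustrative sketch (not part of the original docstring); the unweighted
    call simply defers to ``np.percentile``:
    >>> quantile(np.arange(10.), q=[0.25, 0.5, 0.75])  # doctest: +SKIP
    array([2.25, 4.5 , 6.75])
    >>> quantile([1., 2., 3., 4.], q=[0.5], weights=[1., 1., 1., 5.])  # doctest: +SKIP
    [2.5]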
"""
x = np.atleast_1d(x)
q = np.atleast_1d(q)
if np.any(q < 0.0) or np.any(q > 1.0):
raise ValueError("Quantiles must be between 0 and 1")
if weights is None:
return np.percentile(x, 100.0 * q)
else:
weights = np.atleast_1d(weights)
if len(x) != len(weights):
raise ValueError("Dimension mismatch: len(weights) != len(x)")
idx = np.argsort(x)
sw = weights[idx]
cdf = np.cumsum(sw)[:-1]
cdf /= cdf[-1]
cdf = np.append(0, cdf)
return np.interp(q, cdf, x[idx]).tolist()
def hist2d(x, y, bins=20, range=None, weights=None, levels=None, smooth=None,
ax=None, color=None, plot_datapoints=True, plot_density=True,
plot_contours=True, no_fill_contours=False, fill_contours=False,
contour_kwargs=None, contourf_kwargs=None, data_kwargs=None,
**kwargs):
"""
Plot a 2-D histogram of samples.
Parameters
----------
x : array_like[nsamples,]
The samples.
y : array_like[nsamples,]
The samples.
levels : array_like
The contour levels to draw.
ax : matplotlib.Axes
A axes instance on which to add the 2-D histogram.
plot_datapoints : bool
Draw the individual data points.
plot_density : bool
Draw the density colormap.
plot_contours : bool
Draw the contours.
no_fill_contours : bool
Add no filling at all to the contours (unlike setting
``fill_contours=False``, which still adds a white fill at the densest
points).
fill_contours : bool
Fill the contours.
contour_kwargs : dict
Any additional keyword arguments to pass to the `contour` method.
contourf_kwargs : dict
Any additional keyword arguments to pass to the `contourf` method.
data_kwargs : dict
Any additional keyword arguments to pass to the `plot` method when
adding the individual data points.
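    Example
    -------
    Illustrative sketch (not part of the original docstring); the data and
    keyword values are made up:
    >>> xy = np.random.multivariate_normal([0., 0.], [[1., 0.5], [0.5, 1.]], 5000)
    >>> hist2d(xy[:, 0], xy[:, 1], bins=40, smooth=1.0)  # doctest: +SKIP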
"""
if ax is None:
ax = pl.gca()
# Set the default range based on the data range if not provided.
if range is None:
if "extent" in kwargs:
logging.warn("Deprecated keyword argument 'extent'. "
"Use 'range' instead.")
range = kwargs["extent"]
else:
range = [[x.min(), x.max()], [y.min(), y.max()]]
# Set up the default plotting arguments.
if color is None:
color = "k"
# Choose the default "sigma" contour levels.
if levels is None:
levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
# This is the color map for the density plot, over-plotted to indicate the
# density of the points near the center.
density_cmap = LinearSegmentedColormap.from_list(
"density_cmap", [color, (1, 1, 1, 0)])
# This color map is used to hide the points at the high density areas.
white_cmap = LinearSegmentedColormap.from_list(
"white_cmap", [(1, 1, 1), (1, 1, 1)], N=2)
# This "color map" is the list of colors for the contour levels if the
# contours are filled.
rgba_color = colorConverter.to_rgba(color)
contour_cmap = [list(rgba_color) for l in levels] + [rgba_color]
for i, l in enumerate(levels):
contour_cmap[i][-1] *= float(i) / (len(levels)+1)
# We'll make the 2D histogram to directly estimate the density.
try:
H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=bins,
range=list(map(np.sort, range)),
weights=weights)
except ValueError:
raise ValueError("It looks like at least one of your sample columns "
"have no dynamic range. You could try using the "
"'range' argument.")
if smooth is not None:
if gaussian_filter is None:
raise ImportError("Please install scipy for smoothing")
H = gaussian_filter(H, smooth)
# Compute the density levels.
Hflat = H.flatten()
inds = np.argsort(Hflat)[::-1]
Hflat = Hflat[inds]
sm = np.cumsum(Hflat)
sm /= sm[-1]
V = np.empty(len(levels))
for i, v0 in enumerate(levels):
try:
V[i] = Hflat[sm <= v0][-1]
except:
V[i] = Hflat[0]
V.sort()
m = np.diff(V) == 0
if np.any(m):
logging.warning("Too few points to create valid contours")
while np.any(m):
V[np.where(m)[0][0]] *= 1.0 - 1e-4
m = np.diff(V) == 0
V.sort()
# Compute the bin centers.
X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
# Extend the array for the sake of the contours at the plot edges.
H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
H2[2:-2, 2:-2] = H
H2[2:-2, 1] = H[:, 0]
H2[2:-2, -2] = H[:, -1]
H2[1, 2:-2] = H[0]
H2[-2, 2:-2] = H[-1]
H2[1, 1] = H[0, 0]
H2[1, -2] = H[0, -1]
H2[-2, 1] = H[-1, 0]
H2[-2, -2] = H[-1, -1]
X2 = np.concatenate([
X1[0] + np.array([-2, -1]) * np.diff(X1[:2]),
X1,
X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]),
])
Y2 = np.concatenate([
Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]),
Y1,
Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]),
])
if plot_datapoints:
if data_kwargs is None:
data_kwargs = dict()
data_kwargs["color"] = data_kwargs.get("color", color)
data_kwargs["ms"] = data_kwargs.get("ms", 2.0)
data_kwargs["mec"] = data_kwargs.get("mec", "none")
data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1)
ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs)
# Plot the base fill to hide the densest data points.
if (plot_contours or plot_density) and not no_fill_contours:
ax.contourf(X2, Y2, H2.T, [V.min(), H.max()],
cmap=white_cmap, antialiased=False)
if plot_contours and fill_contours:
if contourf_kwargs is None:
contourf_kwargs = dict()
contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap)
contourf_kwargs["antialiased"] = contourf_kwargs.get("antialiased",
False)
ax.contourf(X2, Y2, H2.T, np.concatenate([[0], V, [H.max()*(1+1e-4)]]),
**contourf_kwargs)
# Plot the density map. This can't be plotted at the same time as the
# contour fills.
elif plot_density:
ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap)
# Plot the contour edge colors.
if plot_contours:
if contour_kwargs is None:
contour_kwargs = dict()
contour_kwargs["colors"] = contour_kwargs.get("colors", color)
ax.contour(X2, Y2, H2.T, V, **contour_kwargs)
ax.set_xlim(range[0])
ax.set_ylim(range[1])
| apache-2.0 |
rs2/pandas | pandas/core/groupby/groupby.py | 1 | 95861 | """
Provide the groupby split-apply-combine paradigm. Define the GroupBy
class providing the base-class of operations.
The SeriesGroupBy and DataFrameGroupBy sub-class
(defined in pandas.core.groupby.generic)
expose these user-facing objects to provide specific functionality.
"""
from contextlib import contextmanager
import datetime
from functools import partial, wraps
import inspect
import re
import types
from typing import (
Callable,
Dict,
FrozenSet,
Generic,
Hashable,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from pandas._config.config import option_context
from pandas._libs import Timestamp, lib
import pandas._libs.groupby as libgroupby
from pandas._typing import F, FrameOrSeries, FrameOrSeriesUnion, Label, Scalar
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly, doc
from pandas.core.dtypes.cast import maybe_cast_result
from pandas.core.dtypes.common import (
ensure_float,
is_bool_dtype,
is_datetime64_dtype,
is_extension_array_dtype,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import nanops
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical, DatetimeArray
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base, numba_, ops
from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
from pandas.core.util.numba_ import NUMBA_FUNC_CACHE
_common_see_also = """
See Also
--------
Series.%(name)s
DataFrame.%(name)s
"""
_apply_docs = dict(
template="""
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a {input} as its first
argument and return a DataFrame, Series or scalar. `apply` will
then take care of combining the results back together into a single
dataframe or series. `apply` is therefore a highly flexible
grouping method.
While `apply` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
    like `agg` or `transform`. Pandas offers a wide range of methods that will
be much faster than using `apply` for their specific purposes, so try to
use them before reaching for `apply`.
Parameters
----------
func : callable
A callable that takes a {input} as its first argument, and
returns a dataframe, a series or a scalar. In addition the
callable may take positional and keyword arguments.
args, kwargs : tuple and dict
Optional positional and keyword arguments to pass to `func`.
Returns
-------
applied : Series or DataFrame
See Also
--------
pipe : Apply function to the full GroupBy object instead of to each
group.
aggregate : Apply aggregate function to the GroupBy object.
transform : Apply function column-by-column to the GroupBy object.
Series.apply : Apply a function to a Series.
DataFrame.apply : Apply a function to each row or column of a DataFrame.
""",
dataframe_examples="""
>>> df = pd.DataFrame({'A': 'a a b'.split(),
'B': [1,2,3],
'C': [4,6, 5]})
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: below the function passed to `apply` takes a DataFrame as
its argument and returns a DataFrame. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x / x.sum())
B C
0 0.333333 0.4
1 0.666667 0.6
2 1.000000 1.0
Example 2: The function passed to `apply` takes a DataFrame as
its argument and returns a Series. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x.max() - x.min())
B C
A
a 1 2
b 0 0
Example 3: The function passed to `apply` takes a DataFrame as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.C.max() - x.B.min())
A
a 5
b 2
dtype: int64
""",
series_examples="""
>>> s = pd.Series([0, 1, 2], index='a a b'.split())
>>> g = s.groupby(s.index)
From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: The function passed to `apply` takes a Series as
its argument and returns a Series. `apply` combines the result for
each group together into a new Series:
>>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)
0 0.0
1 0.5
2 4.0
dtype: float64
Example 2: The function passed to `apply` takes a Series as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.max() - x.min())
a 1
b 0
dtype: int64
Notes
-----
In the current implementation `apply` calls `func` twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
group.
Examples
--------
{examples}
""",
)
_groupby_agg_method_template = """
Compute {fname} of group values.
Parameters
----------
numeric_only : bool, default {no}
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
min_count : int, default {mc}
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
Returns
-------
Series or DataFrame
Computed {fname} of values within each group.
"""
_pipe_template = """
Apply a function `func` with arguments to this %(klass)s object and return
the function's result.
Use `.pipe` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing
>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c) # doctest: +SKIP
You can write
>>> (df.groupby('group')
... .pipe(f)
... .pipe(g, arg1=a)
... .pipe(h, arg2=b, arg3=c)) # doctest: +SKIP
which is much more readable.
Parameters
----------
func : callable or tuple of (callable, str)
Function to apply to this %(klass)s object or, alternatively,
a `(callable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `callable` that expects the
%(klass)s object.
args : iterable, optional
Positional arguments passed into `func`.
kwargs : dict, optional
A dictionary of keyword arguments passed into `func`.
Returns
-------
object : the return type of `func`.
See Also
--------
Series.pipe : Apply a function with arguments to a series.
DataFrame.pipe: Apply a function with arguments to a dataframe.
apply : Apply function to each group instead of to the
full %(klass)s object.
Notes
-----
See more `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#piping-function-calls>`_
Examples
--------
%(examples)s
"""
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
    filled with the transformed values.
Parameters
----------
f : function
Function to apply to each group.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default None
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.groupby.apply
%(klass)s.groupby.aggregate
%(klass)s.transform
Notes
-----
Each group is endowed the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
For example, if `f` returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
C D
0 -1.154701 -0.577350
1 0.577350 0.000000
2 0.577350 1.154701
3 -1.154701 -1.000000
4 0.577350 -0.577350
5 0.577350 1.000000
Broadcast result of the transformation
>>> grouped.transform(lambda x: x.max() - x.min())
C D
0 4 6.0
1 3 8.0
2 4 6.0
3 3 8.0
4 4 6.0
5 3 8.0
"""
_agg_template = """
Aggregate using one or more operations over the specified axis.
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a {klass} or when passed to {klass}.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
Can also accept a Numba JIT function with
``engine='numba'`` specified. Only passing a single function is supported
with this engine.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default None
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
{klass}
See Also
--------
{klass}.groupby.apply
{klass}.groupby.transform
{klass}.aggregate
Notes
-----
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
{examples}
"""
class GroupByPlot(PandasObject):
"""
Class implementing the .plot attribute for groupby objects.
"""
def __init__(self, groupby):
self._groupby = groupby
def __call__(self, *args, **kwargs):
def f(self):
return self.plot(*args, **kwargs)
f.__name__ = "plot"
return self._groupby.apply(f)
def __getattr__(self, name: str):
def attr(*args, **kwargs):
def f(self):
return getattr(self.plot, name)(*args, **kwargs)
return self._groupby.apply(f)
return attr
@contextmanager
def group_selection_context(groupby: "BaseGroupBy"):
"""
Set / reset the group_selection_context.
"""
groupby._set_group_selection()
try:
yield groupby
finally:
groupby._reset_group_selection()
_KeysArgType = Union[
Hashable,
List[Hashable],
Callable[[Hashable], Hashable],
List[Callable[[Hashable], Hashable]],
Mapping[Hashable, Hashable],
]
class BaseGroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]):
_group_selection = None
_apply_allowlist: FrozenSet[str] = frozenset()
def __init__(
self,
obj: FrameOrSeries,
keys: Optional[_KeysArgType] = None,
axis: int = 0,
level=None,
grouper: Optional["ops.BaseGrouper"] = None,
exclusions: Optional[Set[Label]] = None,
selection=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
mutated: bool = False,
dropna: bool = True,
):
self._selection = selection
assert isinstance(obj, NDFrame), type(obj)
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError("as_index=False only valid with DataFrame")
if axis != 0:
raise ValueError("as_index=False only valid for axis=0")
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
self.observed = observed
self.mutated = mutated
self.dropna = dropna
if grouper is None:
from pandas.core.groupby.grouper import get_grouper
grouper, exclusions, obj = get_grouper(
obj,
keys,
axis=axis,
level=level,
sort=sort,
observed=observed,
mutated=self.mutated,
dropna=self.dropna,
)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = exclusions or set()
def __len__(self) -> int:
return len(self.groups)
def __repr__(self) -> str:
# TODO: Better repr for GroupBy object
return object.__repr__(self)
def _assure_grouper(self):
"""
        We create the grouper on instantiation; sub-classes may have a
        different policy.
"""
pass
@property
def groups(self):
"""
Dict {group name -> group labels}.
"""
self._assure_grouper()
return self.grouper.groups
@property
def ngroups(self):
self._assure_grouper()
return self.grouper.ngroups
@property
def indices(self):
"""
Dict {group name -> group indices}.
"""
self._assure_grouper()
return self.grouper.indices
def _get_indices(self, names):
"""
Safe get multiple indices, translate keys for
datelike to underlying repr.
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, datetime.datetime):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = "must supply a tuple to get_group with multiple grouping keys"
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError as err:
# turns out it wasn't a tuple
msg = (
"must supply a same-length tuple to get_group "
"with multiple grouping keys"
)
raise ValueError(msg) from err
converters = [get_converter(s) for s in index_sample]
names = (tuple(f(n) for f, n in zip(converters, name)) for name in names)
else:
converter = get_converter(index_sample)
names = (converter(name) for name in names)
return [self.indices.get(name, []) for name in names]
def _get_index(self, name):
"""
Safe get index, translate keys for datelike to underlying repr.
"""
return self._get_indices([name])[0]
@cache_readonly
def _selected_obj(self):
# Note: _selected_obj is always just `self.obj` for SeriesGroupBy
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _reset_group_selection(self):
"""
Clear group based selection.
Used for methods needing to return info on each group regardless of
whether a group selection was previously set.
"""
if self._group_selection is not None:
# GH12839 clear cached selection too when changing group selection
self._group_selection = None
self._reset_cache("_selected_obj")
def _set_group_selection(self):
"""
Create group based selection.
Used when selection is not passed directly but instead via a grouper.
NOTE: this should be paired with a call to _reset_group_selection
"""
grp = self.grouper
if not (
self.as_index
and getattr(grp, "groupings", None) is not None
and self.obj.ndim > 1
and self._group_selection is None
):
return
groupers = [g.name for g in grp.groupings if g.level is None and g.in_axis]
if len(groupers):
# GH12839 clear selected obj cache when group selection changes
ax = self.obj._info_axis
self._group_selection = ax.difference(Index(groupers), sort=False).tolist()
self._reset_cache("_selected_obj")
def _set_result_index_ordered(self, result):
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate(self._get_indices(self.grouper.result_index)))
result.set_axis(index, axis=self.axis, inplace=True)
result = result.sort_index(axis=self.axis)
result.set_axis(self.obj._get_axis(self.axis), axis=self.axis, inplace=True)
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._apply_allowlist
def __getattr__(self, attr: str):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{attr}'"
)
@Substitution(
klass="GroupBy",
examples="""\
>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})
>>> df
A B
0 a 1
1 b 2
2 a 3
3 b 4
To get the difference between each groups maximum and minimum value in one
pass, you can do
>>> df.groupby('A').pipe(lambda x: x.max() - x.min())
B
A
a 2
b 2""",
)
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
plot = property(GroupByPlot)
def _make_wrapper(self, name: str) -> Callable:
assert name in self._apply_allowlist
with group_selection_context(self):
# need to setup the selection
# as are not passed directly but in the grouper
f = getattr(self._obj_with_exclusions, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._obj_with_exclusions), name)
sig = inspect.signature(f)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
if "axis" in sig.parameters:
if kwargs.get("axis", None) is None:
kwargs["axis"] = self.axis
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in base.plotting_methods:
return self.apply(curried)
try:
return self._python_apply_general(curried, self._obj_with_exclusions)
except TypeError as err:
if not re.search(
"reduction operation '.*' not allowed for this dtype", str(err)
):
# We don't have a cython implementation
# TODO: is the above comment accurate?
raise
if self.obj.ndim == 1:
# this can be called recursively, so need to raise ValueError
raise ValueError
# GH#3688 try to operate item-by-item
result = self._aggregate_item_by_item(name, *args, **kwargs)
return result
wrapper.__name__ = name
return wrapper
def get_group(self, name, obj=None):
"""
Construct DataFrame from group with provided name.
Parameters
----------
name : object
The name of the group to get as a DataFrame.
obj : DataFrame, default None
The DataFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used.
Returns
-------
group : same type as obj
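        Examples
        --------
        Illustrative sketch (not taken from the upstream docstring):
        >>> df = pd.DataFrame({"A": ["a", "b", "a"], "B": [1, 2, 3]})
        >>> df.groupby("A").get_group("a")  # doctest: +SKIP
           A  B
        0  a  1
        2  a  3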
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
if not len(inds):
raise KeyError(name)
return obj._take_with_is_copy(inds, axis=self.axis)
def __iter__(self):
"""
Groupby iterator.
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
@Appender(
_apply_docs["template"].format(
input="dataframe", examples=_apply_docs["dataframe_examples"]
)
)
def apply(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
# this is needed so we don't try and wrap strings. If we could
# resolve functions to their callable functions prior, this
# wouldn't be needed
if args or kwargs:
if callable(func):
@wraps(func)
def f(g):
with np.errstate(all="ignore"):
return func(g, *args, **kwargs)
elif hasattr(nanops, "nan" + func):
# TODO: should we wrap this in to e.g. _is_builtin_func?
f = getattr(nanops, "nan" + func)
else:
raise ValueError(
"func must be a callable if args or kwargs are supplied"
)
else:
f = func
# ignore SettingWithCopy here in case the user mutates
with option_context("mode.chained_assignment", None):
try:
result = self._python_apply_general(f, self._selected_obj)
except TypeError:
# gh-20949
# try again, with .apply acting as a filtering
# operation, by excluding the grouping column
# This would normally not be triggered
# except if the udf is trying an operation that
# fails on *some* columns, e.g. a numeric operation
# on a string grouper column
with group_selection_context(self):
return self._python_apply_general(f, self._selected_obj)
return result
def _python_apply_general(
self, f: F, data: FrameOrSeriesUnion
) -> FrameOrSeriesUnion:
"""
Apply function f in python space
Parameters
----------
f : callable
Function to apply
data : Series or DataFrame
Data to apply f to
Returns
-------
Series or DataFrame
data after applying f
"""
keys, values, mutated = self.grouper.apply(f, data, self.axis)
return self._wrap_applied_output(
keys, values, not_indexed_same=mutated or self.mutated
)
def _iterate_slices(self) -> Iterable[Series]:
raise AbstractMethodError(self)
def transform(self, func, *args, **kwargs):
raise AbstractMethodError(self)
def _cumcount_array(self, ascending: bool = True):
"""
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Notes
-----
this is currently implementing sort=False
(though the default is sort=True) for groupby in general
"""
ids, _, ngroups = self.grouper.group_info
sorter = get_group_index_sorter(ids, ngroups)
ids, count = ids[sorter], len(ids)
if count == 0:
return np.empty(0, dtype=np.int64)
run = np.r_[True, ids[:-1] != ids[1:]]
rep = np.diff(np.r_[np.nonzero(run)[0], count])
out = (~run).cumsum()
if ascending:
out -= np.repeat(out[run], rep)
else:
out = np.repeat(out[np.r_[run[1:], True]], rep) - out
rev = np.empty(count, dtype=np.intp)
rev[sorter] = np.arange(count, dtype=np.intp)
return out[rev].astype(np.int64, copy=False)
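# Illustrative sketch (not part of pandas): the run-length trick used above, traced
# on a toy, already-sorted ids array. Only numpy is assumed; the array is hypothetical.
#
#   ids = np.array([0, 0, 0, 1, 1, 2])                  # sorted group ids
#   run = np.r_[True, ids[:-1] != ids[1:]]              # True at each group start
#   rep = np.diff(np.r_[np.nonzero(run)[0], len(ids)])  # group sizes: [3, 2, 1]
#   out = (~run).cumsum()                               # [0, 1, 2, 2, 3, 3]
#   out -= np.repeat(out[run], rep)                     # reset at each group start
#   # out is now [0, 1, 2, 0, 1, 0]: the ascending cumcount within each group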
def _transform_should_cast(self, func_nm: str) -> bool:
"""
Parameters
----------
func_nm: str
The name of the aggregation function being performed
Returns
-------
bool
Whether transform should attempt to cast the result of aggregation
"""
filled_series = self.grouper.size().fillna(0)
assert filled_series is not None
return filled_series.gt(0).any() and func_nm not in base.cython_cast_blocklist
def _cython_transform(self, how: str, numeric_only: bool = True, **kwargs):
output: Dict[base.OutputKey, np.ndarray] = {}
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, _ = self.grouper.transform(obj.values, how, **kwargs)
except NotImplementedError:
continue
if self._transform_should_cast(how):
result = maybe_cast_result(result, obj, how=how)
key = base.OutputKey(label=name, position=idx)
output[key] = result
if len(output) == 0:
raise DataError("No numeric types to aggregate")
return self._wrap_transformed_output(output)
def _wrap_aggregated_output(
self, output: Mapping[base.OutputKey, np.ndarray], index: Optional[Index]
):
raise AbstractMethodError(self)
def _wrap_transformed_output(self, output: Mapping[base.OutputKey, np.ndarray]):
raise AbstractMethodError(self)
def _wrap_applied_output(self, keys, values, not_indexed_same: bool = False):
raise AbstractMethodError(self)
def _agg_general(
self,
numeric_only: bool = True,
min_count: int = -1,
*,
alias: str,
npfunc: Callable,
):
with group_selection_context(self):
# try a cython aggregation if we can
try:
return self._cython_agg_general(
how=alias,
alt=npfunc,
numeric_only=numeric_only,
min_count=min_count,
)
except DataError:
pass
except NotImplementedError as err:
if "function is not implemented for this dtype" in str(
err
) or "category dtype not supported" in str(err):
# raised in _get_cython_function, in some cases can
# be trimmed by implementing cython funcs for more dtypes
pass
else:
raise
# apply a non-cython aggregation
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
return result
def _cython_agg_general(
self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
):
output: Dict[base.OutputKey, Union[np.ndarray, DatetimeArray]] = {}
# Ideally we would be able to enumerate self._iterate_slices and use
# the index from enumeration as the key of output, but ohlc in particular
# returns a (n x 4) array. Output requires 1D ndarrays as values, so we
# need to slice that up into 1D arrays
idx = 0
for obj in self._iterate_slices():
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
result, agg_names = self.grouper.aggregate(
obj._values, how, min_count=min_count
)
if agg_names:
# e.g. ohlc
assert len(agg_names) == result.shape[1]
for result_column, result_name in zip(result.T, agg_names):
key = base.OutputKey(label=result_name, position=idx)
output[key] = maybe_cast_result(result_column, obj, how=how)
idx += 1
else:
assert result.ndim == 1
key = base.OutputKey(label=name, position=idx)
output[key] = maybe_cast_result(result, obj, how=how)
idx += 1
if len(output) == 0:
raise DataError("No numeric types to aggregate")
return self._wrap_aggregated_output(output, index=self.grouper.result_index)
def _transform_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs):
"""
Perform groupby transform routine with the numba engine.
This routine mimics the data splitting routine of the DataSplitter class
to generate the indices of each group in the sorted data and then passes the
data and indices into a Numba jitted function.
"""
if not callable(func):
raise NotImplementedError(
"Numba engine can only be used with a single function."
)
group_keys = self.grouper._get_group_keys()
labels, _, n_groups = self.grouper.group_info
sorted_index = get_group_index_sorter(labels, n_groups)
sorted_labels = algorithms.take_nd(labels, sorted_index, allow_fill=False)
sorted_data = data.take(sorted_index, axis=self.axis).to_numpy()
starts, ends = lib.generate_slices(sorted_labels, n_groups)
numba_transform_func = numba_.generate_numba_transform_func(
tuple(args), kwargs, func, engine_kwargs
)
result = numba_transform_func(
sorted_data, sorted_index, starts, ends, len(group_keys), len(data.columns)
)
cache_key = (func, "groupby_transform")
if cache_key not in NUMBA_FUNC_CACHE:
NUMBA_FUNC_CACHE[cache_key] = numba_transform_func
# result values needs to be resorted to their original positions since we
# evaluated the data sorted by group
return result.take(np.argsort(sorted_index), axis=0)
def _aggregate_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs):
"""
Perform groupby aggregation routine with the numba engine.
This routine mimics the data splitting routine of the DataSplitter class
to generate the indices of each group in the sorted data and then passes the
data and indices into a Numba jitted function.
"""
if not callable(func):
raise NotImplementedError(
"Numba engine can only be used with a single function."
)
group_keys = self.grouper._get_group_keys()
labels, _, n_groups = self.grouper.group_info
sorted_index = get_group_index_sorter(labels, n_groups)
sorted_labels = algorithms.take_nd(labels, sorted_index, allow_fill=False)
sorted_data = data.take(sorted_index, axis=self.axis).to_numpy()
starts, ends = lib.generate_slices(sorted_labels, n_groups)
numba_agg_func = numba_.generate_numba_agg_func(
tuple(args), kwargs, func, engine_kwargs
)
result = numba_agg_func(
sorted_data, sorted_index, starts, ends, len(group_keys), len(data.columns)
)
cache_key = (func, "groupby_agg")
if cache_key not in NUMBA_FUNC_CACHE:
NUMBA_FUNC_CACHE[cache_key] = numba_agg_func
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(group_keys, names=self.grouper.names)
else:
index = Index(group_keys, name=self.grouper.names[0])
return result, index
def _python_agg_general(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" (excluding exclusions) to populate output dict
output: Dict[base.OutputKey, np.ndarray] = {}
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
if self.grouper.ngroups == 0:
# agg_series below assumes ngroups > 0
continue
try:
# if this function is invalid for this dtype, we will ignore it.
result, counts = self.grouper.agg_series(obj, f)
except TypeError:
continue
assert result is not None
key = base.OutputKey(label=name, position=idx)
output[key] = maybe_cast_result(result, obj, numeric_only=True)
if len(output) == 0:
return self._python_apply_general(f, self._selected_obj)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for key, result in output.items():
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = ensure_float(values)
output[key] = maybe_cast_result(values[mask], result)
return self._wrap_aggregated_output(output, index=self.grouper.result_index)
def _concat_objects(self, keys, values, not_indexed_same: bool = False):
from pandas.core.reshape.concat import concat
def reset_identity(values):
# reset the identities of the components
# of the values to prevent aliasing
for v in com.not_none(*values):
ax = v._get_axis(self.axis)
ax._reset_identity()
return values
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
# this is a very unfortunate situation
# we can't use reindex to restore the original order
# when the ax has duplicates
# so we resort to this
# GH 14776, 30667
if ax.has_duplicates:
indexer, _ = result.index.get_indexer_non_unique(ax.values)
indexer = algorithms.unique1d(indexer)
result = result.take(indexer, axis=self.axis)
else:
result = result.reindex(ax, axis=self.axis)
elif self.group_keys:
values = reset_identity(values)
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(
values,
axis=self.axis,
keys=group_keys,
levels=group_levels,
names=group_names,
sort=False,
)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
values = reset_identity(values)
result = concat(values, axis=self.axis)
if isinstance(result, Series) and self._selection_name is not None:
result.name = self._selection_name
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = np.array([], dtype="int64")
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
# To track operations that expand dimensions, like ohlc
OutputFrameOrSeries = TypeVar("OutputFrameOrSeries", bound=NDFrame)
class GroupBy(BaseGroupBy[FrameOrSeries]):
"""
Class for grouping and aggregating relational data.
See aggregate, transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : str
Most users should ignore this
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more.
"""
@property
def _obj_1d_constructor(self) -> Type["Series"]:
# GH28330 preserve subclassed Series/DataFrames
if isinstance(self.obj, DataFrame):
return self.obj._constructor_sliced
assert isinstance(self.obj, Series)
return self.obj._constructor
def _bool_agg(self, val_test, skipna):
"""
Shared func to call any / all Cython GroupBy implementations.
"""
def objs_to_bool(vals: np.ndarray) -> Tuple[np.ndarray, Type]:
if is_object_dtype(vals):
vals = np.array([bool(x) for x in vals])
else:
vals = vals.astype(bool)
return vals.view(np.uint8), bool
def result_to_bool(result: np.ndarray, inference: Type) -> np.ndarray:
return result.astype(inference, copy=False)
return self._get_cythonized_result(
"group_any_all",
aggregate=True,
numeric_only=False,
cython_dtype=np.dtype(np.uint8),
needs_values=True,
needs_mask=True,
pre_processing=objs_to_bool,
post_processing=result_to_bool,
val_test=val_test,
skipna=skipna,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def any(self, skipna: bool = True):
"""
Return True if any value in the group is truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
bool
"""
return self._bool_agg("any", skipna)
@Substitution(name="groupby")
@Appender(_common_see_also)
def all(self, skipna: bool = True):
"""
Return True if all values in the group are truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
bool
"""
return self._bool_agg("all", skipna)
@Substitution(name="groupby")
@Appender(_common_see_also)
def count(self):
"""
Compute count of group, excluding missing values.
Returns
-------
Series or DataFrame
Count of values within each group.
"""
# defined here for API doc
raise NotImplementedError
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def mean(self, numeric_only: bool = True):
"""
Compute mean of groups, excluding missing values.
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
pandas.Series or pandas.DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
Groupby one column and return the mean of the remaining columns in
each group.
>>> df.groupby('A').mean()
B C
A
1 3.0 1.333333
2 4.0 1.500000
Groupby two columns and return the mean of the remaining column.
>>> df.groupby(['A', 'B']).mean()
C
A B
1 2.0 2
4.0 1
2 3.0 1
5.0 2
Groupby one column and return the mean of only particular column in
the group.
>>> df.groupby('A')['B'].mean()
A
1 3.0
2 4.0
Name: B, dtype: float64
"""
return self._cython_agg_general(
"mean",
alt=lambda x, axis: Series(x).mean(numeric_only=numeric_only),
numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def median(self, numeric_only=True):
"""
Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
Series or DataFrame
Median of values within each group.
"""
return self._cython_agg_general(
"median",
alt=lambda x, axis: Series(x).median(axis=axis, numeric_only=numeric_only),
numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def std(self, ddof: int = 1):
"""
Compute standard deviation of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Standard deviation of values within each group.
"""
return self._get_cythonized_result(
"group_var_float64",
aggregate=True,
needs_counts=True,
needs_values=True,
needs_2d=True,
cython_dtype=np.dtype(np.float64),
post_processing=lambda vals, inference: np.sqrt(vals),
ddof=ddof,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def var(self, ddof: int = 1):
"""
Compute variance of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Variance of values within each group.
"""
if ddof == 1:
return self._cython_agg_general(
"var", alt=lambda x, axis: Series(x).var(ddof=ddof)
)
else:
func = lambda x: x.var(ddof=ddof)
with group_selection_context(self):
return self._python_agg_general(func)
@Substitution(name="groupby")
@Appender(_common_see_also)
def sem(self, ddof: int = 1):
"""
Compute standard error of the mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Standard error of the mean of values within each group.
"""
result = self.std(ddof=ddof)
if result.ndim == 1:
result /= np.sqrt(self.count())
else:
cols = result.columns.get_indexer_for(
result.columns.difference(self.exclusions).unique()
)
# TODO(GH-22046) - setting with iloc broken if labels are not unique
# .values to remove labels
result.iloc[:, cols] = (
result.iloc[:, cols].values / np.sqrt(self.count().iloc[:, cols]).values
)
return result
@Substitution(name="groupby")
@Appender(_common_see_also)
def size(self) -> FrameOrSeriesUnion:
"""
Compute group sizes.
Returns
-------
DataFrame or Series
Number of rows in each group as a Series if as_index is True
or a DataFrame if as_index is False.
"""
result = self.grouper.size()
# GH28330 preserve subclassed Series/DataFrames through calls
if issubclass(self.obj._constructor, Series):
result = self._obj_1d_constructor(result, name=self.obj.name)
else:
result = self._obj_1d_constructor(result)
if not self.as_index:
result = result.rename("size").reset_index()
return self._reindex_output(result, fill_value=0)
@doc(_groupby_agg_method_template, fname="sum", no=True, mc=0)
def sum(self, numeric_only: bool = True, min_count: int = 0):
# If we are grouping on categoricals we want unobserved categories to
# return zero, rather than the default of NaN which the reindexing in
# _agg_general() returns. GH #31422
with com.temp_setattr(self, "observed", True):
result = self._agg_general(
numeric_only=numeric_only,
min_count=min_count,
alias="add",
npfunc=np.sum,
)
return self._reindex_output(result, fill_value=0)
@doc(_groupby_agg_method_template, fname="prod", no=True, mc=0)
def prod(self, numeric_only: bool = True, min_count: int = 0):
return self._agg_general(
numeric_only=numeric_only, min_count=min_count, alias="prod", npfunc=np.prod
)
@doc(_groupby_agg_method_template, fname="min", no=False, mc=-1)
def min(self, numeric_only: bool = False, min_count: int = -1):
return self._agg_general(
numeric_only=numeric_only, min_count=min_count, alias="min", npfunc=np.min
)
@doc(_groupby_agg_method_template, fname="max", no=False, mc=-1)
def max(self, numeric_only: bool = False, min_count: int = -1):
return self._agg_general(
numeric_only=numeric_only, min_count=min_count, alias="max", npfunc=np.max
)
@doc(_groupby_agg_method_template, fname="first", no=False, mc=-1)
def first(self, numeric_only: bool = False, min_count: int = -1):
def first_compat(obj: FrameOrSeries, axis: int = 0):
def first(x: Series):
"""Helper function for first item that isn't NA."""
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(obj, DataFrame):
return obj.apply(first, axis=axis)
elif isinstance(obj, Series):
return first(obj)
else:
raise TypeError(type(obj))
return self._agg_general(
numeric_only=numeric_only,
min_count=min_count,
alias="first",
npfunc=first_compat,
)
@doc(_groupby_agg_method_template, fname="last", no=False, mc=-1)
def last(self, numeric_only: bool = False, min_count: int = -1):
def last_compat(obj: FrameOrSeries, axis: int = 0):
def last(x: Series):
"""Helper function for last item that isn't NA."""
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(obj, DataFrame):
return obj.apply(last, axis=axis)
elif isinstance(obj, Series):
return last(obj)
else:
raise TypeError(type(obj))
return self._agg_general(
numeric_only=numeric_only,
min_count=min_count,
alias="last",
npfunc=last_compat,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def ohlc(self) -> DataFrame:
"""
Compute open, high, low and close values of a group, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Returns
-------
DataFrame
Open, high, low and close values within each group.
"""
return self._apply_to_column_groupbys(lambda x: x._cython_agg_general("ohlc"))
@doc(DataFrame.describe)
def describe(self, **kwargs):
with group_selection_context(self):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
def resample(self, rule, *args, **kwargs):
"""
Provide resampling when using a TimeGrouper.
Given a grouper, the function resamples it according to a string
"string" -> "frequency".
See the :ref:`frequency aliases <timeseries.offset_aliases>`
documentation for more details.
Parameters
----------
rule : str or DateOffset
The offset string or object representing target grouper conversion.
*args, **kwargs
Possible arguments are `how`, `fill_method`, `limit`, `kind` and
`on`, and other arguments of `TimeGrouper`.
Returns
-------
Grouper
Return a new grouper with our resampler appended.
See Also
--------
Grouper : Specify a frequency to resample with when
grouping by a key.
DatetimeIndex.resample : Frequency conversion and resampling of
time series.
Examples
--------
>>> idx = pd.date_range('1/1/2000', periods=4, freq='T')
>>> df = pd.DataFrame(data=4 * [range(2)],
... index=idx,
... columns=['a', 'b'])
>>> df.iloc[2, 0] = 5
>>> df
a b
2000-01-01 00:00:00 0 1
2000-01-01 00:01:00 0 1
2000-01-01 00:02:00 5 1
2000-01-01 00:03:00 0 1
Downsample the DataFrame into 3 minute bins and sum the values of
the timestamps falling into a bin.
>>> df.groupby('a').resample('3T').sum()
a b
a
0 2000-01-01 00:00:00 0 2
2000-01-01 00:03:00 0 1
5 2000-01-01 00:00:00 5 1
Upsample the series into 30 second bins.
>>> df.groupby('a').resample('30S').sum()
a b
a
0 2000-01-01 00:00:00 0 1
2000-01-01 00:00:30 0 0
2000-01-01 00:01:00 0 1
2000-01-01 00:01:30 0 0
2000-01-01 00:02:00 0 0
2000-01-01 00:02:30 0 0
2000-01-01 00:03:00 0 1
5 2000-01-01 00:02:00 5 1
Resample by month. Values are assigned to the month of the period.
>>> df.groupby('a').resample('M').sum()
a b
a
0 2000-01-31 0 3
5 2000-01-31 5 1
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> df.groupby('a').resample('3T', closed='right').sum()
a b
a
0 1999-12-31 23:57:00 0 1
2000-01-01 00:00:00 0 2
5 2000-01-01 00:00:00 5 1
Downsample the series into 3 minute bins and close the right side of
the bin interval, but label each bin using the right edge instead of
the left.
>>> df.groupby('a').resample('3T', closed='right', label='right').sum()
a b
a
0 2000-01-01 00:00:00 0 1
2000-01-01 00:03:00 0 2
5 2000-01-01 00:03:00 5 1
"""
from pandas.core.resample import get_resampler_for_grouping
return get_resampler_for_grouping(self, rule, *args, **kwargs)
@Substitution(name="groupby")
@Appender(_common_see_also)
def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling functionality per group.
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs)
@Substitution(name="groupby")
@Appender(_common_see_also)
def expanding(self, *args, **kwargs):
"""
Return an expanding grouper, providing expanding
functionality per group.
"""
from pandas.core.window import ExpandingGroupby
return ExpandingGroupby(self, *args, **kwargs)
def _fill(self, direction, limit=None):
"""
Shared function for `pad` and `backfill` to call Cython method.
Parameters
----------
direction : {'ffill', 'bfill'}
Direction passed to underlying Cython function. `bfill` will cause
values to be filled backwards. `ffill` and any other values will
default to a forward fill
limit : int, default None
Maximum number of consecutive values to fill. If `None`, this
method will convert to -1 prior to passing to Cython
Returns
-------
`Series` or `DataFrame` with filled values
See Also
--------
pad
backfill
"""
# Need int value for Cython
if limit is None:
limit = -1
return self._get_cythonized_result(
"group_fillna_indexer",
numeric_only=False,
needs_mask=True,
cython_dtype=np.dtype(np.int64),
result_is_index=True,
direction=direction,
limit=limit,
)
@Substitution(name="groupby")
def pad(self, limit=None):
"""
Forward fill the values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series or DataFrame
Object with missing values filled.
See Also
--------
Series.pad
DataFrame.pad
Series.fillna
DataFrame.fillna
"""
return self._fill("ffill", limit=limit)
ffill = pad
@Substitution(name="groupby")
def backfill(self, limit=None):
"""
Backward fill the values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series or DataFrame
Object with missing values filled.
See Also
--------
Series.backfill
DataFrame.backfill
Series.fillna
DataFrame.fillna
"""
return self._fill("bfill", limit=limit)
bfill = backfill
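# Illustrative sketch (not from the pandas source): group-wise ffill never crosses
# group boundaries. Hypothetical frame; behaviour as of pandas ~1.1:
#
#   df = DataFrame({"g": ["a", "a", "b", "b"], "v": [1.0, np.nan, np.nan, 4.0]})
#   df.groupby("g")["v"].ffill().tolist()
#   # -> [1.0, 1.0, nan, 4.0]: the NaN that starts group "b" stays missing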
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def nth(self, n: Union[int, List[int]], dropna: Optional[str] = None) -> DataFrame:
"""
Take the nth row from each group if n is an int, or a subset of rows
if n is a list of ints.
If dropna, will take the nth non-null row, dropna is either
'all' or 'any'; this is equivalent to calling dropna(how=dropna)
before the groupby.
Parameters
----------
n : int or list of ints
A single nth value for the row or a list of nth values.
dropna : None or str, optional
Apply the specified dropna operation before counting which row is
the nth row. Needs to be None, 'any' or 'all'.
Returns
-------
Series or DataFrame
N-th value within each group.
%(see_also)s
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
B
A
1 NaN
2 3.0
>>> g.nth(1)
B
A
1 2.0
2 5.0
>>> g.nth(-1)
B
A
1 4.0
2 5.0
>>> g.nth([0, 1])
B
A
1 NaN
1 2.0
2 3.0
2 5.0
Specifying `dropna` allows counting while ignoring ``NaN`` values
>>> g.nth(0, dropna='any')
B
A
1 2.0
2 3.0
NaNs denote that a group has been exhausted when using dropna
>>> g.nth(3, dropna='any')
B
A
1 NaN
2 NaN
Specifying `as_index=False` in `groupby` keeps the original index.
>>> df.groupby('A', as_index=False).nth(1)
A B
1 1 2.0
4 2 5.0
"""
valid_containers = (set, list, tuple)
if not isinstance(n, (valid_containers, int)):
raise TypeError("n needs to be an int or a list/set/tuple of ints")
if not dropna:
if isinstance(n, int):
nth_values = [n]
elif isinstance(n, valid_containers):
nth_values = list(set(n))
nth_array = np.array(nth_values, dtype=np.intp)
with group_selection_context(self):
mask_left = np.in1d(self._cumcount_array(), nth_array)
mask_right = np.in1d(
self._cumcount_array(ascending=False) + 1, -nth_array
)
mask = mask_left | mask_right
ids, _, _ = self.grouper.group_info
# Drop NA values in grouping
mask = mask & (ids != -1)
out = self._selected_obj[mask]
if not self.as_index:
return out
result_index = self.grouper.result_index
out.index = result_index[ids[mask]]
if not self.observed and isinstance(result_index, CategoricalIndex):
out = out.reindex(result_index)
out = self._reindex_output(out)
return out.sort_index() if self.sort else out
# dropna is truthy
if isinstance(n, valid_containers):
raise ValueError("dropna option with a list of nth values is not supported")
if dropna not in ["any", "all"]:
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError(
"For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
f"(was passed {dropna})."
)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else -1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available
# (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
else:
# create a grouper with the original parameters, but on dropped
# object
from pandas.core.groupby.grouper import get_grouper
grouper, _, _ = get_grouper(
dropped,
key=self.keys,
axis=self.axis,
level=self.level,
sort=self.sort,
mutated=self.mutated,
)
grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
sizes, result = grb.size(), grb.nth(n)
mask = (sizes < max_len)._values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(
self.grouper.result_index
):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def quantile(self, q=0.5, interpolation: str = "linear"):
"""
Return group values at the given quantile, a la numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value(s) between 0 and 1 providing the quantile(s) to compute.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Method to use when the desired quantile falls between two points.
Returns
-------
Series or DataFrame
Return type determined by caller of GroupBy object.
See Also
--------
Series.quantile : Similar method for Series.
DataFrame.quantile : Similar method for DataFrame.
numpy.percentile : NumPy method to compute qth percentile.
Examples
--------
>>> df = pd.DataFrame([
... ['a', 1], ['a', 2], ['a', 3],
... ['b', 1], ['b', 3], ['b', 5]
... ], columns=['key', 'val'])
>>> df.groupby('key').quantile()
val
key
a 2.0
b 3.0
"""
from pandas import concat
def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]:
if is_object_dtype(vals):
raise TypeError(
"'quantile' cannot be performed against 'object' dtypes!"
)
inference = None
if is_integer_dtype(vals.dtype):
if is_extension_array_dtype(vals.dtype):
vals = vals.to_numpy(dtype=float, na_value=np.nan)
inference = np.int64
elif is_bool_dtype(vals.dtype) and is_extension_array_dtype(vals.dtype):
vals = vals.to_numpy(dtype=float, na_value=np.nan)
elif is_datetime64_dtype(vals.dtype):
inference = "datetime64[ns]"
vals = np.asarray(vals).astype(float)
return vals, inference
def post_processor(vals: np.ndarray, inference: Optional[Type]) -> np.ndarray:
if inference:
# Check for edge case
if not (
is_integer_dtype(inference)
and interpolation in {"linear", "midpoint"}
):
vals = vals.astype(inference)
return vals
if is_scalar(q):
return self._get_cythonized_result(
"group_quantile",
aggregate=True,
numeric_only=False,
needs_values=True,
needs_mask=True,
cython_dtype=np.dtype(np.float64),
pre_processing=pre_processor,
post_processing=post_processor,
q=q,
interpolation=interpolation,
)
else:
results = [
self._get_cythonized_result(
"group_quantile",
aggregate=True,
needs_values=True,
needs_mask=True,
cython_dtype=np.dtype(np.float64),
pre_processing=pre_processor,
post_processing=post_processor,
q=qi,
interpolation=interpolation,
)
for qi in q
]
result = concat(results, axis=0, keys=q)
# fix levels to place quantiles on the inside
# TODO(GH-10710): Ideally, we could write this as
# >>> result.stack(0).loc[pd.IndexSlice[:, ..., q], :]
# but this hits https://github.com/pandas-dev/pandas/issues/10710
# which doesn't reorder the list-like `q` on the inner level.
order = list(range(1, result.index.nlevels)) + [0]
# temporarily saves the index names
index_names = np.array(result.index.names)
# set index names to positions to avoid confusion
result.index.names = np.arange(len(index_names))
# place quantiles on the inside
result = result.reorder_levels(order)
# restore the index names in order
result.index.names = index_names[order]
# reorder rows to keep things sorted
indices = np.arange(len(result)).reshape([len(q), self.ngroups]).T.flatten()
return result.take(indices)
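# Illustrative sketch (not from the pandas source): with list-like q the quantile
# level ends up on the innermost index level, per the reordering above.
# Hypothetical frame:
#
#   df = DataFrame({"key": ["a", "a", "b", "b"], "val": [1.0, 2.0, 3.0, 4.0]})
#   df.groupby("key")["val"].quantile([0.25, 0.75])
#   # -> MultiIndex of (key, quantile); values [1.25, 1.75, 3.25, 3.75]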
@Substitution(name="groupby")
def ngroup(self, ascending: bool = True):
"""
Number each group from 0 to the number of groups - 1.
This is the enumerative complement of cumcount. Note that the
numbers given to the groups match the order in which the groups
would be seen when iterating over the groupby object, not the
order they are first observed.
Parameters
----------
ascending : bool, default True
If False, number in reverse, from number of group - 1 to 0.
Returns
-------
Series
Unique numbers for each group.
See Also
--------
.cumcount : Number the rows in each group.
Examples
--------
>>> df = pd.DataFrame({"A": list("aaabba")})
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').ngroup()
0 0
1 0
2 0
3 1
4 1
5 0
dtype: int64
>>> df.groupby('A').ngroup(ascending=False)
0 1
1 1
2 1
3 0
4 0
5 1
dtype: int64
>>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup()
0 0
1 0
2 1
3 3
4 2
5 0
dtype: int64
"""
with group_selection_context(self):
index = self._selected_obj.index
result = self._obj_1d_constructor(self.grouper.group_info[0], index)
if not ascending:
result = self.ngroups - 1 - result
return result
@Substitution(name="groupby")
def cumcount(self, ascending: bool = True):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
.. code-block:: python
self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Returns
-------
Series
Sequence number of each element within each group.
See Also
--------
.ngroup : Number the groups themselves.
Examples
--------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
with group_selection_context(self):
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return self._obj_1d_constructor(cumcounts, index)
@Substitution(name="groupby")
@Appender(_common_see_also)
def rank(
self,
method: str = "average",
ascending: bool = True,
na_option: str = "keep",
pct: bool = False,
axis: int = 0,
):
"""
Provide the rank of values within each group.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
* average: average rank of group.
* min: lowest rank in group.
* max: highest rank in group.
* first: ranks assigned in order they appear in the array.
* dense: like 'min', but rank always increases by 1 between groups.
ascending : bool, default True
False for ranks by high (1) to low (N).
na_option : {'keep', 'top', 'bottom'}, default 'keep'
* keep: leave NA values where they are.
* top: smallest rank if ascending.
* bottom: smallest rank if descending.
pct : bool, default False
Compute percentage rank of data within each group.
axis : int, default 0
The axis of the object over which to compute the rank.
Returns
-------
DataFrame with ranking of values within each group
"""
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
return self._cython_transform(
"rank",
numeric_only=False,
ties_method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
axis=axis,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def cumprod(self, axis=0, *args, **kwargs):
"""
Cumulative product for each group.
Returns
-------
Series or DataFrame
"""
nv.validate_groupby_func("cumprod", args, kwargs, ["numeric_only", "skipna"])
if axis != 0:
return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))
return self._cython_transform("cumprod", **kwargs)
@Substitution(name="groupby")
@Appender(_common_see_also)
def cumsum(self, axis=0, *args, **kwargs):
"""
Cumulative sum for each group.
Returns
-------
Series or DataFrame
"""
nv.validate_groupby_func("cumsum", args, kwargs, ["numeric_only", "skipna"])
if axis != 0:
return self.apply(lambda x: x.cumsum(axis=axis, **kwargs))
return self._cython_transform("cumsum", **kwargs)
@Substitution(name="groupby")
@Appender(_common_see_also)
def cummin(self, axis=0, **kwargs):
"""
Cumulative min for each group.
Returns
-------
Series or DataFrame
"""
if axis != 0:
return self.apply(lambda x: np.minimum.accumulate(x, axis))
return self._cython_transform("cummin", numeric_only=False)
@Substitution(name="groupby")
@Appender(_common_see_also)
def cummax(self, axis=0, **kwargs):
"""
Cumulative max for each group.
Returns
-------
Series or DataFrame
"""
if axis != 0:
return self.apply(lambda x: np.maximum.accumulate(x, axis))
return self._cython_transform("cummax", numeric_only=False)
def _get_cythonized_result(
self,
how: str,
cython_dtype: np.dtype,
aggregate: bool = False,
numeric_only: bool = True,
needs_counts: bool = False,
needs_values: bool = False,
needs_2d: bool = False,
min_count: Optional[int] = None,
needs_mask: bool = False,
needs_ngroups: bool = False,
result_is_index: bool = False,
pre_processing=None,
post_processing=None,
**kwargs,
):
"""
Get result for Cythonized functions.
Parameters
----------
how : str, Cythonized function name to be called
cython_dtype : np.dtype
Type of the array that will be modified by the Cython call.
aggregate : bool, default False
Whether the result should be aggregated to match the number of
groups
numeric_only : bool, default True
Whether only numeric datatypes should be computed
needs_counts : bool, default False
Whether the counts should be a part of the Cython call
needs_values : bool, default False
Whether the values should be a part of the Cython call
signature
needs_2d : bool, default False
Whether the values and result of the Cython call signature
are 2-dimensional.
min_count : int, default None
When not None, min_count for the Cython call
needs_mask : bool, default False
Whether boolean mask needs to be part of the Cython call
signature
needs_ngroups : bool, default False
Whether number of groups is part of the Cython call signature
result_is_index : bool, default False
Whether the result of the Cython operation is an index of
values to be retrieved, instead of the actual values themselves
pre_processing : function, default None
Function to be applied to `values` prior to passing to Cython.
Function should return a tuple where the first element is the
values to be passed to Cython and the second element is an optional
type which the values should be converted to after being returned
by the Cython operation. This function is also responsible for
raising a TypeError if the values have an invalid type. Raises
if `needs_values` is False.
post_processing : function, default None
Function to be applied to result of Cython function. Should accept
an array of values as the first argument and type inferences as its
second argument, i.e. the signature should be
(ndarray, Type).
**kwargs : dict
Extra arguments to be passed back to Cython funcs
Returns
-------
`Series` or `DataFrame` with filled values
"""
if result_is_index and aggregate:
raise ValueError("'result_is_index' and 'aggregate' cannot both be True!")
if post_processing:
if not callable(post_processing):
raise ValueError("'post_processing' must be a callable!")
if pre_processing:
if not callable(pre_processing):
raise ValueError("'pre_processing' must be a callable!")
if not needs_values:
raise ValueError(
"Cannot use 'pre_processing' without specifying 'needs_values'!"
)
grouper = self.grouper
labels, _, ngroups = grouper.group_info
output: Dict[base.OutputKey, np.ndarray] = {}
base_func = getattr(libgroupby, how)
error_msg = ""
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
values = obj._values
if numeric_only and not is_numeric_dtype(values):
continue
if aggregate:
result_sz = ngroups
else:
result_sz = len(values)
result = np.zeros(result_sz, dtype=cython_dtype)
if needs_2d:
result = result.reshape((-1, 1))
func = partial(base_func, result)
inferences = None
if needs_counts:
counts = np.zeros(self.ngroups, dtype=np.int64)
func = partial(func, counts)
if needs_values:
vals = values
if pre_processing:
try:
vals, inferences = pre_processing(vals)
except TypeError as e:
error_msg = str(e)
continue
if needs_2d:
vals = vals.reshape((-1, 1))
vals = vals.astype(cython_dtype, copy=False)
func = partial(func, vals)
func = partial(func, labels)
if min_count is not None:
func = partial(func, min_count)
if needs_mask:
mask = isna(values).view(np.uint8)
func = partial(func, mask)
if needs_ngroups:
func = partial(func, ngroups)
func(**kwargs) # Call func to modify indexer values in place
if needs_2d:
result = result.reshape(-1)
if result_is_index:
result = algorithms.take_nd(values, result)
if post_processing:
result = post_processing(result, inferences)
key = base.OutputKey(label=name, position=idx)
output[key] = result
# error_msg is "" on a frame/series with no rows or columns
if len(output) == 0 and error_msg != "":
raise TypeError(error_msg)
if aggregate:
return self._wrap_aggregated_output(output, index=self.grouper.result_index)
else:
return self._wrap_transformed_output(output)
@Substitution(name="groupby")
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
"""
Shift each group by periods observations.
If freq is passed, the index will be increased using the periods and the freq.
Parameters
----------
periods : int, default 1
Number of periods to shift.
freq : str, optional
Frequency string.
axis : axis to shift, default 0
Shift direction.
fill_value : optional
The scalar value to use for newly introduced missing values.
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Object shifted within each group.
See Also
--------
Index.shift : Shift values of Index.
tshift : Shift the time index, using the index’s frequency
if available.
"""
if freq is not None or axis != 0 or not isna(fill_value):
return self.apply(lambda x: x.shift(periods, freq, axis, fill_value))
return self._get_cythonized_result(
"group_shift_indexer",
numeric_only=False,
cython_dtype=np.dtype(np.int64),
needs_ngroups=True,
result_is_index=True,
periods=periods,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, axis=0):
"""
Calculate pct_change of each value to previous entry in group.
Returns
-------
Series or DataFrame
Percentage changes within each group.
"""
if freq is not None or axis != 0:
return self.apply(
lambda x: x.pct_change(
periods=periods,
fill_method=fill_method,
limit=limit,
freq=freq,
axis=axis,
)
)
if fill_method is None: # GH30463
fill_method = "pad"
limit = 0
filled = getattr(self, fill_method)(limit=limit)
fill_grp = filled.groupby(self.grouper.codes)
shifted = fill_grp.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
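# Illustrative sketch (not from the pandas source): pct_change compares each value
# to the previous one *within* its group. Hypothetical frame:
#
#   df = DataFrame({"g": ["x", "x", "y", "y"], "v": [1.0, 2.0, 4.0, 6.0]})
#   df.groupby("g")["v"].pct_change().tolist()
#   # -> [nan, 1.0, nan, 0.5]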
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def head(self, n=5):
"""
Return first n rows of each group.
Similar to ``.apply(lambda x: x.head(n))``, but it returns a subset of rows
from the original DataFrame with original index and order preserved
(``as_index`` flag is ignored).
Does not work for negative values of `n`.
Returns
-------
Series or DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
... columns=['A', 'B'])
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(-1)
Empty DataFrame
Columns: [A, B]
Index: []
"""
self._reset_group_selection()
mask = self._cumcount_array() < n
return self._selected_obj[mask]
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def tail(self, n=5):
"""
Return last n rows of each group.
Similar to ``.apply(lambda x: x.tail(n))``, but it returns a subset of rows
from the original DataFrame with original index and order preserved
(``as_index`` flag is ignored).
Does not work for negative values of `n`.
Returns
-------
Series or DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
... columns=['A', 'B'])
>>> df.groupby('A').tail(1)
A B
1 a 2
3 b 2
>>> df.groupby('A').tail(-1)
Empty DataFrame
Columns: [A, B]
Index: []
"""
self._reset_group_selection()
mask = self._cumcount_array(ascending=False) < n
return self._selected_obj[mask]
def _reindex_output(
self, output: OutputFrameOrSeries, fill_value: Scalar = np.NaN
) -> OutputFrameOrSeries:
"""
If we have categorical groupers, then we might want to make sure that
we have a fully re-indexed output to the levels. This means expanding
the output space to accommodate all values in the cartesian product of
our groups, regardless of whether they were observed in the data or
not. This will expand the output space if there are missing groups.
The method returns early without modifying the input if the number of
groupings is less than 2, self.observed == True or none of the groupers
are categorical.
Parameters
----------
output : Series or DataFrame
Object resulting from grouping and applying an operation.
fill_value : scalar, default np.NaN
Value to use for unobserved categories if self.observed is False.
Returns
-------
Series or DataFrame
Object (potentially) re-indexed to include all possible groups.
"""
groupings = self.grouper.groupings
if groupings is None:
return output
elif len(groupings) == 1:
return output
# if we only care about the observed values
# we are done
elif self.observed:
return output
# reindexing only applies to a Categorical grouper
elif not any(
isinstance(ping.grouper, (Categorical, CategoricalIndex))
for ping in groupings
):
return output
levels_list = [ping.group_index for ping in groupings]
index, _ = MultiIndex.from_product(
levels_list, names=self.grouper.names
).sortlevel()
if self.as_index:
d = {
self.obj._get_axis_name(self.axis): index,
"copy": False,
"fill_value": fill_value,
}
return output.reindex(**d)
# GH 13204
# Here, the categorical in-axis groupers, which need to be fully
# expanded, are columns in `output`. An idea is to do:
# output = output.set_index(self.grouper.names)
# .reindex(index).reset_index()
# but special care has to be taken because of possible not-in-axis
# groupers.
# So, we manually select and drop the in-axis grouper columns,
# reindex `output`, and then reset the in-axis grouper columns.
# Select in-axis groupers
in_axis_grps = (
(i, ping.name) for (i, ping) in enumerate(groupings) if ping.in_axis
)
g_nums, g_names = zip(*in_axis_grps)
output = output.drop(labels=list(g_names), axis=1)
# Set a temp index and reindex (possibly expanding)
output = output.set_index(self.grouper.result_index).reindex(
index, copy=False, fill_value=fill_value
)
# Reset in-axis grouper columns
# (using level numbers `g_nums` because level names may not be unique)
output = output.reset_index(level=g_nums)
return output.reset_index(drop=True)
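# Illustrative sketch (not from the pandas source): with categorical groupers and
# observed=False, the aggregated output is expanded to the full cartesian product
# of the category levels. Hypothetical frame; behaviour as of pandas ~1.1:
#
#   df = DataFrame({
#       "c1": pd.Categorical(["a", "a"], categories=["a", "b"]),
#       "c2": pd.Categorical(["x", "y"], categories=["x", "y"]),
#       "v": [1.0, 2.0],
#   })
#   df.groupby(["c1", "c2"], observed=False)["v"].mean()
#   # -> 4 rows: ("a", "x") = 1.0, ("a", "y") = 2.0, and the unobserved
#   #    ("b", "x") / ("b", "y") combinations filled with NaN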
def sample(
self,
n: Optional[int] = None,
frac: Optional[float] = None,
replace: bool = False,
weights: Optional[Union[Sequence, Series]] = None,
random_state=None,
):
"""
Return a random sample of items from each group.
You can use `random_state` for reproducibility.
.. versionadded:: 1.1.0
Parameters
----------
n : int, optional
Number of items to return for each group. Cannot be used with
`frac` and must be no larger than the smallest group unless
`replace` is True. Default is one if `frac` is None.
frac : float, optional
Fraction of items to return. Cannot be used with `n`.
replace : bool, default False
Allow or disallow sampling of the same row more than once.
weights : list-like, optional
Default None results in equal probability weighting.
If passed a list-like then values must have the same length as
the underlying DataFrame or Series object and will be used as
sampling probabilities after normalization within each group.
Values must be non-negative with at least one positive element
within each group.
random_state : int, array-like, BitGenerator, np.random.RandomState, optional
If int, array-like, or BitGenerator (NumPy>=1.17), seed for
random number generator
If np.random.RandomState, use as numpy RandomState object.
Returns
-------
Series or DataFrame
A new object of same type as caller containing items randomly
sampled within each group from the caller object.
See Also
--------
DataFrame.sample: Generate random samples from a DataFrame object.
numpy.random.choice: Generate a random sample from a given 1-D numpy
array.
Examples
--------
>>> df = pd.DataFrame(
... {"a": ["red"] * 2 + ["blue"] * 2 + ["black"] * 2, "b": range(6)}
... )
>>> df
a b
0 red 0
1 red 1
2 blue 2
3 blue 3
4 black 4
5 black 5
Select one row at random for each distinct value in column a. The
`random_state` argument can be used to guarantee reproducibility:
>>> df.groupby("a").sample(n=1, random_state=1)
a b
4 black 4
2 blue 2
1 red 1
Set `frac` to sample fixed proportions rather than counts:
>>> df.groupby("a")["b"].sample(frac=0.5, random_state=2)
5 5
2 2
0 0
Name: b, dtype: int64
Control sample probabilities within groups by setting weights:
>>> df.groupby("a").sample(
... n=1,
... weights=[1, 1, 1, 0, 0, 1],
... random_state=1,
... )
a b
5 black 5
2 blue 2
0 red 0
"""
from pandas.core.reshape.concat import concat
if weights is not None:
weights = Series(weights, index=self._selected_obj.index)
ws = [weights[idx] for idx in self.indices.values()]
else:
ws = [None] * self.ngroups
if random_state is not None:
random_state = com.random_state(random_state)
samples = [
obj.sample(
n=n, frac=frac, replace=replace, weights=w, random_state=random_state
)
for (_, obj), w in zip(self, ws)
]
return concat(samples, axis=self.axis)
@doc(GroupBy)
def get_groupby(
obj: NDFrame,
by: Optional[_KeysArgType] = None,
axis: int = 0,
level=None,
grouper: "Optional[ops.BaseGrouper]" = None,
exclusions=None,
selection=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
mutated: bool = False,
dropna: bool = True,
) -> GroupBy:
klass: Type[GroupBy]
if isinstance(obj, Series):
from pandas.core.groupby.generic import SeriesGroupBy
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
from pandas.core.groupby.generic import DataFrameGroupBy
klass = DataFrameGroupBy
else:
raise TypeError(f"invalid type: {obj}")
return klass(
obj=obj,
keys=by,
axis=axis,
level=level,
grouper=grouper,
exclusions=exclusions,
selection=selection,
as_index=as_index,
sort=sort,
group_keys=group_keys,
squeeze=squeeze,
observed=observed,
mutated=mutated,
dropna=dropna,
)
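# Illustrative sketch (not part of pandas itself): the dispatch and iteration
# behaviour described in the GroupBy docstring, exercised through the public
# DataFrame API. The frame is hypothetical; this only runs when the module is
# executed directly, never on import.
if __name__ == "__main__":
    _df = DataFrame({"A": ["x", "x", "y"], "B": [1.0, 3.0, 5.0]})
    _grouped = _df.groupby("A")
    # Methods not specially implemented "dispatch" to the grouped data:
    print(_grouped.std())
    # Iteration yields (key, subsetted object) pairs, chunking the data by group:
    for _key, _chunk in _grouped:
        print(_key, len(_chunk))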
| bsd-3-clause |
xiaoxiamii/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
HrWangChengdu/CS231n | assignment1/cs231n/features.py | 30 | 4807 | import matplotlib
import numpy as np
from scipy.ndimage import uniform_filter
def extract_features(imgs, feature_fns, verbose=False):
"""
Given pixel data for images and several feature functions that can operate on
single images, apply all feature functions to all images, concatenating the
feature vectors for each image and storing the features for all images in
a single matrix.
Inputs:
- imgs: N x H x W x C array of pixel data for N images.
- feature_fns: List of k feature functions. The ith feature function should
take as input an H x W x D array and return a (one-dimensional) array of
length F_i.
- verbose: Boolean; if true, print progress.
Returns:
An array of shape (N, F_1 + ... + F_k) where each column is the concatenation
of all features for a single image.
"""
num_images = imgs.shape[0]
if num_images == 0:
return np.array([])
# Use the first image to determine feature dimensions
feature_dims = []
first_image_features = []
for feature_fn in feature_fns:
feats = feature_fn(imgs[0].squeeze())
assert len(feats.shape) == 1, 'Feature functions must be one-dimensional'
feature_dims.append(feats.size)
first_image_features.append(feats)
# Now that we know the dimensions of the features, we can allocate a single
# big array to store all features as columns.
total_feature_dim = sum(feature_dims)
imgs_features = np.zeros((num_images, total_feature_dim))
imgs_features[0] = np.hstack(first_image_features).T
# Extract features for the rest of the images.
for i in xrange(1, num_images):
idx = 0
for feature_fn, feature_dim in zip(feature_fns, feature_dims):
next_idx = idx + feature_dim
imgs_features[i, idx:next_idx] = feature_fn(imgs[i].squeeze())
idx = next_idx
if verbose and i % 1000 == 0:
print 'Done extracting features for %d / %d images' % (i, num_images)
return imgs_features
def rgb2gray(rgb):
"""Convert RGB image to grayscale
Parameters:
rgb : RGB image
Returns:
gray : grayscale image
"""
# ITU-R 601 luma weights; the original file used 0.144 for the blue channel,
# which appears to be a typo for the standard 0.114.
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def hog_feature(im):
"""Compute Histogram of Gradient (HOG) feature for an image
Modified from skimage.feature.hog
http://pydoc.net/Python/scikits-image/0.4.2/skimage.feature.hog
Reference:
Histograms of Oriented Gradients for Human Detection
Navneet Dalal and Bill Triggs, CVPR 2005
Parameters:
im : an input grayscale or rgb image
Returns:
feat: Histogram of Gradient (HOG) feature
"""
# convert rgb to grayscale if needed
if im.ndim == 3:
image = rgb2gray(im)
else:
image = np.atleast_2d(im)  # np.at_least_2d does not exist in numpy
sx, sy = image.shape # image size
orientations = 9 # number of gradient bins
cx, cy = (8, 8) # pixels per cell
gx = np.zeros(image.shape)
gy = np.zeros(image.shape)
gx[:, :-1] = np.diff(image, n=1, axis=1) # compute gradient on x-direction
gy[:-1, :] = np.diff(image, n=1, axis=0) # compute gradient on y-direction
grad_mag = np.sqrt(gx ** 2 + gy ** 2) # gradient magnitude
grad_ori = np.arctan2(gy, (gx + 1e-15)) * (180 / np.pi) + 90 # gradient orientation
n_cellsx = int(np.floor(sx / cx)) # number of cells in x
n_cellsy = int(np.floor(sy / cy)) # number of cells in y
# compute orientations integral images
orientation_histogram = np.zeros((n_cellsx, n_cellsy, orientations))
for i in range(orientations):
# create new integral image for this orientation
# isolate orientations in this range
temp_ori = np.where(grad_ori < 180 / orientations * (i + 1),
grad_ori, 0)
temp_ori = np.where(grad_ori >= 180 / orientations * i,
temp_ori, 0)
# select magnitudes for those orientations
cond2 = temp_ori > 0
temp_mag = np.where(cond2, grad_mag, 0)
orientation_histogram[:,:,i] = uniform_filter(temp_mag, size=(cx, cy))[cx/2::cx, cy/2::cy].T
return orientation_histogram.ravel()
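# Illustrative note (not part of the original file): the returned feature length is
# n_cellsx * n_cellsy * orientations. For a 32x32 CIFAR-10 image with 8x8 cells and
# 9 orientation bins that is (32 // 8) * (32 // 8) * 9 = 4 * 4 * 9 = 144 values, e.g.:
#
#   hog_feature(np.random.rand(32, 32, 3)).shape  # -> (144,)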
def color_histogram_hsv(im, nbin=10, xmin=0, xmax=255, normalized=True):
"""
Compute color histogram for an image using hue.
Inputs:
- im: H x W x C array of pixel data for an RGB image.
- nbin: Number of histogram bins. (default: 10)
- xmin: Minimum pixel value (default: 0)
- xmax: Maximum pixel value (default: 255)
- normalized: Whether to normalize the histogram (default: True)
Returns:
1D vector of length nbin giving the color histogram over the hue of the
input image.
"""
ndim = im.ndim
bins = np.linspace(xmin, xmax, nbin+1)
hsv = matplotlib.colors.rgb_to_hsv(im/xmax) * xmax
imhist, bin_edges = np.histogram(hsv[:,:,0], bins=bins, density=normalized)
imhist = imhist * np.diff(bin_edges)
# return histogram
return imhist
pass
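# Illustrative usage sketch (not part of the original assignment file). The module
# targets Python 2 (xrange / print statement above); the random images below are
# hypothetical stand-ins for CIFAR-10 data. Runs only when executed directly.
if __name__ == '__main__':
    num_imgs = 5
    imgs = np.random.rand(num_imgs, 32, 32, 3) * 255
    feature_fns = [hog_feature, lambda im: color_histogram_hsv(im, nbin=10)]
    feats = extract_features(imgs, feature_fns)
    # 144 HOG values + 10 hue-histogram bins = 154 features per image
    assert feats.shape == (num_imgs, 154)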
| mit |
USStateDept/FPA_Core | openspending/lib/apihelper.py | 2 | 26540 | import logging
import urlparse
from dateutil import parser
# import pandas as pd
# import numpy as np
from flask import current_app,request, Response
from openspending.core import db
from openspending.lib.helpers import get_dataset
from openspending.lib.cubes_util import get_cubes_breaks
log = logging.getLogger(__name__)
from sqlalchemy import Table, MetaData, Column, Integer, String, ForeignKey
from sqlalchemy.orm import mapper
from sqlalchemy.sql.expression import func
from sqlalchemy.sql import table, column, select
from openspending.lib.helpers import get_dataset
from openspending.lib.jsonexport import to_json
from collections import namedtuple
import json
import csv
import codecs
import decimal
import datetime
import xlsxwriter
import os
from StringIO import StringIO
from tempfile import NamedTemporaryFile
# CREATE INDEX geogid
# ON geometry__time USING btree (gid ASC NULLS LAST);
# http://stackoverflow.com/questions/11401749/pass-in-where-parameters-to-postgresql-view
# create or replace function label_params(parm1 text, parm2 text)
# returns table (param_label text, param_graphics_label text)
# as
# $body$
# select ...
# WHERE region_label = $1
# AND model_id = (SELECT model_id FROM models WHERE model_label = $2)
# ....
# $body$
# language sql;
# Then you can do:
# select *
# from label_params('foo', 'bar')
# http://localhost:5000/api/slicer/cube/geometry/cubes_aggregate?&
# cluster=jenks&
# numclusters=4&
# cubes=hyogo_framework_for_action_hfa&
# cut=geometry__time:1990-2015&
# order=time&
# drilldown=geometry__country_level0@name|geometry__time&
# cut=geometry__country_level0@name:afghanistan;angola;armenia
# http://find.state.gov/api/slicer/cube/geometry/cubes_aggregate?&
# cluster=jenks&
# numclusters=4&
# cubes=access_to_credit_strength_of_l|hyogo_framework_for_action_hfa&
# cut=geometry__time:1990-2015&
# order=time
# &drilldown=geometry__country_level0@name|geometry__time
# &cut=geometry__country_level0@name:albania;argentina;australia;azerbaijan
# #browser parameters
# # - cubes (attribute, many)
# - e.g. cubes=cubes1_code|cubes2_code
# - default return error it is required
# # - daterange (attribute, range)
# - e.g. daterange=2010-2014 or in future implementions date=yesterday-now
# - default return 1990-current year
# # - format (attribute, one)
# - e.g. format=json
# - default json
# - options json, csv, excel
# # - drilldown (drilldown, one)
# - e.g. drilldown=geometry__country_level0@name|geometry__time
# - default aggregate all
# - options geometry__country_leve0@see columns of regions, geometry__time,
# # - cut (dates, data values, countries)
# - e.g. cut=geometry__country_level0@name:somecountrycode1;somecountrycode2
# - default to return all
# # - agg (display using an order chain of the data
# - e.g. agg=geometry__country_level0@{columnname}|geometry__time
# - returns dict structure of {
# 'geometry__country_level0':{
# 'geometry__time': {
# 'result'
# }
# } }
# # - orderby (attribute, one)
# - e.g. orderby=geometry__country_level0@{columnname}
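# Hedged example (added; the cube names are hypothetical): a request such as
#
#   /cubes_aggregate?cubes=cube_a|cube_b
#       &daterange=2000-2010
#       &format=csv
#       &drilldown=geometry__country_level0@name|geometry__time
#       &cut=geometry__country_level0@name:albania;armenia
#
# is parsed by DataBrowser._parse_params() into roughly:
#
#   self.cubes     = ['cube_a', 'cube_b']
#   self.daterange = {'start': 2000, 'end': 2010}
#   self.format    = 'csv'
#   self.drilldown = {'geometry__country_level0': ['name'], 'geometry__time': ['time']}
#   self.cut       = {'geometry__country_level0': {'name': ['albania', 'armenia']}}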
def parse_date(datestring):
try:
return parser.parse(datestring)
except Exception, e:
log.warn("Could not parse %s"%datestring)
return None
FORMATOPTS = {'json', 'csv', 'excel', 'xls', 'geojson'}
RETURNLIMIT= 10000
DEFAULTDRILLDOWN = {"geometry__country_level0":"sovereignt","geometry__time":"time"}
class DataBrowser(object):
"""
    Parses the request query string (flask.request.args) and builds the
    aggregation query; results can be returned as JSON, CSV, or Excel.
"""
def __init__(self):
self.params = {}
self.cubes = []
self.daterange= {"start":None,"end":None}
self.format='json'
self.agg = {}
self.drilldown = {}
self.cut={}
self.nulls=True
self.dataframe = None
self.geomtables = ['geometry__time', 'geometry__country_level0']
self.cubes_tables = []
self.t = {}
self.cachedresult = None
self._parse_params()
self._map_tables()
self.drilldowntables = []
self._finish_query()
def _finish_query(self):
if self.drilldown:
self._drilldowns()
self.selectable = select(self.selects).select_from(self.joins)
for table_name, dds in self.drilldown.iteritems():
for dd in dds:
self.drilldowntables.append(table_name + "." + dd)
self.selectable = self.selectable.group_by(self.t[table_name].c[dd])
else:
self.selectable = select(self.selects).select_from(self.joins)
for table_name, cols in self.cut.iteritems():
for colname, values in cols.iteritems():
self.selectable = self.selectable.where(self.t[table_name].c[colname].in_(values))
if self.daterange['start']:
self.selectable = self.selectable.where(self.t["geometry__time"].c["time"] >= self.daterange['start'])
if self.daterange['end']:
self.selectable = self.selectable.where(self.t["geometry__time"].c["time"] <= self.daterange['end'])
if not self.nulls:
for cube_tb in self.cubes_tables:
self.selectable = self.selectable.where(self.t[cube_tb].c["amount"] != None)
#completed the selects, now doing the wheres and groupby
def _drilldowns(self):
#make sure column exists
for tablename, drilldowns in self.drilldown.iteritems():
for dd in drilldowns:
if tablename == "geometry__country_level0":
self.selects.append(self.t[tablename].c[dd].label("geo__%s"%dd))
else:
self.selects.append(self.t[tablename].c[dd])
#group_by(t['geometry__country_level0'].c['dos_region'])
def _map_tables(self):
self.metadata = MetaData()
reflectiontables = self.geomtables + self.cubes_tables
self.metadata.reflect(db.engine, only=reflectiontables)
self.t = {z.name:z for z in self.metadata.sorted_tables}
#apply joins
self.joins = self.t["geometry__time"].join(self.t['geometry__country_level0'],self.t['geometry__country_level0'].c.gid==self.t['geometry__time'].c.gid)
callables = {"__max":func.max, "__min": func.min, "__avg":func.avg, "__sum":func.sum}
self.selects = [func.count(self.t['geometry__time'].c.id).label("count")]
for cubes_ts in self.cubes_tables:
self.joins = self.joins.outerjoin(self.t[cubes_ts], \
self.t[cubes_ts].c.geom_time_id==self.t['geometry__time'].c.id)
for lab, caller in callables.iteritems():
self.selects.append(caller(self.t[cubes_ts].c.amount).label(cubes_ts.split("__")[0] + lab))
def _getcache(self):
if self.cachedresult:
return self.cachedresult
else:
self.cachedresult = [x for x in self._execute_query_iterator()]
return self.cachedresult
def _execute_query_iterator(self):
results = db.session.execute(self.selectable.order_by(self.t['geometry__time'].c['time']))
for u in results.fetchall():
yield dict(u)
def get_clusters(self, resultsdict, field=None):
if not field:
field = self.cubes_tables[0].split("__entry")[0] + "__avg"
return get_cubes_breaks(resultsdict, field, method=self.clusterparams['cluster'], k=self.clusterparams['clusternum'])
def get_json_result(self):
results = self._getcache()
resultmodel = {
"cells": results
}
tempmodel = {}
for dataset in self.cubes:
tempmodel[dataset] = get_dataset(dataset).detailed_dict()
resultmodel['models'] = tempmodel
resultmodel['attributes'] = self.drilldowntables
if self.clusterparams['cluster']:
resultmodel['cluster'] = self.get_clusters(resultmodel['cells'])
resp = Response(response=to_json(resultmodel),
status=200, \
mimetype="application/json")
return resp
def get_csv(self):
results = self._getcache()
# def csv_generator_p2(records, fields, include_header=True, header=None,
# dialect=csv.excel):
outputstream = StringIO()
def _row_string(row):
writer.writerow(row)
# Fetch UTF-8 output from the queue ...
data = queue.getvalue()
if not isinstance(data, unicode):
data = data.decode('utf-8')
# ... and reencode it into the target encoding
data = encoder.encode(data)
# empty queue
queue.truncate(0)
return data
queue = StringIO()
writer = csv.writer(queue, dialect=csv.excel)
encoder = codecs.getincrementalencoder("utf-8")()
if len(results) > 0:
fields = results[0].keys()
else:
log.warn("There is nothing to resturn")
raise
return
outputstream.write(_row_string(fields))
for record in results:
row = []
for field in fields:
value = record.get(field)
if isinstance(value, unicode):
row.append(value.encode("utf-8"))
elif value is not None:
row.append(value)
else:
row.append(None)
outputstream.write(_row_string(row))
headers = {"Content-Disposition": 'attachment; filename="' + self.cubes[0] + '.csv"'}
return Response(outputstream.getvalue(),
mimetype='text/csv',
headers=headers)
def get_xls(self):
results = self._getcache()
#def xls_generator_p2(records, fields, include_header=True, header=None):
def _value_converter(data):
data = unicode(data)
# ... and reencode it into the target encoding
#data = encoder.encode(data)
return data
#outputfile = NamedTemporaryFile(delete=False, dir=FILE_UPLOAD_TEMP_DIR)
#might need temporary file
#encoder = codecs.getincrementalencoder("utf-8")()
outputfile = NamedTemporaryFile(delete=False)
workbook = xlsxwriter.Workbook(outputfile.name)
worksheet = workbook.add_worksheet('resultset')
row = 0
if len(results) > 0:
fields = results[0].keys()
head_column = 0
for head in fields:
worksheet.write(row, head_column, _value_converter(head))
head_column +=1
row = 1
for record in results:
column = 0
for field in fields:
value = record.get(field)
if isinstance(value, unicode):
worksheet.write(row, column, _value_converter(value))
elif value is not None:
worksheet.write(row, column, value)
else:
worksheet.write(row, column, None)
column +=1
row +=1
workbook.close()
namedfile = outputfile.name
outputfile.close()
outputstream = ""
with open(namedfile, 'rb') as f:
outputstream = f.read()
os.remove(namedfile)
headers = {"Content-Disposition": 'attachment; filename="' + self.cubes[0] + '.xlsx"'}
return Response(outputstream,
mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
headers=headers)
def get_response(self):
if self.format.lower() in ['xls', 'excel']:
return self.get_xls()
elif self.format.lower() in ['csv']:
return self.get_csv()
else:
return self.get_json_result()
def _parse_params(self):
cubes_arg = request.args.get("cubes", None)
try:
self.cubes = cubes_arg.split("|")
self.cubes_tables = ["%s__entry"%c for c in self.cubes]
except:
raise RequestError("Parameter cubes with value '%s'should be a valid cube names separated by a '|'"
% (cubes_arg) )
if len (self.cubes) > 5:
raise RequestError("You can only join 5 cubes together at one time")
#parse the date
dateparam = request.args.get("daterange", None)
if dateparam:
datesplit = dateparam.split("-")
if len(datesplit) == 1:
self.daterange['start'] = parse_date(datesplit[0]).year
#use this value to do a since date
elif len(datesplit) == 2:
self.daterange['start'] = parse_date(datesplit[0]).year
if self.daterange['start']:
self.daterange['end'] = parse_date(datesplit[1]).year
#parse format
tempformat = request.args.get("format", None)
if tempformat:
if tempformat.lower() not in FORMATOPTS:
log.warn("Could not find format %s"%tempformat)
else:
self.format = tempformat.lower()
else:
self.format = 'json'
#parse cut
#cut=geometry__country_level0@name:albania;argentina;australia;azerbaijan
tempcuts = request.args.get("cut", None)
if tempcuts:
cutsplit = tempcuts.split("|")
for tempcut in cutsplit:
basenamesplit = tempcut.split(":")
name = basenamesplit[0]
values = basenamesplit[1].split(';')
cutter = name.split("@")
if len(cutter) > 1:
if self.cut.get(cutter[0]):
self.cut[cutter[0]][cutter[1]] = values
else:
self.cut[cutter[0]] = {cutter[1]:values}
else:
if self.cut.get(cutter[0]):
self.cut[cutter[0]][DEFAULTDRILLDOWN.get(cutter[0])] = values
else:
self.cut[cutter[0]] = {DEFAULTDRILLDOWN.get(cutter[0]):values}
# tempagg = request.args.get("agg", None)
# if tempagg:
# aggsplit = tempagg.split("|")
# for tempitem in aggsplit:
# pass
tempdrilldown = request.args.get("drilldown", None)
if tempdrilldown:
drilldownsplit = tempdrilldown.split("|")
for tempdrill in drilldownsplit:
dd = tempdrill.split("@")
if len(dd) > 1:
if self.drilldown.get(dd[0]):
self.drilldown[dd[0]].append(dd[1])
else:
self.drilldown[dd[0]] = [dd[1]]
else:
if self.drilldown.get(dd[0]):
self.drilldown[dd[0]].append(DEFAULTDRILLDOWN.get(dd[0]))
else:
self.drilldown[dd[0]] = [DEFAULTDRILLDOWN.get(dd[0])]
self.nulls = request.args.get("nulls", False)
self.clusterparams = {
"cluster": request.args.get("cluster", None),
"clusternum": request.args.get("clusternum",5)
}
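# Hedged usage sketch (added): DataBrowser reads its parameters from
# flask.request, so it is meant to be constructed inside a request handler.
# The route below is an assumption based on the example URLs earlier in this
# module.
#
#   @app.route('/api/slicer/cube/geometry/cubes_aggregate')
#   def cubes_aggregate():
#       browser = DataBrowser()
#       return browser.get_response()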
# http://localhost:5000/api/slicer/cube/geometry/cubes_aggregate?&cluster=jenks&
# numclusters=4&cubes=under_five_mortality_rate_u5mr&
# order=time&drilldown=geometry__country_level0@dos_region|geometry__time
class DataBrowser_v4(DataBrowser):
"""
    Same interface as DataBrowser, but queries the denormalized ("__denorm")
    tables in the "finddata" schema.
"""
def __init__(self):
self.params = {}
self.cubes = []
self.daterange= {"start":None,"end":None}
self.format='json'
self.agg = {}
self.drilldown = {}
self.cut={}
self.nulls=True
self.dataframe = None
self.joins = None
self.cubes_tables = []
self.t = {}
self.cachedresult = None
self._parse_params()
self._map_tables()
self.drilldowntables = []
self._finish_query()
def _finish_query(self):
if self.drilldown:
self._drilldowns()
if len(self.cubes_tables) > 1:
self.selectable = select(self.selects).select_from(self.joins)
else:
self.selectable = select(self.selects)
for table_name, dds in self.drilldown.iteritems():
for dd in dds:
if table_name in ['geometry__country_level0', 'geometry__time']:
table_name = self.primary_table.name
self.drilldowntables.append(table_name + "." + dd)
self.selectable = self.selectable.group_by(self.t[table_name].c[dd])
else:
if len(self.cubes_tables) > 1:
self.selectable = select(self.selects).select_from(self.joins)
else:
self.selectable = select(self.selects)
for table_name, cols in self.cut.iteritems():
for colname, values in cols.iteritems():
if table_name in ['geometry__country_level0', 'geometry__time']:
table_name = self.primary_table.name
self.selectable = self.selectable.where(self.t[table_name].c[colname].in_(values))
if self.daterange['start']:
self.selectable = self.selectable.where(self.primary_table.c["time"] >= self.daterange['start'])
if self.daterange['end']:
self.selectable = self.selectable.where(self.primary_table.c["time"] <= self.daterange['end'])
if not self.nulls:
for cube_tb in self.cubes_tables:
self.selectable = self.selectable.where(self.t[cube_tb].c["amount"] != None)
#completed the selects, now doing the wheres and groupby
def _drilldowns(self):
#make sure column exists
for tablename, drilldowns in self.drilldown.iteritems():
for dd in drilldowns:
if tablename == "geometry__country_level0":
self.selects.append(self.primary_table.c[dd].label("geo__%s"%dd))
elif tablename == "geometry__time":
self.selects.append(self.primary_table.c['time'].label("time"))
else:
self.selects.append(self.t[tablename].c[dd])
#group_by(t['geometry__country_level0'].c['dos_region'])
def _map_tables(self):
self.metadata = MetaData()
self.metadata.reflect(db.engine, only=self.cubes_tables, schema="finddata")
self.t = {z.name:z for z in self.metadata.sorted_tables}
self.primary_table= self.t[self.cubes_tables[0]]
#apply joins
callables = {"__max":func.max, "__min": func.min, "__avg":func.avg, "__sum":func.sum}
self.selects = [func.count(self.primary_table.c['geom_time_id']).label("count")]
for cubes_ts in self.cubes_tables:
for lab, caller in callables.iteritems():
                self.selects.append(caller(self.t[cubes_ts].c.amount).label(cubes_ts.split("__denorm")[0] + lab))
if cubes_ts != self.primary_table.name:
if isinstance(self.joins, type(None)):
self.joins = self.primary_table.outerjoin(self.t[cubes_ts], \
self.t[cubes_ts].c.geom_time_id==self.primary_table.c['geom_time_id'])
else:
self.joins = self.joins.outerjoin(self.t[cubes_ts], \
self.t[cubes_ts].c.geom_time_id==self.primary_table.c['geom_time_id'])
def _execute_query_iterator(self):
if "geometry__time" in self.drilldown.keys():
self.selectable = self.selectable.order_by(self.primary_table.c['time'])
results = db.session.execute(self.selectable)
for u in results.fetchall():
yield dict(u)
def get_clusters(self, resultsdict, field=None):
if not field:
            field = self.cubes_tables[0].split("__denorm")[0] + "__avg"
return get_cubes_breaks(resultsdict, field, method=self.clusterparams['cluster'], k=self.clusterparams['clusternum'])
def _parse_params(self):
cubes_arg = request.args.get("cubes", None)
try:
self.cubes = cubes_arg.split("|")
self.cubes_tables = ["%s__denorm"%c for c in self.cubes]
except:
raise RequestError("Parameter cubes with value '%s'should be a valid cube names separated by a '|'"
% (cubes_arg) )
if len (self.cubes) > 5:
raise RequestError("You can only join 5 cubes together at one time")
#parse the date
dateparam = request.args.get("daterange", None)
if dateparam:
datesplit = dateparam.split("-")
if len(datesplit) == 1:
self.daterange['start'] = parse_date(datesplit[0]).year
#use this value to do a since date
elif len(datesplit) == 2:
self.daterange['start'] = parse_date(datesplit[0]).year
if self.daterange['start']:
self.daterange['end'] = parse_date(datesplit[1]).year
#parse format
tempformat = request.args.get("format", None)
if tempformat:
if tempformat.lower() not in FORMATOPTS:
log.warn("Could not find format %s"%tempformat)
else:
self.format = tempformat.lower()
else:
self.format = 'json'
#parse cut
#cut=geometry__country_level0@name:albania;argentina;australia;azerbaijan
tempcuts = request.args.get("cut", None)
if tempcuts:
cutsplit = tempcuts.split("|")
for tempcut in cutsplit:
basenamesplit = tempcut.split(":")
name = basenamesplit[0]
values = basenamesplit[1].split(';')
cutter = name.split("@")
if len(cutter) > 1:
if self.cut.get(cutter[0]):
self.cut[cutter[0]][cutter[1]] = values
else:
self.cut[cutter[0]] = {cutter[1]:values}
else:
if self.cut.get(cutter[0]):
self.cut[cutter[0]][DEFAULTDRILLDOWN.get(cutter[0])] = values
else:
self.cut[cutter[0]] = {DEFAULTDRILLDOWN.get(cutter[0]):values}
# tempagg = request.args.get("agg", None)
# if tempagg:
# aggsplit = tempagg.split("|")
# for tempitem in aggsplit:
# pass
tempdrilldown = request.args.get("drilldown", None)
if tempdrilldown:
drilldownsplit = tempdrilldown.split("|")
for tempdrill in drilldownsplit:
dd = tempdrill.split("@")
if len(dd) > 1:
if self.drilldown.get(dd[0]):
self.drilldown[dd[0]].append(dd[1])
else:
self.drilldown[dd[0]] = [dd[1]]
else:
if self.drilldown.get(dd[0]):
self.drilldown[dd[0]].append(DEFAULTDRILLDOWN.get(dd[0]))
else:
self.drilldown[dd[0]] = [DEFAULTDRILLDOWN.get(dd[0])]
self.nulls = request.args.get("nulls", False)
self.clusterparams = {
"cluster": request.args.get("cluster", None),
"clusternum": request.args.get("clusternum",5)
}
GEO_MAPPING = {"geometry__country_level0": {
"name": {
"name": "name",
"label": "Country Name"
},
"sovereignty": {
"name": "sovereignt",
"label": "Sovereignty"
},
"dos_region":{
"name": "dos_region",
"label": "Department of State Regions"
},
"usaid_reg": {
"name": "usaid_reg",
"label": "USAID Regions"
},
"dod_cmd": {
"name": "dod_cmd",
"label": "Department of Defense Regions"
},
"feed_the_future": {
"name": "feed_the_f",
"label": "Feed the Future Regions"
},
"pepfar": {
"name": "pepfar",
"label": "PEPFAR Regions"
},
"paf": {
"name": "paf",
"label": "PAF Regions"
},
"oecd": {
"name": "oecd",
"label": "OECD Regions"
},
"region_un":{
"name": "region_un",
"label": "United Nation Regions"
},
"subregion":{
"name": "subregion",
"label": "Subregions"
},
"region_wb": {
"name": "region_wb",
"label": "World Bank Regions"
},
"wb_inc_lvl":{
"name": "wb_inc_lvl",
"label": "World Bank Income Level Regions"
},
"continent":{
"name": "continent",
"label": "Continents"
}
}
} | agpl-3.0 |
stylianos-kampakis/scikit-learn | examples/svm/plot_custom_kernel.py | 171 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
    k(X, Y) = X * M * Y.T,  where M = [[2, 0],
                                       [0, 1]]
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
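# Note (added): when `kernel` is a callable, SVC builds the Gram matrix by
# calling my_kernel(X_fit, X_fit) during fit() and my_kernel(X_new, X_fit)
# during predict(), so the function must accept two sample matrices.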
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
yejingfu/samples | tensorflow/gene_sample.py | 1 | 2963 | #!/usr/bin/env python3
#%matplotlib inline
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
plt.style.use('/Users/jeff/code/elegant-scipy/style/elegant.mplstyle')
def reduceXaxisLabels(ax, factor):
plt.setp(ax.xaxis.get_ticklabels(), visible = False)
for l in ax.xaxis.get_ticklabels()[factor-1::factor]:
l.set_visible(True)
filename = '/Users/jeff/code/elegant-scipy/data/counts.txt'
with open(filename, 'rt') as f:
dataTable = pd.read_csv(f, index_col = 0)
## Print first 6 rows + first 5 columns
print(dataTable.iloc[:6, :5])
## Get the column names, convert to list
samples = list(dataTable.columns)
filename = '/Users/jeff/code/elegant-scipy/data/genes.csv'
with open(filename, 'rt') as f:
geneInfo = pd.read_csv(f, index_col = 0)
## print first 5 rows:
## GeneSymbol GeneID GeneLength
print(geneInfo.iloc[:5, :])
print("genes in dataTable: ", dataTable.shape) ## (20500, 375)
print("genes in geneInfo: ", geneInfo.shape) ## (20503, 2)
## intersection with the index (the first column elements) -- it is the gene name
matchedIdx = pd.Index.intersection(dataTable.index, geneInfo.index)
## 2D array, where the dataTable index hits in the matchedIdx
counts = np.asarray(dataTable.loc[matchedIdx], dtype=int)
geneNames = np.asarray(matchedIdx)
print(f'{counts.shape[0]} genes measured in {counts.shape[1]} individuals')
## 1D array, get from geneInfo table column 'GeneLength'
geneLength = np.asarray(geneInfo.loc[matchedIdx]['GeneLength'], dtype = int)
print(counts.shape) ## (20500, 375)
print(geneLength.shape) ##(20500,)
## Sum over axis 0: for every column (individual), add up all the rows (genes),
## i.e. the total gene-expression count for each of the 375 individuals.
## axis=0 sums each column (collapsing the rows); axis=1 sums each row (collapsing the columns).
totalCounts = np.sum(counts, axis=0)
print(totalCounts.shape) ## (375,)
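# Worked example of the axis convention (illustrative, not from the original):
#   np.sum(np.arange(6).reshape(2, 3), axis=0)  # -> array([3, 5, 7]), one value per column
#   np.sum(np.arange(6).reshape(2, 3), axis=1)  # -> array([3, 12]),   one value per row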
## KDE: kernel density estimation of the distribution of total counts
density = stats.kde.gaussian_kde(totalCounts)
x = np.arange(min(totalCounts), max(totalCounts), 10000)
fig, ax = plt.subplots()
ax.plot(x, density(x))
ax.set_xlabel("total counts per individual")
ax.set_ylabel("density")
##plt.show()
print(f'Count statistics: \n min: {np.min(totalCounts)}'
f'\n mean: {np.mean(totalCounts)}'
f'\n max: {np.max(totalCounts)}')
##
np.random.seed(seed=7)
samplesIdx = np.random.choice(range(counts.shape[1]), size = 70, replace = False)
countsSubset = counts[:, samplesIdx]
print("countsSubset shape:", countsSubset.shape) ## (20500, 70)
countsNorm = counts / totalCounts * 1000000 ## 2D / 1D
countsNormSubset = countsNorm[:, samplesIdx]
fig, ax = plt.subplots(figsize=(4.8, 2.4))
with plt.style.context('/Users/jeff/code/elegant-scipy/style/thinner.mplstyle'):
#ax.boxplot(countsSubset)
#ax.boxplot(np.log(countsSubset + 1))
ax.boxplot(np.log(countsNormSubset + 1))
ax.set_xlabel("Individuals")
ax.set_ylabel("Gene expr counts")
reduceXaxisLabels(ax, 5)
plt.show()
| mit |
allthroughthenight/aces | python/drivers/wave_forces.py | 1 | 17366 | import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import sys
sys.path.append('../functions')
from base_driver import BaseDriver
from helper_objects import BaseField
from helper_objects import ComplexUtil
import USER_INPUT
from ERRSTP import ERRSTP
from ERRWAVBRK1 import ERRWAVBRK1
from ERRWAVBRK2 import ERRWAVBRK2
from WAVELEN import WAVELEN
from WFVW1 import WFVW1
from WFVW2 import WFVW2
from WFVW3 import WFVW3
from WFVW4 import WFVW4
from EXPORTER import EXPORTER
## ACES Update to python
#-------------------------------------------------------------
# Driver for Nonbreaking Wave Forces at Vertical Walls (page 4-3 of ACES
# User's Guide). Provides pressure distribution and resultant force and
# moment loading on a vertical wall caused by normally incident, nonbreaking,
# regular waves as proposed by Sainflou (1928), Miche (1944), and Rundgren
# (1958).
# Updated by: Mary Anderson, USACE-CHL-Coastal Processes Branch
# Date Created: May 17, 2011
# Date Verified: June 1, 2012
# Requires the following functions:
# ERRSTP
# ERRWAVBRK1
# ERRWAVBRK2
# WAVELEN
# WFVW1
# WFVW2
# WFVW3
# WFVW4
# MAIN VARIABLE LIST:
# INPUT
# d: depth for sea water level
# Hi: incident wave height
# T: wave period
# chi: wave reflection coefficient
# cotphi: cotangent of nearshore slope
# OUTPUT
# MR: array containing Miche-Rundgren integrated values
# (1) particle height above bottom at crest
# (2) integrated force at crest
# (3) integrated moment about base at crest
# (4) particle height above bottom at trough
#       (5) integrated force at trough
# (6) integrated moment about bottom at trough
# S: array containing Sainflou integrated values
# MRintc: array containing Miche-Rundgren incremental values at crest
# (1) particle height
# (2) wave pressure
# (3) hydrostatic pressure
# (4) wave and hydrostatic pressure
# (5) moment
# MRintt: array containing Miche-Rundgren incremental values at trough
# Sintc: array containing Sainflou incremental values at crest
# Sintt: array containing Sainflou incremental values at trough
#-------------------------------------------------------------
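# Hedged usage sketch (added; the numeric values are illustrative only):
#   driver = WaveForces(d=15.0, Hi=8.0, T=10.0, chi=1.0, cotphi=100.0)
# supplies all five inputs up front and runs a single case, while the
# no-argument call at the bottom of this file relies on the BaseDriver /
# USER_INPUT prompts instead.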
class WaveForces(BaseDriver):
def __init__(self, d = None, Hi = None, T = None, chi = None, cotphi = None):
self.exporter = EXPORTER("output/exportWaveForces")
if d != None:
self.isSingleCase = True
self.defaultValue_d = d
if Hi != None:
self.isSingleCase = True
self.defaultValueHi = Hi
if T != None:
self.isSingleCase = True
self.defaultValueT = T
if chi != None:
self.isSingleCase = True
self.defaultValue_chi = chi
if cotphi != None:
self.isSingleCase = True
self.defaultValue_cotphi = cotphi
super(WaveForces, self).__init__()
self.exporter.close()
# end __init__
def userInput(self):
super(WaveForces, self).userInput()
self.water, self.rho = USER_INPUT.SALT_FRESH_WATER(self.isMetric)
# end userInput
def defineInputDataList(self):
self.inputList = []
if not hasattr(self, "defaultValue_d"):
self.inputList.append(BaseField("d: depth for sea water level (%s)" %\
self.labelUnitDist, 0.1, 200.0))
if not hasattr(self, "defaultValueHi"):
self.inputList.append(BaseField("Hi: incident wave height (%s)" %\
self.labelUnitDist, 0.1, 100.0))
if not hasattr(self, "defaultValueT"):
self.inputList.append(BaseField("T: wave period (s)", 1.0, 100.0))
if not hasattr(self, "defaultValue_chi"):
self.inputList.append(BaseField(\
"chi: wave reflection coefficient", 0.9, 1.0))
if not hasattr(self, "defaultValue_cotphi"):
self.inputList.append(BaseField(\
"cotphi: cotangent of nearshore slope", 5.0, 10000.0))
# end defineInputDataList
def fileOutputRequestInit(self):
self.fileOutputRequestMain(defaultFilename = "wave_forces")
def getCalcValues(self, caseInputList):
currIndex = 0
if hasattr(self, "defaultValue_d"):
d = self.defaultValue_d
else:
d = caseInputList[currIndex]
currIndex = currIndex + 1
if hasattr(self, "defaultValueHi"):
Hi = self.defaultValueHi
else:
Hi = caseInputList[currIndex]
currIndex = currIndex + 1
if hasattr(self, "defaultValueT"):
T = self.defaultValueT
else:
T = caseInputList[currIndex]
if hasattr(self, "defaultValue_chi"):
chi = self.defaultValue_chi
else:
chi = caseInputList[currIndex]
if hasattr(self, "defaultValue_cotphi"):
cotphi = self.defaultValue_cotphi
else:
cotphi = caseInputList[currIndex]
return d, Hi, T, chi, cotphi
# end getCalcValues
def performCalculations(self, caseInputList, caseIndex = 0):
d, Hi, T, chi, cotphi = self.getCalcValues(caseInputList)
dataDict = {"d": d, "Hi": Hi, "T": T, "chi": chi, "cotphi": cotphi}
H20weight = self.rho * self.g
m = 1.0 / cotphi
if np.isclose(m, 0.0):
Hbs = ERRWAVBRK1(d, 0.78)
else:
Hbs = ERRWAVBRK2(T, m, d)
if not (Hi < Hbs):
self.errorMsg = "Error: Wave broken at structure (Hbs = %6.2f %s)" %\
(Hbs, self.labelUnitDist)
print(self.errorMsg)
self.fileOutputWriteMain(dataDict, caseIndex)
return
L, k = WAVELEN(d, T, 50, self.g)
steep, maxstp = ERRSTP(Hi, d, L)
# assert(steep<maxstp,'Error: Input wave unstable (Max: %0.4f, [H/L] = %0.4f)',maxstp,steep')
if not ComplexUtil.lessThan(steep, maxstp):
self.errorMsg = "Error: Input wave unstable (Max: %0.4f, [H/L] = %0.4f)" %\
(maxstp.real, steep.real)
MR, S, MRintc, MRintt, Sintc, Sintt = WFVW1(d, Hi, chi, L, H20weight)
print('\n\t\t\t\t %s \t\t %s' % ('Miche-Rundgren','Sainflou'))
print("Wave Position at Wall\t\tCrest\t\tTrough\t\tCrest\t\tTrough\t\tUnits")
print("Hgt above bottom \t\t %-6.2f \t %6.2f \t %-6.2f \t %6.2f \t %s" %\
(MR[0].real, MR[3].real, S[0].real, S[3].real, self.labelUnitDist))
print("Integrated force \t\t %-6.2f \t %6.2f \t %-6.2f \t %6.2f \t %s/%s" %\
(MR[1].real, MR[4].real, S[1].real, S[4].real, self.labelUnitWt, self.labelUnitDist))
print("Integrated moment \t\t %-6.2f \t %6.2f \t %-6.2f \t %6.2f \t %s-%s/%s" %\
(MR[2].real, MR[5].real, S[2].real, S[5].real, self.labelUnitWt, self.labelUnitDist, self.labelUnitDist))
dataDict.update({"MR": MR, "S": S})
self.fileOutputWriteMain(dataDict, caseIndex)
if self.isSingleCase:
self.plotDict = {"MRintc": MRintc, "MRintt": MRintt,\
"Sintc": Sintc, "Sintt": Sintt}
# end performCalculations
def fileOutputWriteData(self, dataDict):
self.fileRef.write("Input\n")
self.fileRef.write("d\t%6.2f %s\n" % (dataDict["d"], self.labelUnitDist))
self.fileRef.write("Hi\t%6.2f %s\n" % (dataDict["Hi"], self.labelUnitDist))
self.fileRef.write("T\t%6.2f s\n" % dataDict["T"])
self.fileRef.write("chi\t%6.2f\n" % dataDict["chi"])
self.fileRef.write("cotphi\t%6.2f\n" % dataDict["cotphi"])
if self.errorMsg != None:
self.fileRef.write("\n%s\n" % self.errorMsg)
else:
self.fileRef.write('\n\t\t\t\t %s \t\t %s \n' % ('Miche-Rundgren','Sainflou'))
self.fileRef.write("Wave Position at Wall\t\tCrest\t\tTrough\t\tCrest\t\tTrough\t\tUnits\n")
self.fileRef.write("Hgt above bottom \t\t %-6.2f \t %6.2f \t %-6.2f \t %6.2f \t %s \n" %\
(dataDict["MR"][0].real, dataDict["MR"][3].real,\
dataDict["S"][0].real, dataDict["S"][3].real, self.labelUnitDist))
self.fileRef.write("Integrated force \t\t %-6.2f \t %6.2f \t %-6.2f \t %6.2f \t %s/%s \n" %\
(dataDict["MR"][1].real, dataDict["MR"][4].real,\
dataDict["S"][1].real, dataDict["S"][4].real,\
self.labelUnitWt, self.labelUnitDist))
self.fileRef.write("Integrated moment \t\t %-6.2f \t %6.2f \t %-6.2f \t %6.2f \t %s-%s/%s \n" %\
(dataDict["MR"][2].real, dataDict["MR"][5].real,\
dataDict["S"][2].real, dataDict["S"][5].real,\
self.labelUnitWt, self.labelUnitDist, self.labelUnitDist))
exportData = [dataDict["d"], dataDict["Hi"], dataDict["T"],\
dataDict["chi"], dataDict["cotphi"]]
if self.errorMsg != None:
exportData.append(self.errorMsg)
else:
exportData = exportData + [dataDict["MR"][0], dataDict["MR"][3],\
dataDict["S"][0], dataDict["S"][3],\
dataDict["MR"][1], dataDict["MR"][4],\
dataDict["S"][1], dataDict["S"][4],\
dataDict["MR"][2], dataDict["MR"][5],\
dataDict["S"][2], dataDict["S"][5]]
self.exporter.writeData(exportData)
# end fileOutputWriteData
def hasPlot(self):
return True
def performPlot(self):
plt.figure(1, figsize = self.plotConfigDict["figSize"],\
dpi = self.plotConfigDict["dpi"])
plt.subplot(2, 1, 1)
plt.plot(self.plotDict["MRintc"][1],\
self.plotDict["MRintc"][0], "g-",\
self.plotDict["MRintc"][2],\
self.plotDict["MRintc"][0], "c-.",\
self.plotDict["MRintc"][3],\
self.plotDict["MRintc"][0], "r:")
        plt.axhline(y=0.0, color="r", linestyle="--")
plt.legend(["Wave Pressure", "Hydrostatic Pressure",\
"Wave and Hydrostatic Pressure"])
plt.xlabel("Pressure [%s/%s^2]" % (self.labelUnitWt, self.labelUnitDist))
plt.ylabel("Elevation [%s]" % self.labelUnitDist)
plt.title("Miche-Rundgren Pressure Distribution - Crest at Wall")
ax = plt.subplot(2, 1, 2)
plt.plot(self.plotDict["MRintt"][1],\
self.plotDict["MRintt"][0], "g-",\
self.plotDict["MRintt"][2],\
self.plotDict["MRintt"][0], "c-.",\
self.plotDict["MRintt"][3],\
self.plotDict["MRintt"][0], "r:")
        plt.axhline(y=0.0, color="r", linestyle="--")
ax.add_patch(patches.Rectangle(\
(-50.0, math.floor(min([i.real for i in self.plotDict["Sintt"][0]]))),\
50.0, abs(math.floor(min([i.real for i in self.plotDict["Sintt"][0]]))) + 5,\
            linewidth=2, fill=None))
plt.ylim([math.floor(min([i.real for i in self.plotDict["Sintt"][0]])),\
abs(math.floor(min([i.real for i in self.plotDict["Sintt"][0]]))) - 5])
plt.legend(["Wave Pressure", "Hydrostatic Pressure",\
"Wave and Hydrostatic Pressure"])
plt.xlabel("Pressure [%s/%s^2]" % (self.labelUnitWt, self.labelUnitDist))
plt.ylabel("Elevation [%s]" % self.labelUnitDist)
plt.title("Miche-Rundgren Pressure Distribution - Trough at Wall")
plt.tight_layout(h_pad=1.0)
plt.figure(2, figsize = self.plotConfigDict["figSize"],\
dpi = self.plotConfigDict["dpi"])
plt.subplot(2, 1, 1)
plt.plot(self.plotDict["Sintc"][1],\
self.plotDict["Sintc"][0], "g-",\
self.plotDict["Sintc"][2],\
self.plotDict["Sintc"][0], "c-.",
self.plotDict["Sintc"][3],\
self.plotDict["Sintc"][0], "r:")
        plt.axhline(y=0.0, color="r", linestyle="--")
plt.legend(["Wave Pressure", "Hydrostatic Pressure",\
"Wave and Hydrostatic Pressure"])
plt.xlabel("Pressure [%s/%s^2]" % (self.labelUnitWt, self.labelUnitDist))
plt.ylabel("Elevation [%s]" % self.labelUnitDist)
plt.title("Sainflou Pressure Distribution - Crest at Wall")
ax = plt.subplot(2, 1, 2)
plt.plot(self.plotDict["Sintt"][1],\
self.plotDict["Sintt"][0], "g-",\
self.plotDict["Sintt"][2],\
self.plotDict["Sintt"][0], "c-.",\
self.plotDict["Sintt"][3],\
self.plotDict["Sintt"][0], "r:")
        plt.axhline(y=0.0, color="r", linestyle="--")
ax.add_patch(patches.Rectangle(\
(-50.0, math.floor(min([i.real for i in self.plotDict["Sintt"][0]]))),\
50.0, abs(math.floor(min([i.real for i in self.plotDict["Sintt"][0]]))) + 5,\
            linewidth=2, fill=None))
plt.ylim([math.floor(min([i.real for i in self.plotDict["Sintt"][0]])),\
abs(math.floor(min([i.real for i in self.plotDict["Sintt"][0]]))) - 5])
plt.legend(["Wave Pressure", "Hydrostatic Pressure",\
"Wave and Hydrostatic Pressue"])
plt.xlabel("Pressure [%s/%s^2]" % (self.labelUnitWt, self.labelUnitDist))
plt.ylabel("Elevation [%s]" % self.labelUnitDist)
plt.title("Sainflou Pressure Distribution - Trough at Wall")
plt.tight_layout(h_pad=1.0)
plt.show()
self.fileOutputPlotWriteData()
# end performPlot
def fileOutputPlotWriteData(self):
self.fileRef.write('Partial Listing of Plot Output File\n\n')
self.fileRef.write('Miche-Rundgren Pressure Distribution\n')
self.fileRef.write('Crest at Wall \n\n')
self.fileRef.write(' Elevation Wave Pressure Hydrostatic Pressure Wave & Hydrostatic Pressure\n')
self.fileRef.write(' (%s) (%s/%s^2) (%s/%s^2) (%s/%s^2)\n' %\
(self.labelUnitDist, self.labelUnitWt, self.labelUnitDist,\
self.labelUnitWt, self.labelUnitDist, self.labelUnitWt, self.labelUnitDist))
for i in range(len(self.plotDict["MRintc"][0])):
self.fileRef.write('%-6d %-6.2f %-6.2f %-6.2f %-6.2f\n' %\
((i + 1), self.plotDict["MRintc"][0][i].real,\
self.plotDict["MRintc"][1][i].real,\
self.plotDict["MRintc"][2][i].real,\
self.plotDict["MRintc"][3][i].real))
self.fileRef.write('\n\nMiche-Rundgren Pressure Distribution\n')
self.fileRef.write('Trough at Wall \n\n')
self.fileRef.write(' Elevation Wave Pressure Hydrostatic Pressure Wave & Hydrostatic Pressure\n')
self.fileRef.write(' (%s) (%s/%s^2) (%s/%s^2) (%s/%s^2)\n' %\
(self.labelUnitDist, self.labelUnitWt, self.labelUnitDist,\
self.labelUnitWt, self.labelUnitDist, self.labelUnitWt, self.labelUnitDist))
for i in range(len(self.plotDict["MRintt"][0])):
self.fileRef.write('%-6d %-6.2f %-6.2f %-6.2f %-6.2f\n' %\
((i + 1), self.plotDict["MRintt"][0][i].real,\
self.plotDict["MRintt"][1][i].real,\
self.plotDict["MRintt"][2][i].real,\
self.plotDict["MRintt"][3][i].real))
self.fileRef.write('\n\nSainflou Pressure Distribution\n')
self.fileRef.write('Crest at Wall \n\n')
self.fileRef.write(' Elevation Wave Pressure Hydrostatic Pressure Wave & Hydrostatic Pressure\n')
self.fileRef.write(' (%s) (%s/%s^2) (%s/%s^2) (%s/%s^2)\n' %\
(self.labelUnitDist, self.labelUnitWt, self.labelUnitDist,\
self.labelUnitWt, self.labelUnitDist, self.labelUnitWt, self.labelUnitDist))
for i in range(len(self.plotDict["Sintc"][0])):
self.fileRef.write('%-6d %-6.2f %-6.2f %-6.2f %-6.2f\n' %\
((i + 1), self.plotDict["Sintc"][0][i].real,\
self.plotDict["Sintc"][1][i].real,\
self.plotDict["Sintc"][2][i].real,\
self.plotDict["Sintc"][3][i].real))
self.fileRef.write('\n\nSainflou Pressure Distribution\n')
self.fileRef.write('Trough at Wall \n\n')
self.fileRef.write(' Elevation Wave Pressure Hydrostatic Pressure Wave & Hydrostatic Pressure\n')
self.fileRef.write(' (%s) (%s/%s^2) (%s/%s^2) (%s/%s^2)\n' %\
(self.labelUnitDist, self.labelUnitWt, self.labelUnitDist,\
self.labelUnitWt, self.labelUnitDist, self.labelUnitWt, self.labelUnitDist))
for i in range(len(self.plotDict["Sintt"][0])):
self.fileRef.write('%-6d %-6.2f %-6.2f %-6.2f %-6.2f\n' %\
((i + 1), self.plotDict["Sintt"][0][i].real,\
self.plotDict["Sintt"][1][i].real,\
self.plotDict["Sintt"][2][i].real,\
self.plotDict["Sintt"][3][i].real))
# end fileOutputPlotWriteData
driver = WaveForces() | gpl-3.0 |
numenta/htmresearch | projects/thalamus/run_experiment.py | 2 | 9869 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import skimage
from skimage import data
from skimage.util import img_as_float
from skimage.filters import gabor_kernel
from scipy import ndimage as ndi
from htmresearch.frameworks.thalamus.thalamus import Thalamus
from htmresearch.frameworks.thalamus.thalamus_utils import (
createLocationEncoder, encodeLocation, trainThalamusLocations,
getUnionLocations, defaultDtype)
# TODO: implement an overlap matrix to show that the location codes are overlapping
# TODO: implement feature filtering. Can have R, G, B ganglion inputs segregated
# into different relay cells. Only the R ones will burst, the rest are tonic.
# TODO: change color scheme so that grey is nothing and blue is tonic.
# TODO: implement a filling in mechanism.
# TODO: fan-out from ganglion cells to relay cells are not currently implemented.
# We should have ganglion cells also on the dendrites.
def loadImage(t, filename="cajal.jpg"):
"""
Load the given gray scale image. Threshold it to black and white and crop it
to be the dimensions of the FF input for the thalamus. Return a binary numpy
matrix where 1 corresponds to black, and 0 corresponds to white.
"""
image = Image.open("cajal.jpg").convert("1")
image.load()
box = (0, 0, t.inputWidth, t.inputHeight)
image = image.crop(box)
# Here a will be a binary numpy array where True is white. Convert to floating
# point numpy array where white is 0.0
a = np.asarray(image)
im = np.ones((t.inputWidth, t.inputHeight))
im[a] = 0
return im
def plotActivity(activity, filename,
title="", vmin=0.0, vmax=2.0,
cmap="Greys"):
plt.imshow(activity, vmin=vmin, vmax=vmax, origin="upper", cmap=cmap)
plt.title(title)
plt.colorbar()
plt.savefig(os.path.join("images", filename))
plt.close()
def inferThalamus(t, l6Input, ffInput):
"""
Compute the effect of this feed forward input given the specific L6 input.
:param t: instance of Thalamus
:param l6Input:
:param ffInput: a numpy array of 0's and 1's
:return:
"""
print("\n-----------")
t.reset()
t.deInactivateCells(l6Input)
ffOutput = t.computeFeedForwardActivity(ffInput)
# print("L6 input:", l6Input)
# print("Active TRN cells: ", t.activeTRNCellIndices)
# print("Burst ready relay cells: ", t.burstReadyCellIndices)
return ffOutput
def locationsTest():
"""Test with square and blocky A"""
t = Thalamus()
encoder = createLocationEncoder(t)
trainThalamusLocations(t, encoder)
output = np.zeros(encoder.getWidth(), dtype=defaultDtype)
ff = np.zeros((32, 32))
for x in range(10,20):
ff[:] = 0
ff[10:20, 10:20] = 1
plotActivity(ff, "square_ff_input.jpg", title="Feed forward input")
ffOutput = inferThalamus(t, encodeLocation(encoder, x, x, output), ff)
plotActivity(ffOutput, "square_relay_output_" + str(x) + ".jpg",
title="Relay cell activity",
cmap="coolwarm")
# Show attention with an A
ff = np.zeros((32, 32))
for x in range(10,20):
ff[:] = 0
ff[10, 10:20] = 1
ff[15, 10:20] = 1
ff[10:20, 10] = 1
ff[10:20, 20] = 1
plotActivity(ff, "A_ff_input.jpg", title="Feed forward input")
ffOutput = inferThalamus(t, encodeLocation(encoder, x, x, output), ff)
plotActivity(t.burstReadyCells, "relay_burstReady_" + str(x) + ".jpg",
title="Burst-ready cells (x,y)=({},{})".format(x, x),
)
plotActivity(ffOutput, "A_relay_output_" + str(x) + ".jpg",
title="Relay cell activity",
cmap="coolwarm")
def largeThalamus(w=250):
print("Initializing thalamus")
t = Thalamus(
trnCellShape=(w, w),
relayCellShape=(w, w),
inputShape=(w, w),
l6CellCount=128*128,
trnThreshold=15,
)
encoder = createLocationEncoder(t, w=17)
trainThalamusLocations(t, encoder)
print("Loading image")
ff = loadImage(t)
plotActivity(ff, "cajal_input.jpg", title="Feed forward input")
l6Activity = np.zeros(encoder.getWidth(), dtype=defaultDtype)
for x in range(w/2-60,w/2+60,40):
print("Testing with x=", x)
ff = loadImage(t)
l6Code = list(getUnionLocations(encoder, x, x, 20))
print("Num active cells in L6 union:", len(l6Code),"out of", t.l6CellCount)
ffOutput = inferThalamus(t, l6Code, ff)
plotActivity(t.burstReadyCells, "relay_burstReady_" + str(x) + ".jpg",
title="Burst-ready cells (x,y)=({},{})".format(x, x),
)
plotActivity(ffOutput, "cajal_relay_output_" + str(x) + ".jpg",
title="Relay cell activity",
cmap="coolwarm")
# The eye
x=150
y=110
print("Testing with x,y=", x, y)
ff = loadImage(t)
l6Code = list(getUnionLocations(encoder, x, y, 20))
print("Num active cells in L6 union:", len(l6Code),"out of", t.l6CellCount)
ffOutput = inferThalamus(t, l6Code, ff)
plotActivity(t.burstReadyCells, "relay_burstReady_eye.jpg",
title="Burst-ready cells (x,y)=({},{})".format(x, y),
)
plotActivity(ffOutput, "cajal_relay_output_eye.jpg",
title="Filtered activity",
cmap="Greys")
# The ear
x=25
y=150
print("Testing with x,y=", x, y)
ff = loadImage(t)
l6Code = list(getUnionLocations(encoder, x, y, 20))
print("Num active cells in L6 union:", len(l6Code),"out of", t.l6CellCount)
ffOutput = inferThalamus(t, l6Code, ff)
plotActivity(t.burstReadyCells, "relay_burstReady_ear.jpg",
title="Burst-ready cells (x,y)=({},{})".format(x, y),
)
plotActivity(ffOutput, "cajal_relay_output_ear.jpg",
title="Filtered activity",
cmap="Greys")
return t
def power(image, kernel):
# Normalize images for better comparison.
image = (image - image.mean()) / image.std()
return np.sqrt(ndi.convolve(image, np.real(kernel), mode='wrap') ** 2 +
ndi.convolve(image, np.imag(kernel), mode='wrap') ** 2)
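# (Added note) power() returns the magnitude of the complex Gabor response:
# it normalises the image to zero mean / unit variance and then combines the
# real and imaginary kernel convolutions as sqrt(real**2 + imag**2).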
def filtered(w=250):
"""
In this example we filter the image into several channels using gabor filters. L6 activity is used to select
  one of those channels. Only activity selected by those channels bursts.
"""
# prepare filter bank kernels
kernels = []
for theta in range(4):
theta = theta / 4. * np.pi
for sigma in (1, 3):
for frequency in (0.05, 0.25):
kernel = np.real(gabor_kernel(frequency, theta=theta,
sigma_x=sigma, sigma_y=sigma))
kernels.append(kernel)
print("Initializing thalamus")
t = Thalamus(
trnCellShape=(w, w),
relayCellShape=(w, w),
inputShape=(w, w),
l6CellCount=128*128,
trnThreshold=15,
)
ff = loadImage(t)
for i,k in enumerate(kernels):
plotActivity(k, "kernel"+str(i)+".jpg", "Filter kernel", vmax=k.max(),
vmin=k.min())
filtered0 = power(ff, k)
ft = np.zeros((w, w))
ft[filtered0 > filtered0.mean() + filtered0.std()] = 1.0
plotActivity(ft, "filtered"+str(i)+".jpg", "Filtered image", vmax=1.0)
encoder = createLocationEncoder(t, w=17)
trainThalamusLocations(t, encoder)
filtered0 = power(ff, kernels[3])
ft = np.zeros((w, w))
ft[filtered0 > filtered0.mean() + filtered0.std()] = 1.0
# Get a salt and pepper burst ready image
print("Getting unions")
l6Code = list(getUnionLocations(encoder, 125, 125, 150, step=10))
print("Num active cells in L6 union:", len(l6Code),"out of", t.l6CellCount)
ffOutput = inferThalamus(t, l6Code, ft)
plotActivity(t.burstReadyCells, "relay_burstReady_filtered.jpg",
title="Burst-ready cells",
)
plotActivity(ffOutput, "cajal_relay_output_filtered.jpg",
title="Filtered activity",
cmap="Greys")
# Get a more detailed filtered image
print("Getting unions")
l6Code = list(getUnionLocations(encoder, 125, 125, 150, step=3))
print("Num active cells in L6 union:", len(l6Code),"out of", t.l6CellCount)
ffOutput_all = inferThalamus(t, l6Code, ff)
ffOutput_filtered = inferThalamus(t, l6Code, ft)
ffOutput3 = ffOutput_all*0.4 + ffOutput_filtered
plotActivity(t.burstReadyCells, "relay_burstReady_all.jpg",
title="Burst-ready cells",
)
plotActivity(ffOutput3, "cajal_relay_output_filtered2.jpg",
title="Filtered activity",
cmap="Greys")
# Simple tests for debugging
def trainThalamus(t):
# Learn
t.learnL6Pattern([0, 1, 2, 3, 4, 5], [(0, 0), (2, 3)])
t.learnL6Pattern([6, 7, 8, 9, 10], [(1, 1), (3, 4)])
def basicTest():
t = Thalamus()
trainThalamus(t)
ff = np.zeros((32,32))
ff.reshape(-1)[[8, 9, 98, 99]] = 1.0
inferThalamus(t, [0, 1, 2, 3, 4, 5], ff)
if __name__ == '__main__':
# largeThalamus(250)
# basicTest()
filtered(250)
| agpl-3.0 |
huangziwei/MorphoPy | tests/test_utils.py | 1 | 3616 | import numpy as np
import sys
sys.path.append('..')
#### TEST GET_ANGLE #####
from morphopy._utils.summarize import get_angle
def test_get_angle_with_orthogonal_vectors():
v0 = np.array([0, 0, 1])
v1 = np.array([0, 1, 0])
r, d = get_angle(v0, v1)
assert(r == 90*np.pi/180), "returned angle should be pi/2"
assert (d == 90), "returned angle should be 90 degree"
def test_get_angle_with_opposite_vectors():
v0 = np.array([0, 0, 1])
v1 = np.array([0, 0, -1])
r, d = get_angle(v0, v1)
assert (r == np.pi), "returned angle should be pi"
assert (d == 180), "returned angle should be 180 degree"
def test_get_angle_with_same_vector():
v0 = np.array([0, 0, 1])
r, d = get_angle(v0, v0)
assert (r == 0), "returned angle should be 0"
assert (d == 0), "returned angle should be 0"
def test_get_angle_with_unnormalized_vector():
v0 = np.array([0, 0, 1])
v1 = np.array([0, 2, 0])
r, d = get_angle(v0, v1)
assert (r == 90 * np.pi / 180), "returned angle should be pi/2"
assert (d == 90), "returned angle should be 90 degree"
def test_get_angle_btw_zero_and_v1():
v0 = np.array([0, 0, 0])
v1 = np.array([0, 1, 0])
r, d = get_angle(v0, v1)
assert (r == 0), "returned angle should be 0"
assert (d == 0), "returned angle should be 0"
def test_get_angle_returns_float():
v0 = np.array([0, 0, 1])
v1 = np.array([0, 1, 1])
r, d = get_angle(v0, v1)
assert (isinstance(r, np.float)), "get_angle() should return float"
assert (isinstance(d, np.float)), "get_angle() should return float"
# ### TEST READING METHODS ####
# import networkx as nx
# from morphopy._utils.utils import read_swc
# def test_read_swc_returned_fileformat():
# import pandas as pd
# filepath = 'data/Image001-005-01.CNG.swc'
# G, swc = read_swc(filepath)
# assert(isinstance(G, nx.DiGraph)), "read_swc() should return a graph as networkx.DiGraph"
# assert(isinstance(swc, pd.DataFrame)), "read_swc() should return a swc as pandas.DataFrame"
# def test_read_swc_all_variables_are_in():
# filepath = 'data/Image001-005-01.CNG.swc'
# G, swc = read_swc(filepath)
# assert 'n' in swc.keys(), "column 'n' should be in pandas.DataFrame"
# assert 'x' in swc.keys(), "column 'x' should be in pandas.DataFrame"
# assert 'y' in swc.keys(), "column 'y' should be in pandas.DataFrame"
# assert 'z' in swc.keys(), "column 'z' should be in pandas.DataFrame"
# assert 'type' in swc.keys(), "column 'type' should be in pandas.DataFrame"
# assert 'parent' in swc.keys(), "column 'parent' should be in pandas.DataFrame"
# ### TEST FUNCTIONAL METHODS ###
# from morphopy._utils.utils import get_df_paths
# def test_get_df_paths_creates_dataFrame():
# import pandas as pd
# filepath = 'data/Image001-005-01.CNG.swc'
# G, swc = read_swc(filepath)
# paths = get_df_paths(G)
# assert (isinstance(paths, pd.DataFrame)), "get_df_paths() should return a pandas.DataFrame"
from morphopy._utils.utils import unique_row
def test_unique_row_pairs_of_same_value():
a = np.array([[9, 9], [8, 8], [1, 1], [9, 9]])
a_ = unique_row(a)
assert (a_ == np.array([[1, 1], [8, 8], [9, 9]])).all()
def test_unique_row_pairs_of_different_values():
a = np.array([[1, 2], [2, 3], [2, 3], [9, 8]])
a_ = unique_row(a)
assert (a_ == np.array([[1, 2], [2, 3], [9, 8]])).all()
def test_unique_row_higher_number_first():
a = np.array([[2, 1], [3, 6], [7, 4], [3, 6]])
a_ = unique_row(a)
assert (a_ == np.array([[2, 1], [3, 6], [7, 4]])).all() | mit |
manipopopo/tensorflow | tensorflow/contrib/factorization/python/ops/gmm_test.py | 41 | 8716 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner
class GMMTest(test.TestCase):
def input_fn(self, batch_size=None, points=None):
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return x, None
indices = random_ops.random_uniform(constant_op.constant([batch_size]),
minval=0, maxval=num_points-1,
dtype=dtypes.int32,
seed=10)
return array_ops.gather(x, indices), None
return _fn
def setUp(self):
np.random.seed(3)
random_seed_lib.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = self.num_points
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments = self.make_random_points(
self.true_centers, self.num_points)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
return (points, assignments)
def test_weights(self):
"""Tests the shape of the weights."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
weights = gmm.weights()
self.assertAllEqual(list(weights.shape), [self.num_centers])
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters='random',
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=1)
score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
gmm.fit(input_fn=self.input_fn(), steps=10)
score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertLess(score1, score2)
def test_infer(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=60)
clusters = gmm.clusters()
# Make a small test set
num_points = 40
points, true_assignments = self.make_random_points(clusters, num_points)
assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=num_points)):
assignments.append(item)
assignments = np.ravel(assignments)
self.assertAllEqual(true_assignments, assignments)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=iterations)
points = self.points[:10, :]
skflow_assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=10)):
skflow_assignments.append(item)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments).astype(int))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(
np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
def test_random_input_large(self):
# sklearn version.
iterations = 5 # that should be enough to know whether this diverges
np.random.seed(5)
num_classes = 20
x = np.array([[np.random.random() for _ in range(100)]
for _ in range(num_classes)], dtype=np.float32)
# skflow version.
gmm = gmm_lib.GMM(num_classes,
covariance_type='full',
config=run_config.RunConfig(tf_random_seed=2))
def get_input_fn(x):
def input_fn():
return constant_op.constant(x.astype(np.float32)), None
return input_fn
gmm.fit(input_fn=get_input_fn(x), steps=iterations)
self.assertFalse(np.isnan(gmm.clusters()).any())
class GMMTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(capacity=10,
dtypes=dtypes.float32,
shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue,
[enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
gmm = gmm_lib.GMM(2, covariance_type='diag')
gmm.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
mattilyra/scikit-learn | sklearn/externals/joblib/testing.py | 45 | 2720 | """
Helper for testing.
"""
import sys
import warnings
import os.path
import re
import subprocess
import threading
from sklearn.externals.joblib._compat import PY3_OR_LATER
def warnings_to_stdout():
""" Redirect all warnings to stdout.
"""
showwarning_orig = warnings.showwarning
def showwarning(msg, cat, fname, lno, file=None, line=0):
showwarning_orig(msg, cat, os.path.basename(fname), line, sys.stdout)
warnings.showwarning = showwarning
#warnings.simplefilter('always')
try:
from nose.tools import assert_raises_regex
except ImportError:
# For Python 2.7
try:
from nose.tools import assert_raises_regexp as assert_raises_regex
except ImportError:
# for Python 2.6
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except Exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("Should have raised %r" %
expected_exception(expected_regexp))
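# Usage sketch (illustration only, not part of joblib): whichever definition
# of assert_raises_regex was selected above, it is called the same way, e.g.
#
#   assert_raises_regex(ValueError, "invalid literal", int, "not-a-number")
#
# which passes because int("not-a-number") raises a ValueError whose message
# matches the pattern.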
def check_subprocess_call(cmd, timeout=1, stdout_regex=None):
"""Runs a command in a subprocess with timeout in seconds.
Also checks returncode is zero and stdout if stdout_regex is set.
"""
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def kill_process():
proc.kill()
timer = threading.Timer(timeout, kill_process)
try:
timer.start()
stdout, stderr = proc.communicate()
if PY3_OR_LATER:
stdout, stderr = stdout.decode(), stderr.decode()
if proc.returncode != 0:
message = (
'Non-zero return code: {0}.\nStdout:\n{1}\n'
'Stderr:\n{2}').format(
proc.returncode, stdout, stderr)
raise ValueError(message)
if (stdout_regex is not None and
not re.search(stdout_regex, stdout)):
raise ValueError(
"Unexpected output: '{0!r}' does not match:\n{1!r}".format(
stdout_regex, stdout))
finally:
timer.cancel()
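# Usage sketch (illustration only, not part of joblib): run a short Python
# command and check its output; the command below is an assumed example.
#
#   check_subprocess_call([sys.executable, '-c', 'print("ok")'],
#                         timeout=5, stdout_regex=r'ok')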
| bsd-3-clause |
mrgloom/python-topic-model | ptm/whdsp.py | 3 | 20653 | import numpy as np
import time
import utils
from scipy.special import gammaln, psi
# small constant used to guard against taking the log of zero
eps = 1e-100
class hdsp:
"""
hierarchical dirichlet scaling process (hdsp)
"""
def __init__(self, num_topics, num_words, num_labels, dir_prior=0.5):
self.K = num_topics # number of topics
self.N = num_words # vocabulary size
self.J = num_labels # num labels
self.V = np.zeros(self.K)
# initialize V so that the stick-breaking weights p start out uniform (1/K each)
self.V[0] = 1./self.K
for k in xrange(1,self.K-1):
self.V[k] = (1./self.K)/np.prod(1.-self.V[:k])
self.V[self.K-1] = 1.
self.p = self.getP(self.V)
self.alpha = 5.
self.alpha_1 = 1 #prior for alpha
self.alpha_2 = 1e-3 #prior for alpha
self.beta = 5.
self.beta_1 = 1
self.beta_2 = 1e-3
self.dir_prior = dir_prior
self.gamma = np.random.gamma(shape=1, scale=1, size=[self.N, self.K]) + self.dir_prior
self.c_a_max_step = 10
self.is_plot = False
self.is_verbose = True
self.is_compute_lb = True
self.ll_diff_frac = 1e-3
def run_variational_EM(self, max_iter, corpus, directory=None, logger=None):
if self.is_plot:
import matplotlib.pyplot as plt
plt.ion()
lbs = list()
curr = time.clock()
for iter in xrange(max_iter):
lb = 0
lb += self.update_C(corpus)
lb += self.update_Z(corpus)
lb += self.newton_W(corpus)
lb += self.update_V(corpus)
self.update_alpha()
self.update_beta(corpus)
if corpus.heldout_ids != None:
perp = self.heldout_perplexity(corpus)
if self.is_verbose:
print '%d iter, %d topics, %.2f time, %.2f lower_bound %.3f perplexity' % (iter, self.K, time.clock()-curr, lb, perp)
if logger:
logger.write('%d,%d,%f,%f,%f,%f\n'%(iter, self.K, self.dir_prior, time.clock()-curr, lb, perp))
elif corpus.heldout_ids == None and self.is_verbose:
print '%d iter, %d topics, %.2f time, %.2f lower_bound' % (iter, self.K, time.clock()-curr, lb)
if iter > 0:
lbs.append(lb)
if self.is_plot:
plt.close
plt.plot(lbs)
plt.draw()
if iter > 30:
if (abs(lbs[-1] - lbs[-2])/abs(lbs[-2])) < self.ll_diff_frac :
break
if directory:
self.save_result(directory, corpus)
return lbs
def getStickLeft(self, V):
stl = np.ones(self.K)
stl[1:] = np.cumprod(1.-V)[:-1]
return stl
def getP(self, V):
one_v = np.ones(self.K)
one_v[1:] = (1.-V)[:-1]
p = V * np.cumprod(one_v)
return p
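# Worked example of the stick-breaking construction computed by getP above,
# assuming K = 3 (illustration only):
#   p[0] = V[0]
#   p[1] = V[1] * (1 - V[0])
#   p[2] = V[2] * (1 - V[0]) * (1 - V[1])
# With V[K-1] = 1 (as set in __init__), the weights p sum to exactly 1.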
# update the per-word variational assignment distribution phi (the assignment c here is denoted z in the ICML paper)
def update_C(self, corpus):
corpus.phi_doc = np.zeros([corpus.M, self.K])
psiGamma = psi(self.gamma)
gammaSum = np.sum(self.gamma,0)
psiGammaSum = psi(np.sum(self.gamma, 0))
lnZ = psi(corpus.A) - np.log(corpus.B)
Z = corpus.A/corpus.B
#entropy of q(eta)
lb = 0
if(self.is_compute_lb):
lb += -np.sum(gammaln(gammaSum)) + np.sum(gammaln(self.gamma)) - np.sum((self.gamma - 1)*(psiGamma - psiGammaSum))
#expectation of eta over variational q(eta)
lb += self.K * gammaln(self.dir_prior*self.N) - self.K * self.N * gammaln(self.dir_prior) - np.sum((self.dir_prior-1)*(psiGamma-psiGammaSum))
self.gamma = np.zeros([self.N, self.K]) + self.dir_prior #multinomial topic distribution prior
for m in xrange(corpus.M):
ids = corpus.word_ids[m]
cnt = corpus.word_cnt[m]
# C = len(ids) x K
E_ln_eta = psiGamma[ids,:] - psiGammaSum
C = np.exp(E_ln_eta + lnZ[m,:])
C = C/np.sum(C,1)[:,np.newaxis]
self.gamma[ids,:] += cnt[:,np.newaxis] * C
corpus.phi_doc[m,:] = np.sum(cnt[:,np.newaxis] * C,0)
#expectation of p(X) over variational q
lb += np.sum(cnt[:,np.newaxis] * C * E_ln_eta)
#entropy of q(C)
lb -= np.sum(cnt[:,np.newaxis] * C * np.log(C+eps))
#expectation of p(C) over variational q
lb += np.sum(cnt[:,np.newaxis] * C * (lnZ[m,:] - np.log(np.sum(Z[m,:]))) )
if self.is_verbose:
print 'p(x,c)-q(c) %f' %lb
return lb
# update the variational Gamma parameters A and B for Z_mk (Z here is denoted \pi in the ICML paper)
def update_Z(self, corpus):
lb = 0
bp = self.beta*self.p
corpus.A = bp + corpus.phi_doc
# taylor approximation on E[\sum lnZ]
xi = np.sum(corpus.A/corpus.B, 1)
E_exp_wr = np.exp(np.dot(corpus.R, corpus.w))
E_wr = np.dot(corpus.R,corpus.w) # M x K
corpus.B = E_exp_wr + (corpus.Nm / xi)[:,np.newaxis]
# expectation of p(Z)
lb += np.sum(-bp * E_wr + (bp-1)*(psi(corpus.A)-np.log(corpus.B)) - E_exp_wr*(corpus.A/corpus.B) - gammaln(bp))
# entropy of q(Z)
lb -= np.sum(corpus.A*np.log(corpus.B) + (corpus.A-1)*(psi(corpus.A) - np.log(corpus.B)) - corpus.A - gammaln(corpus.A))
if self.is_verbose:
print 'p(z)-q(z) %f' %lb
return lb
def newton_W(self, corpus):
lb = 0
bp = self.beta * self.p
Z = corpus.A/corpus.B
lnZ = psi(corpus.A)-np.log(corpus.B)
for ki in np.random.permutation(corpus.K):
E_exp_wr = np.exp(np.dot(corpus.R, corpus.w)) # M x K
E_wr = np.dot(corpus.R,corpus.w) # M x K
det_w = np.zeros([self.J])
H = np.zeros([self.J,self.J])
new_second = corpus.R*(E_exp_wr[:,ki][:,np.newaxis])*(Z[:,ki][:,np.newaxis]) # M x J
det_w = np.sum(bp[ki]*corpus.R - new_second, 0) - corpus.w[:,ki] # with normal prior mean 0 and variance 1
H = - np.dot(new_second.T, corpus.R) - np.identity(self.J) # - identity for normal
# for ji in xrange(corpus.J):
# H[:,ji] = np.sum(- corpus.R * new_second[:,ji][:,np.newaxis], 0)
# second = corpus.R[:,ji]*E_exp_wr[:,ki]*Z[:,ki] # M-dim
# det_w[ji] = np.sum(bp[ki]*corpus.R[:,ji] - second) # - 2.0 * corpus.w[ji,ki] # normal prior
# for ji2 in xrange(corpus.J):
# H[ji2,ji] = np.sum(- corpus.R[:,ji2] * corpus.R[:,ji] * E_exp_wr[:,ki]*Z[:,ki])
invH = np.linalg.inv(H)
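# Newton-Raphson update for column ki: w <- w - H^{-1} * gradient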
corpus.w[:,ki] = corpus.w[:,ki] - np.dot(invH, det_w)
E_exp_wr = np.exp(np.dot(corpus.R, corpus.w)) # M x K
E_wr = np.dot(corpus.R,corpus.w) # M x K
lb = np.sum(-bp * E_wr + (bp-1)*(lnZ) - E_exp_wr*(Z))
if self.is_verbose:
print 'p(w)-q(w) %f, max %f, min %f' % (lb, np.max(corpus.w), np.min(corpus.w))
return lb
#coordinate ascent for w_jk
def update_W(self, corpus):
lb = 0
bp = self.beta * self.p
Z = corpus.A/corpus.B
lnZ = psi(corpus.A)-np.log(corpus.B)
for iter in xrange(10):
E_exp_wr = np.exp(np.dot(corpus.R, corpus.w)) # M x K
E_wr = np.dot(corpus.R,corpus.w) # M x K
old_lb = np.sum(-bp * E_wr - E_exp_wr*(Z))
del_w = np.zeros([corpus.J, self.K])
for ji in xrange(corpus.J):
for ki in xrange(corpus.K):
del_w[ji,ki] = np.sum(bp[ki]*corpus.R[:,ji] - corpus.R[:,ji]*E_exp_wr[:,ki]*Z[:,ki])
stepsize = 1.0/np.max(np.abs(del_w))
steps = np.logspace(-10,0)
ll = list()
for si in xrange(len(steps)):
step = steps[si]
new_w = corpus.w + step * stepsize * del_w
E_exp_wr = np.exp(np.dot(corpus.R, new_w))
E_wr = np.dot(corpus.R,new_w) # M x K
new_lb = np.sum(-bp * E_wr - E_exp_wr*(Z))
if np.isnan(new_lb):
break
ll.append(new_lb)
ll = np.array(ll)
idx = ll.argsort()[::-1][0]
corpus.w = corpus.w + steps[idx]*stepsize*del_w
print '\t%d w old new diff %f \t %f \t %f \t%f \t%f \t%f' %(iter, (ll[idx] - old_lb), stepsize, np.max(np.abs(del_w)), np.max(np.abs(del_w))*stepsize, np.max(corpus.w), np.min(corpus.w) )
if np.abs(ll[idx] - old_lb) < 0.1:
break
lb = np.sum(-bp * E_wr + (bp-1)*(lnZ) - E_exp_wr*(Z))
if self.is_verbose:
print 'p(w)-q(w) %f' % lb
return lb
#coordinate ascent for V
def update_V(self, corpus):
lb = 0
sumLnZ = np.sum(psi(corpus.A) - np.log(corpus.B), 0) # K dim
tmp = np.dot(corpus.R, corpus.w) # M x K
sum_r_w = np.sum(tmp, 0)
assert len(sum_r_w) == self.K
for i in xrange(self.c_a_max_step):
one_V = 1-self.V
stickLeft = self.getStickLeft(self.V) # prod(1-V_(dim-1))
p = self.V * stickLeft
psiV = psi(self.beta * p)
vVec = - self.beta*stickLeft*sum_r_w + self.beta*stickLeft*sumLnZ - corpus.M*self.beta*stickLeft*psiV;
for k in xrange(self.K):
tmp1 = self.beta*sum(sum_r_w[k+1:]*p[k+1:]/one_V[k]);
tmp2 = self.beta*sum(sumLnZ[k+1:]*p[k+1:]/one_V[k]);
tmp3 = corpus.M*self.beta*sum(psiV[k+1:]*p[k+1:]/one_V[k]);
vVec[k] = vVec[k] + tmp1 - tmp2;
vVec[k] = vVec[k] + tmp3;
vVec[k] = vVec[k]
vVec[:self.K-2] -= (self.alpha-1)/one_V[:self.K-2];
vVec[self.K-1] = 0;
step_stick = self.getstepSTICK(self.V,vVec,sum_r_w,sumLnZ,self.beta,self.alpha,corpus.M);
self.V = self.V + step_stick*vVec;
self.p = self.getP(self.V)
lb += self.K*gammaln(self.alpha+1) - self.K*gammaln(self.alpha) + np.sum((self.alpha-1)*np.log(1-self.V[:self.K-1]))
if self.is_verbose:
print 'p(V)-q(V) %f' % lb
return lb
def update_alpha(self):
old = (self.K-1) * gammaln(self.alpha + 1) - (self.K-1) * gammaln(self.alpha) + np.sum(self.alpha*(1-self.V[:-1])) + self.alpha_1*np.log(self.alpha_2) + (self.alpha_1 - 1)*np.log(self.alpha) - self.alpha_2 * self.alpha - gammaln(self.alpha_1)
self.alpha = (self.K + self.alpha_1 -2)/(self.alpha_2 - np.sum(np.log(1-self.V[:-1]+eps)))
new = (self.K-1) * gammaln(self.alpha + 1) - (self.K-1) * gammaln(self.alpha) + np.sum(self.alpha*(1-self.V[:-1])) + self.alpha_1*np.log(self.alpha_2) + (self.alpha_1 - 1)*np.log(self.alpha) - self.alpha_2 * self.alpha - gammaln(self.alpha_1)
if self.is_verbose:
print 'new alpha = %.2f, %.2f' % (self.alpha, (new-old))
def update_beta(self, corpus):
E_wr = np.dot(corpus.R, corpus.w) #M x K
lnZ = psi(corpus.A) - np.log(corpus.B)
first = self.p * E_wr
# since beta does not change much between iterations, a coarse grid search over relative step sizes is more efficient here
candidate = np.linspace(-1, 1, 31)
f = np.zeros(len(candidate))
for i in xrange(len(candidate)):
step = candidate[i]
new_beta = self.beta + self.beta*step
if new_beta < 0:
f[i] = -np.inf
else:
bp = new_beta * self.p
f[i] = np.sum(new_beta * first) + np.sum((bp - 1) * lnZ) - np.sum(corpus.M * gammaln(bp))
best_idx = f.argsort()[-1]
maxstep = candidate[best_idx]
self.beta += self.beta*maxstep
if self.is_verbose:
print 'new beta = %.2f, %.2f' % (self.beta, candidate[best_idx])
# line search for the step size along the stick-breaking gradient, keeping the updated V within [0, 1]
def getstepSTICK(self,curr,grad,sumMu,sumlnZ,beta,alpha,M):
_curr = curr[:len(curr)-1]
_grad = grad[:len(curr)-1]
_curr = _curr[_grad != 0]
_grad = _grad[_grad != 0]
step_zero = -_curr/_grad
step_one = (1-_curr)/_grad
min_zero = 1
min_one = 1
if(np.sum(step_zero>=0) > 0):
min_zero = min(step_zero[step_zero>=0])
if(np.sum(step_one>=0) > 0):
min_one = min(step_one[step_one>=0])
max_step = min([min_zero,min_one])
if max_step > 0:
step_check_vec = np.array([.01, .125, .25, .375, .5, .625, .75, .875 ])*max_step;
else:
step_check_vec = list();
f = np.zeros(len(step_check_vec));
for ite in xrange(len(step_check_vec)):
step_check = step_check_vec[ite];
vec_check = curr + step_check*grad;
p = self.getP(vec_check)
f[ite] = -np.sum(beta*p*sumMu) - M*np.sum(gammaln(beta*p)) + np.sum((beta*p-1)*sumlnZ) + (alpha-1.)*np.sum(np.log(1.-vec_check[:-1]+eps))
if len(f) != 0:
b = f.argsort()[-1]
step = step_check_vec[b]
else:
step = 0;
if len(f) != 0 and b == 0:  # guard: b is undefined when no candidate steps were evaluated
rho = .5;
bool = 1;
fold = f[b];
while bool:
step = rho*step;
vec_check = curr + step*grad;
tmp = np.zeros(vec_check.size)
tmp[1:] = vec_check[:-1]
p = vec_check * np.cumprod(1-tmp)
fnew = -np.sum(beta*p*sumMu) - M*np.sum(gammaln(beta*p)) + np.sum((beta*p-1)*sumlnZ) + (alpha-1.)*np.sum(np.log(1.-vec_check[:-1]+eps))
if fnew > fold:
fold = fnew
else:
bool = 0
step = step/rho
return step
def write_top_words(self, corpus, filepath):
with open(filepath + '/final_top_words.csv', 'w') as f:
posterior_topic_count = np.sum(self.gamma, 0)
topic_rank = posterior_topic_count.argsort()[::-1]
for ti in topic_rank:
top_words = corpus.vocab[self.gamma[:,ti].argsort()[::-1][:20]]
f.write( '%d,%f' % (ti, self.p[ti]) )
for word in top_words:
f.write(',' + word)
f.write('\n')
def write_label_top_words(self, corpus, filepath):
bp = self.beta * self.p
with open(filepath + '/final_label_top_words.csv', 'w') as f, open(filepath + '/final_label_top_words_all.csv', 'w') as f2:
mean = corpus.w
for li in xrange(corpus.J):
for ki in xrange(corpus.K):
top_words = corpus.vocab[self.gamma[:,ki].argsort()[::-1][:20]]
f2.write('%s,%d,%f' % (corpus.label_names[li].replace(',',' '), ki, mean[li,ki]*bp[ki]))
for word in top_words:
f2.write(',' + word)
f2.write('\n')
min_topic = mean[li,:].argsort()[0]
max_topic = mean[li,:].argsort()[-1]
top_words = corpus.vocab[self.gamma[:,min_topic].argsort()[::-1][:20]]
f.write('min,%s,%f'%(corpus.label_names[li].replace(',',' '), mean[li,min_topic] ))
for word in top_words:
f.write(',' + word)
f.write('\n')
f.write('max,%s,%f'%(corpus.label_names[li].replace(',',' '), mean[li,max_topic] ))
top_words = corpus.vocab[self.gamma[:,max_topic].argsort()[::-1][:20]]
for word in top_words:
f.write(',' + word)
f.write('\n')
def save_result(self, folder, corpus):
import os, cPickle
if not os.path.exists(folder):
os.mkdir(folder)
np.savetxt(folder+'/final_w.csv', corpus.w, delimiter=',')
np.savetxt(folder+'/final_V.csv', self.V, delimiter=',')
np.savetxt(folder+'/gamma.csv', self.gamma, delimiter=',')
np.savetxt(folder+'/A.csv',corpus.A, delimiter=',')
np.savetxt(folder+'/B.csv',corpus.B, delimiter=',')
self.write_top_words(corpus, folder)
self.write_label_top_words(corpus, folder)
#cPickle.dump([self,corpus], open(folder+'/model_corpus.pkl','w'))
def heldout_perplexity(self, corpus):
num_hdoc = len(corpus.heldout_ids)
topic = self.gamma/np.sum(self.gamma, 0)
mean = corpus.w
bp = self.beta * self.p
perp = 0
cnt_sum = 0
wr = np.dot(corpus.heldout_responses, corpus.w) # m x k
for di in xrange(num_hdoc):
doc = corpus.heldout_ids[di]
cnt = corpus.heldout_cnt[di]
Z = np.zeros(self.K)
Z = bp / np.exp(wr[di,:])
Z /= np.sum(Z)
if np.sum(cnt) != 0:
perp -= np.sum(np.log(np.dot(topic[doc,:], Z) + eps) * cnt)
cnt_sum += np.sum(cnt)
return np.exp(perp/cnt_sum)
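# The held-out perplexity above follows the usual definition,
#   perplexity = exp( - sum_{d,w} n_{d,w} * log p(w|d) / sum_{d,w} n_{d,w} ),
# where p(w|d) is approximated by dotting the column-normalized topic-word
# matrix with the expected topic proportions Z of the held-out document.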
class hdsp_corpus:
def __init__(self, vocab, word_ids, word_cnt, num_topics, labels, label_names = None, heldout_ids = None, heldout_cnt = None, heldout_responses = None):
if type(vocab) == list:
self.vocab = np.array(vocab)
else:
self.vocab = vocab
if type(word_ids[0]) != np.ndarray:
tmp_ids = list()
tmp_cnt = list()
for ids in word_ids:
tmp_ids.append(np.array(ids))
for cnt in word_cnt:
tmp_cnt.append(np.array(cnt))
word_ids = tmp_ids
word_cnt = tmp_cnt
if label_names == None:
label_names = [str(i) for i in xrange(labels.shape[1])]
self.word_ids = word_ids
self.word_cnt = word_cnt
self.R = labels # M x J matrix
self.K = num_topics #num topics
self.N = len(vocab) #num voca
self.M = len(word_ids) #num documents
self.J = labels.shape[1]
self.A = np.random.gamma(shape=1, scale=1, size=[self.M,self.K])
self.B = np.random.gamma(shape=1, scale=1, size=[self.M,self.K])
self.w = np.zeros([self.J, self.K])
self.r_j = np.sum(self.R, 0)
self.label_names = label_names
self.heldout_ids = heldout_ids
self.heldout_cnt = heldout_cnt
self.heldout_responses = heldout_responses
self.Nm = np.zeros(self.M)
for i in xrange(self.M):
self.Nm[i] = np.sum(word_cnt[i])
def plot_expected_topics(model, corpus, labels, save_path=None, num_words = 10, num_topics = 20):
"""plot expected topics
"""
import matplotlib.pyplot as plt
if model.K < num_topics:
num_topics = model.K
if corpus.N < num_words:
num_words = corpus.N
legend_size = 15
word_size = 10
width = 20
height = 3
wr = np.exp(np.dot(labels, corpus.w))
Z = model.p / (wr)
Z /= np.sum(Z) #expected topic proportion given 'labels'
rank = model.p.argsort()[::-1]
fig = plt.gcf()
fig.set_size_inches(width,height)
ax = plt.gca()
l_names = ['%s:%.2f'%(corpus.label_names[i],labels[i]) for i in xrange(0,corpus.J) if labels[i] != 0]
plt.bar(range(0,num_topics), Z[rank[:num_topics]], label='label={%s}'%(', '.join(l_names)), alpha=0.5)
plt.legend(prop={'size':legend_size})
ax.set_xticks(np.arange(num_topics)+0.4)
ax.set_xticklabels(['\n'.join(corpus.vocab[model.gamma[:,i].argsort()[::-1][:num_words]]) for i in rank[:num_topics]], size=word_size)
plt.plot()
if save_path:
plt.savefig(save_path, format='PDF', bbox_inches='tight', dpi=720)
def test():
#corpus parameters
num_topics = 5
num_words = 6
num_labels = 2
num_docs = 3
voca = [str(i) for i in xrange(num_words)]
corpus_ids = [[0,1,2],[1,2,3],[3,4,5]] # word ids for each document
corpus_cnt = [[2,3,1],[1,3,2],[3,2,1]] # word count corresponding to word ids for each document
labels = np.random.random([num_docs,num_labels])
#model parameters
max_iter = 10
output_dir = 'result'
corpus = hdsp_corpus(voca, corpus_ids, corpus_cnt, num_topics, labels)
model = hdsp(num_topics, num_words, num_labels)
model.run_variational_EM(max_iter, corpus, output_dir) # run variational inference
plot_expected_topics(model, corpus, labels[0], save_path='result/expected_topics.pdf')
if __name__ == '__main__':
#test with toy problem
test()
| apache-2.0 |
duane-edgington/stoqs | stoqs/contrib/analysis/crossproduct_biplots.py | 3 | 8932 | #!/usr/bin/env python
'''
Script to create biplots of a cross product of all Parameters in a database.
Mike McCann
MBARI 10 February 2014
'''
import os
import sys
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE']='settings'
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../")) # settings.py is one dir up
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
from utils.utils import round_to_n, pearsonr
from textwrap import wrap
from numpy import polyfit
from pylab import polyval
from contrib.analysis import BiPlot, NoPPDataException, NoTSDataException
class CrossProductBiPlot(BiPlot):
'''
Make customized BiPlots (Parameter Parameter plots) for platforms from STOQS.
'''
def getFileName(self, figCount):
'''
Construct plot file name
'''
fileName = 'cpBiPlot_%02d' % figCount
if self.args.daytime:
fileName += '_day'
if self.args.nighttime:
fileName += '_night'
fileName += '.png'
fileName = os.path.join(self.args.plotDir, self.args.plotPrefix + fileName)
return fileName
def saveFigure(self, fig, figCount):
'''
Save this page
'''
provStr = 'Created with STOQS command ' + '\\\n'.join(wrap(self.commandline, width=160)) + ' on ' + datetime.now().ctime()
plt.figtext(0.0, 0.0, provStr, size=7, horizontalalignment='left', verticalalignment='bottom')
plt.tight_layout()
if self.args.title:
fig.text(0.5, 0.975, self.args.title, horizontalalignment='center', verticalalignment='top')
fileName = self.getFileName(figCount)
print('Saving file', fileName)
fig.savefig(fileName)
def makeCrossProductBiPlots(self):
'''
Cycle through Parameters in alphabetical order and make biplots against each of the other parameters
Parameters can be restricted with --ignore, --sampled, and --r2_greater arguments.
'''
allActivityStartTime, allActivityEndTime, allExtent = self._getActivityExtent(self.args.platform)
allParmsHash = self._getParametersPlatformHash(ignoreNames=self.args.ignore)
setList = []
if self.args.sampled:
xParmsHash = self._getParametersPlatformHash(groupNames=['Sampled'], ignoreNames=self.args.ignore)
else:
xParmsHash = allParmsHash
axisNum = 1
figCount = 1
newFigFlag = True
xpList = list(xParmsHash.keys())
xpList.sort(key=lambda p: p.name.lower())
for xP in xpList:
xPlats = xParmsHash[xP]
if self.args.verbose: print(xP.name)
ypList = list(allParmsHash.keys())
ypList.sort(key=lambda p: p.name.lower())
for yP in ypList:
yPlats = allParmsHash[yP]
commonPlatforms = xPlats.intersection(yPlats)
if xP.name == yP.name or set((xP.name, yP.name)) in setList or not commonPlatforms:
continue
if self.args.verbose: print('\t%s' % yP.name)
try:
x, y, points = self._getPPData(None, None, None, xP.name, yP.name, {})
except (NoPPDataException, TypeError) as e:
if self.args.verbose: print(f"\tCan't plot {yP.name}: {str(e)}")
continue
# Assess the correlation
try:
m, b = polyfit(x, y, 1)
except ValueError as e:
if self.args.verbose: print(f"\tCan't polyfit {yP.name}: {str(e)}")
continue
yfit = polyval([m, b], x)
r = np.corrcoef(x, y)[0,1]
r2 = r**2
pr = pearsonr(x, y)
if r2 < self.args.r2_greater or len(x) < self.args.n_greater:
continue
if newFigFlag:
fig = plt.figure(figsize=(9, 9))
newFigFlag = False
# Make subplot
ax = fig.add_subplot(self.args.nrow, self.args.ncol, axisNum)
ax.scatter(x, y, marker='.', s=3, c='k')
ax.plot(x, yfit, color='k', linewidth=0.5)
if not self.args.ticklabels:
ax.set_xticklabels([])
ax.set_yticklabels([])
if self.args.units:
ax.set_xlabel('%s (%s)' % (xP.name, xP.units))
ax.set_ylabel('%s (%s)' % (yP.name, yP.units))
else:
ax.set_xlabel(xP.name)
ax.set_ylabel(yP.name)
statStr = '$r^2 = %.3f$\n$n = %d$' % (r2, len(x))
ax.text(0.65, 0.05, statStr, size=8, transform=ax.transAxes, horizontalalignment='left', verticalalignment='bottom')
platStr = '\n'.join([pl.name for pl in commonPlatforms])
ax.text(0.05, 0.95, platStr, size=8, transform=ax.transAxes, horizontalalignment='left', verticalalignment='top')
# Save this pair so that we don't plot it again, even with axes reversed
setList.append(set((xP.name, yP.name)))
axisNum += 1
if axisNum > self.args.nrow * self.args.ncol:
self.saveFigure(fig, figCount)
newFigFlag = True
axisNum = 1
figCount += 1
# End for yP in ypList
# Save last set of subplots
self.saveFigure(fig, figCount)
print('Done.')
def process_command_line(self):
'''
The argparse library is included in Python 2.7 and is an added package for STOQS.
'''
import argparse
from argparse import RawTextHelpFormatter
examples = 'Examples:' + '\n\n'
examples += sys.argv[0] + ' -d default\n'
examples += sys.argv[0] + ' -d stoqs_simz_aug2013 --ignore mass_concentration_of_chlorophyll_in_sea_water --sampled --r2_greater 0.6\n'
examples += '\nIf running from cde-package replace ".py" with ".py.cde" in the above list.'
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,
description='Read Parameter-Parameter data from a STOQS database and make bi-plots',
epilog=examples)
parser.add_argument('-p', '--platform', action='store', help='One or more platform names separated by spaces', nargs='*')
parser.add_argument('-d', '--database', action='store', help='Database alias', default='stoqs_september2013_o', required=True)
parser.add_argument('--daytime', action='store_true', help='Select only daytime hours: 10 am to 2 pm local time')
parser.add_argument('--nighttime', action='store_true', help='Select only nighttime hours: 10 pm to 2 am local time')
parser.add_argument('--minDepth', action='store', help='Minimum depth for data queries', default=None, type=float)
parser.add_argument('--maxDepth', action='store', help='Maximum depth for data queries', default=None, type=float)
parser.add_argument('--sampled', action='store_true', help='Compare Sampled Parameters to every other Parameter')
parser.add_argument('--r2_greater', action='store', help='Plot only correlations with r^2 greater than this value', default=0.0, type=float)
parser.add_argument('--n_greater', action='store', help='Plot only correlations with n greater than this value', default=1, type=int)
parser.add_argument('--ignore', action='store', help='Ignore these Parameter names', nargs='*')
parser.add_argument('--nrow', action='store', help='Number of subplots in a column', default=4, type=int)
parser.add_argument('--ncol', action='store', help='Number of subplots in a row', default=4, type=int)
parser.add_argument('--ticklabels', action='store_true', help='Label ticks')
parser.add_argument('--units', action='store_true', help='Add (units) to axis names')
parser.add_argument('--plotDir', action='store', help='Directory where to write the plot output', default='.')
parser.add_argument('--plotPrefix', action='store', help='Prefix to use in naming plot files', default='')
parser.add_argument('--title', action='store', help='Title to appear on top of plot')
parser.add_argument('-v', '--verbose', nargs='?', choices=[1,2,3], type=int, help='Turn on verbose output. Higher number = more output.', const=1, default=0)
self.args = parser.parse_args()
self.commandline = ' '.join(sys.argv)
if __name__ == '__main__':
bp = CrossProductBiPlot()
bp.process_command_line()
bp.makeCrossProductBiPlots()
| gpl-3.0 |
dphang/sage | dota/learner/learner.py | 1 | 1348 | """
Uses scikit-learn to train a knn classifier on a set of labeled replay data, in JSON format. We can then use this classifier
to classify similar important events in future replays.
Events have a few labels:
farm: hero is simply hitting creeps to gain experience and gold. This is the default label when no other event applies.
gank: hero is attempting to ambush another hero
harass: hero casts a spell or attacks an enemy
teamfight: there is a teamfight
push: either team is pushing into the enemy's base
Based on these labels, this program will attempt to discover new events.
"""
from sklearn import neighbors, preprocessing
class Learner:
"""
Learner using k nearest neighbors algorithm.
"""
def __init__(self):
self.nbrs = neighbors.KNeighborsClassifier(n_neighbors=1)
self.scaler = preprocessing.data.StandardScaler()
self.training_data = None
def train(self, features, labels):
"""
Train this learner on training data using k nearest neighbors
"""
features = self.scaler.fit_transform(features)
self.nbrs.fit(features, labels)
def classify(self, features):
"""
Classifies these examples
"""
features = self.scaler.transform(features)
return self.nbrs.predict(features)
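# Hypothetical usage sketch (not part of the original module): the toy feature
# vectors and labels below are invented for illustration; real features would
# be extracted from parsed replay events before training.
if __name__ == '__main__':
    learner = Learner()
    # one row of numeric features per labeled event
    train_features = [[0.1, 0.0, 2.0],
                      [5.0, 1.0, 0.0],
                      [0.2, 0.1, 3.0]]
    train_labels = ['farm', 'gank', 'farm']
    learner.train(train_features, train_labels)
    # classify a new, unseen event with the 1-nearest-neighbour model
    print(learner.classify([[0.15, 0.05, 2.5]]))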
| mit |
arabenjamin/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 213 | 11911 | import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent value as promised in the doc but the lowest most frequent one.
# If this test fails after a scipy update, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = grid_search.GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
jorge2703/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
                              err_msg="Clusters should not be centered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |