repo_name | path | copies | size | content | license
---|---|---|---|---|---|
surhudm/scipy | scipy/interpolate/_cubic.py | 8 | 29300 | """Interpolation algorithms using piecewise cubic polynomials."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import string_types
from . import BPoly, PPoly
from .polyint import _isscalar
from scipy._lib._util import _asarray_validated
from scipy.linalg import solve_banded, solve
__all__ = ["PchipInterpolator", "pchip_interpolate", "pchip",
"Akima1DInterpolator", "CubicSpline"]
class PchipInterpolator(BPoly):
r"""PCHIP 1-d monotonic cubic interpolation.
`x` and `y` are arrays of values used to approximate some function f,
with ``y = f(x)``. The interpolant uses monotonic cubic splines
to find the value of new points. (PCHIP stands for Piecewise Cubic
Hermite Interpolating Polynomial).
Parameters
----------
x : ndarray
A 1-D array of monotonically increasing real values. `x` cannot
include duplicate values (otherwise f is overspecified).
y : ndarray
A 1-D array of real values. `y`'s length along the interpolation
axis must be equal to the length of `x`. If N-D array, use `axis`
parameter to select correct axis.
axis : int, optional
Axis in the y array corresponding to the x-coordinate values.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
Akima1DInterpolator
CubicSpline
BPoly
Notes
-----
The interpolator preserves monotonicity in the interpolation data and does
not overshoot if the data is not smooth.
The first derivatives are guaranteed to be continuous, but the second
derivatives may jump at :math:`x_k`.
Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
by using PCHIP algorithm [1]_.
Let :math:`h_k = x_{k+1} - x_k` and :math:`d_k = (y_{k+1} - y_k) / h_k`
be the slopes at internal points :math:`x_k`.
If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
weighted harmonic mean
.. math::
\frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
The end slopes are set using a one-sided scheme [2]_.
References
----------
.. [1] F. N. Fritsch and R. E. Carlson, Monotone Piecewise Cubic Interpolation,
SIAM J. Numer. Anal., 17(2), 238 (1980).
DOI:10.1137/0717021
.. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
DOI: http://dx.doi.org/10.1137/1.9780898717952
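Examples
--------
A minimal usage sketch on made-up monotone data (the data values are
illustrative only):
>>> import numpy as np
>>> from scipy.interpolate import PchipInterpolator
>>> x = np.array([0., 1., 2., 3., 4.])
>>> y = np.array([0., 0.5, 2.0, 2.1, 4.0])
>>> interp = PchipInterpolator(x, y)
>>> xs = np.linspace(0, 4, 21)
>>> ys = interp(xs) # interpolated values, monotone like the data
>>> dys = interp.derivative()(xs) # first derivative, continuous at x_k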
"""
def __init__(self, x, y, axis=0, extrapolate=None):
x = _asarray_validated(x, check_finite=False, as_inexact=True)
y = _asarray_validated(y, check_finite=False, as_inexact=True)
axis = axis % y.ndim
xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
yp = np.rollaxis(y, axis)
dk = self._find_derivatives(xp, yp)
data = np.hstack((yp[:, None, ...], dk[:, None, ...]))
_b = BPoly.from_derivatives(x, data, orders=None)
super(PchipInterpolator, self).__init__(_b.c, _b.x,
extrapolate=extrapolate)
self.axis = axis
def roots(self):
"""
Return the roots of the interpolated function.
"""
return (PPoly.from_bernstein_basis(self._bpoly)).roots()
@staticmethod
def _edge_case(h0, h1, m0, m1):
# one-sided three-point estimate for the derivative
d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
# try to preserve shape
mask = np.sign(d) != np.sign(m0)
mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
mmm = (~mask) & mask2
d[mask] = 0.
d[mmm] = 3.*m0[mmm]
return d
@staticmethod
def _find_derivatives(x, y):
# Determine the derivatives d_k at the points x_k using the PCHIP
# algorithm:
# Let m_k be the slope of the kth segment (between k and k+1)
# If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
# else use weighted harmonic mean:
# w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
# 1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
# where h_k is the spacing between x_k and x_{k+1}
y_shape = y.shape
if y.ndim == 1:
# So that _edge_case doesn't end up assigning to scalars
x = x[:, None]
y = y[:, None]
hk = x[1:] - x[:-1]
mk = (y[1:] - y[:-1]) / hk
if y.shape[0] == 2:
# edge case: only have two points, use linear interpolation
dk = np.zeros_like(y)
dk[0] = mk
dk[1] = mk
return dk.reshape(y_shape)
smk = np.sign(mk)
condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
w1 = 2*hk[1:] + hk[:-1]
w2 = hk[1:] + 2*hk[:-1]
# values where division by zero occurs will be excluded
# by 'condition' afterwards
with np.errstate(divide='ignore'):
whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
dk = np.zeros_like(y)
dk[1:-1][condition] = 0.0
dk[1:-1][~condition] = 1.0 / whmean[~condition]
# special case endpoints, as suggested in
# Cleve Moler, Numerical Computing with MATLAB, Chap 3.4
dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
return dk.reshape(y_shape)
def pchip_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for pchip interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
to find the value of new points x and the derivatives there.
See `PchipInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
Derivatives to extract. The 0-th derivative can be included to
return the function value.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
PchipInterpolator
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R.
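Examples
--------
A small illustrative call (the data values are arbitrary):
>>> import numpy as np
>>> from scipy.interpolate import pchip_interpolate
>>> xi = np.array([0., 1., 2., 3., 4.])
>>> yi = np.array([0., 1., 1., 2., 4.])
>>> x = np.linspace(0, 4, 9)
>>> y = pchip_interpolate(xi, yi, x) # interpolated values
>>> dy = pchip_interpolate(xi, yi, x, der=1) # first derivative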
"""
P = PchipInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(der)(x)
else:
return [P.derivative(nu)(x) for nu in der]
# Backwards compatibility
pchip = PchipInterpolator
class Akima1DInterpolator(PPoly):
"""
Akima interpolator
Fit piecewise cubic polynomials, given vectors x and y. The interpolation
method by Akima uses a continuously differentiable sub-spline built from
piecewise cubic polynomials. The resultant curve passes through the given
data points and will appear smooth and natural.
Parameters
----------
x : ndarray, shape (m, )
1-D array of monotonically increasing real values.
y : ndarray, shape (m, ...)
N-D array of real values. The length of `y` along the first axis must
be equal to the length of `x`.
axis : int, optional
Specifies the axis of `y` along which to interpolate. Interpolation
defaults to the first axis of `y`.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
PchipInterpolator
CubicSpline
PPoly
Notes
-----
.. versionadded:: 0.14
Use only for precise data, as the fitted curve passes through the given
points exactly. This routine is useful for plotting a pleasingly smooth
curve through a few given points.
References
----------
.. [1] Hiroshi Akima, "A new method of interpolation and smooth curve
fitting based on local procedures", J. ACM, 17(4), 589-602 (October 1970).
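Examples
--------
A short usage sketch with synthetic data (for illustration only):
>>> import numpy as np
>>> from scipy.interpolate import Akima1DInterpolator
>>> x = np.arange(10.)
>>> y = np.sin(x)
>>> interp = Akima1DInterpolator(x, y)
>>> xs = np.linspace(0.5, 8.5, 50)
>>> ys = interp(xs) # smooth curve through the given points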
"""
def __init__(self, x, y, axis=0):
# Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
# http://www.mathworks.de/matlabcentral/fileexchange/1814-akima-interpolation
x, y = map(np.asarray, (x, y))
axis = axis % y.ndim
if np.any(np.diff(x) < 0.):
raise ValueError("x must be strictly ascending")
if x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if x.size != y.shape[axis]:
raise ValueError("x.shape must equal y.shape[%s]" % axis)
# move interpolation axis to front
y = np.rollaxis(y, axis)
# determine slopes between breakpoints
m = np.empty((x.size + 3, ) + y.shape[1:])
dx = np.diff(x)
dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
m[2:-2] = np.diff(y, axis=0) / dx
# add two additional points on the left ...
m[1] = 2. * m[2] - m[3]
m[0] = 2. * m[1] - m[2]
# ... and on the right
m[-2] = 2. * m[-3] - m[-4]
m[-1] = 2. * m[-2] - m[-3]
# if m1 == m2 != m3 == m4, the slope at the breakpoint is not defined.
# This is the fill value:
t = .5 * (m[3:] + m[:-3])
# get the denominator of the slope t
dm = np.abs(np.diff(m, axis=0))
f1 = dm[2:]
f2 = dm[:-2]
f12 = f1 + f2
# This is the mask of where the slope at a breakpoint is defined:
ind = np.nonzero(f12 > 1e-9 * np.max(f12))
x_ind, y_ind = ind[0], ind[1:]
# Set the slope at breakpoint
t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
# calculate the higher order coefficients
c = (3. * m[2:-2] - 2. * t[:-1] - t[1:]) / dx
d = (t[:-1] + t[1:] - 2. * m[2:-2]) / dx ** 2
coeff = np.zeros((4, x.size - 1) + y.shape[1:])
coeff[3] = y[:-1]
coeff[2] = t[:-1]
coeff[1] = c
coeff[0] = d
super(Akima1DInterpolator, self).__init__(coeff, x, extrapolate=False)
self.axis = axis
def extend(self, c, x, right=True):
raise NotImplementedError("Extending a 1D Akima interpolator is not "
"yet implemented")
# These are inherited from PPoly, but they do not produce an Akima
# interpolator. Hence stub them out.
@classmethod
def from_spline(cls, tck, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
class CubicSpline(PPoly):
"""Cubic spline data interpolator.
Interpolate data with a piecewise cubic polynomial which is twice
continuously differentiable [1]_. The result is represented as a `PPoly`
instance with breakpoints matching the given data.
Parameters
----------
x : array_like, shape (n,)
1-d array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
y : array_like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along `axis` (see below)
must match the length of `x`. Values must be finite.
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
bc_type : string or 2-tuple, optional
Boundary condition type. Two additional equations, given by the
boundary conditions, are required to determine all coefficients of
polynomials on each segment [2]_.
If `bc_type` is a string, then the specified condition will be applied
at both ends of a spline. Available conditions are:
* 'not-a-knot' (default): The first and second segment at a curve end
are the same polynomial. It is a good default when there is no
information on boundary conditions.
* 'periodic': The interpolated function is assumed to be periodic
of period ``x[-1] - x[0]``. The first and last value of `y` must be
identical: ``y[0] == y[-1]``. This boundary condition will result in
``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
* 'clamped': The first derivatives at the curve ends are zero. Assuming
a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
* 'natural': The second derivatives at the curve ends are zero. Assuming
a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
If `bc_type` is a 2-tuple, the first and the second value will be
applied at the curve start and end respectively. The tuple values can
be one of the previously mentioned strings (except 'periodic') or a
tuple `(order, deriv_value)` allowing one to specify arbitrary
derivatives at curve ends:
* `order`: the derivative order, 1 or 2.
* `deriv_value`: array_like containing derivative values, shape must
be the same as `y`, excluding `axis` dimension. For example, if `y`
is 1D, then `deriv_value` must be a scalar. If `y` is 3D with the
shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D
and have the shape (n0, n1).
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), `extrapolate` is
set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
Attributes
----------
x : ndarray, shape (n,)
Breakpoints. The same `x` which was passed to the constructor.
c : ndarray, shape (4, n-1, ...)
Coefficients of the polynomials on each segment. The trailing
dimensions match the dimensions of `y`, excluding `axis`. For example,
if `y` is 1-d, then ``c[k, i]`` is a coefficient for
``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
axis : int
Interpolation axis. The same `axis` which was passed to the
constructor.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
See Also
--------
Akima1DInterpolator
PchipInterpolator
PPoly
Notes
-----
Parameters `bc_type` and `extrapolate` work independently, i.e. the former
controls only construction of a spline, and the latter only evaluation.
When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
a condition that the first derivative is equal to the linear interpolant
slope. When both boundary conditions are 'not-a-knot' and n = 3, the
solution is sought as a parabola passing through given points.
When the 'not-a-knot' boundary condition is applied to both ends, the
resulting spline will be the same as returned by `splrep` (with ``s=0``)
and `InterpolatedUnivariateSpline`, but these two methods use a
representation in B-spline basis.
.. versionadded:: 0.18.0
Examples
--------
In this example the cubic spline is used to interpolate a sampled sinusoid.
You can see that the spline continuity property holds for the first and
second derivatives, and is violated only for the third derivative.
>>> from scipy.interpolate import CubicSpline
>>> import matplotlib.pyplot as plt
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> cs = CubicSpline(x, y)
>>> xs = np.arange(-0.5, 9.6, 0.1)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(x, y, 'o', label='data')
>>> plt.plot(xs, np.sin(xs), label='true')
>>> plt.plot(xs, cs(xs), label="S")
>>> plt.plot(xs, cs(xs, 1), label="S'")
>>> plt.plot(xs, cs(xs, 2), label="S''")
>>> plt.plot(xs, cs(xs, 3), label="S'''")
>>> plt.xlim(-0.5, 9.5)
>>> plt.legend(loc='lower left', ncol=2)
>>> plt.show()
In the second example, the unit circle is interpolated with a spline. A
periodic boundary condition is used. You can see that the first derivative
values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
computed. Note that a circle cannot be exactly represented by a cubic
spline. To increase precision, more breakpoints would be required.
>>> theta = 2 * np.pi * np.linspace(0, 1, 5)
>>> y = np.c_[np.cos(theta), np.sin(theta)]
>>> cs = CubicSpline(theta, y, bc_type='periodic')
>>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
ds/dx=0.0 ds/dy=1.0
>>> xs = 2 * np.pi * np.linspace(0, 1, 100)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(y[:, 0], y[:, 1], 'o', label='data')
>>> plt.plot(np.cos(xs), np.sin(xs), label='true')
>>> plt.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
>>> plt.axes().set_aspect('equal')
>>> plt.legend(loc='center')
>>> plt.show()
The third example is the interpolation of a polynomial y = x**3 on the
interval 0 <= x <= 1. A cubic spline can represent this function exactly.
To achieve that we need to specify values and first derivatives at
endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
y'(1) = 3.
>>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
>>> x = np.linspace(0, 1)
>>> np.allclose(x**3, cs(x))
True
References
----------
.. [1] `Cubic Spline Interpolation
<https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
on Wikiversity.
.. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
"""
def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
x, y = map(np.asarray, (x, y))
if np.issubdtype(x.dtype, np.complexfloating):
raise ValueError("`x` must contain real values.")
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
y = y.astype(dtype, copy=False)
axis = axis % y.ndim
if x.ndim != 1:
raise ValueError("`x` must be 1-dimensional.")
if x.shape[0] < 2:
raise ValueError("`x` must contain at least 2 elements.")
if x.shape[0] != y.shape[axis]:
raise ValueError("The length of `y` along `axis`={0} doesn't "
"match the length of `x`".format(axis))
if not np.all(np.isfinite(x)):
raise ValueError("`x` must contain only finite values.")
if not np.all(np.isfinite(y)):
raise ValueError("`y` must contain only finite values.")
dx = np.diff(x)
if np.any(dx <= 0):
raise ValueError("`x` must be strictly increasing sequence.")
n = x.shape[0]
y = np.rollaxis(y, axis)
bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
if extrapolate is None:
if bc[0] == 'periodic':
extrapolate = 'periodic'
else:
extrapolate = True
dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
slope = np.diff(y, axis=0) / dxr
# If bc is 'not-a-knot' this change is just a convention.
# If bc is 'periodic' then we already checked that y[0] == y[-1],
# and the spline is just a constant, we handle this case in the same
# way by setting the first derivatives to slope, which is 0.
if n == 2:
if bc[0] in ['not-a-knot', 'periodic']:
bc[0] = (1, slope[0])
if bc[1] in ['not-a-knot', 'periodic']:
bc[1] = (1, slope[0])
# This is a very special case, when both conditions are 'not-a-knot'
# and n == 3. In this case 'not-a-knot' can't be handled regularly
# as both conditions are identical. We handle this case by
# constructing a parabola passing through given points.
if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
A = np.zeros((3, 3)) # This is a standard matrix.
b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
A[0, 0] = 1
A[0, 1] = 1
A[1, 0] = dx[1]
A[1, 1] = 2 * (dx[0] + dx[1])
A[1, 2] = dx[0]
A[2, 1] = 1
A[2, 2] = 1
b[0] = 2 * slope[0]
b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
b[2] = 2 * slope[1]
s = solve(A, b, overwrite_a=True, overwrite_b=True,
check_finite=False)
else:
# Find derivative values at each x[i] by solving a tridiagonal
# system.
A = np.zeros((3, n)) # This is a banded matrix representation.
b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
# Filling the system for i=1..n-2
# (x[i+1] - x[i]) * s[i-1] +\
# 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\
# (x[i] - x[i-1]) * s[i+1] =\
# 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
# (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal
A[0, 2:] = dx[:-1] # The upper diagonal
A[-1, :-2] = dx[1:] # The lower diagonal
b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
bc_start, bc_end = bc
if bc_start == 'periodic':
# Due to the periodicity, and because y[-1] = y[0], the linear
# system has (n-1) unknowns/equations instead of n:
A = A[:, 0:-1]
A[1, 0] = 2 * (dx[-1] + dx[0])
A[0, 1] = dx[-1]
b = b[:-1]
# Also, due to the periodicity, the system is not tri-diagonal.
# We need to compute a "condensed" matrix of shape (n-2, n-2).
# See http://www.cfm.brown.edu/people/gk/chap6/node14.html for
# more explanations.
# The condensed matrix is obtained by removing the last column
# and last row of the (n-1, n-1) system matrix. The removed
# values are saved in scalar variables with the (n-1, n-1)
# system matrix indices forming their names:
a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]
a_m1_m2 = dx[-1]
a_m1_m1 = 2 * (dx[-1] + dx[-2])
a_m2_m1 = dx[-2]
a_0_m1 = dx[0]
b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
Ac = A[:, :-1]
b1 = b[:-1]
b2 = np.zeros_like(b1)
b2[0] = -a_0_m1
b2[-1] = -a_m2_m1
# s1 and s2 are the solutions of (n-2, n-2) system
s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
overwrite_b=False, check_finite=False)
s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
overwrite_b=False, check_finite=False)
# computing the s[n-2] solution:
s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
(a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
# s is the solution of the (n, n) system:
s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
s[:-2] = s1 + s_m1 * s2
s[-2] = s_m1
s[-1] = s[0]
else:
if bc_start == 'not-a-knot':
A[1, 0] = dx[1]
A[0, 1] = x[2] - x[0]
d = x[2] - x[0]
b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
dxr[0]**2 * slope[1]) / d
elif bc_start[0] == 1:
A[1, 0] = 1
A[0, 1] = 0
b[0] = bc_start[1]
elif bc_start[0] == 2:
A[1, 0] = 2 * dx[0]
A[0, 1] = dx[0]
b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
if bc_end == 'not-a-knot':
A[1, -1] = dx[-2]
A[-1, -2] = x[-1] - x[-3]
d = x[-1] - x[-3]
b[-1] = ((dxr[-1]**2*slope[-2] +
(2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
elif bc_end[0] == 1:
A[1, -1] = 1
A[-1, -2] = 0
b[-1] = bc_end[1]
elif bc_end[0] == 2:
A[1, -1] = 2 * dx[-1]
A[-1, -2] = dx[-1]
b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
s = solve_banded((1, 1), A, b, overwrite_ab=True,
overwrite_b=True, check_finite=False)
# Compute coefficients in PPoly form.
t = (s[:-1] + s[1:] - 2 * slope) / dxr
c = np.empty((4, n - 1) + y.shape[1:], dtype=t.dtype)
c[0] = t / dxr
c[1] = (slope - s[:-1]) / dxr - t
c[2] = s[:-1]
c[3] = y[:-1]
super(CubicSpline, self).__init__(c, x, extrapolate=extrapolate)
self.axis = axis
@staticmethod
def _validate_bc(bc_type, y, expected_deriv_shape, axis):
"""Validate and prepare boundary conditions.
Returns
-------
validated_bc : 2-tuple
Boundary conditions for a curve start and end.
y : ndarray
y cast to complex dtype if one of the boundary conditions has
complex dtype.
"""
if isinstance(bc_type, string_types):
if bc_type == 'periodic':
if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
raise ValueError(
"The first and last `y` point along axis {} must "
"be identical (within machine precision) when "
"bc_type='periodic'.".format(axis))
bc_type = (bc_type, bc_type)
else:
if len(bc_type) != 2:
raise ValueError("`bc_type` must contain 2 elements to "
"specify start and end conditions.")
if 'periodic' in bc_type:
raise ValueError("'periodic' `bc_type` is defined for both "
"curve ends and cannot be used with other "
"boundary conditions.")
validated_bc = []
for bc in bc_type:
if isinstance(bc, string_types):
if bc == 'clamped':
validated_bc.append((1, np.zeros(expected_deriv_shape)))
elif bc == 'natural':
validated_bc.append((2, np.zeros(expected_deriv_shape)))
elif bc in ['not-a-knot', 'periodic']:
validated_bc.append(bc)
else:
raise ValueError("bc_type={} is not allowed.".format(bc))
else:
try:
deriv_order, deriv_value = bc
except Exception:
raise ValueError("A specified derivative value must be "
"given in the form (order, value).")
if deriv_order not in [1, 2]:
raise ValueError("The specified derivative order must "
"be 1 or 2.")
deriv_value = np.asarray(deriv_value)
if deriv_value.shape != expected_deriv_shape:
raise ValueError(
"`deriv_value` shape {} is not the expected one {}."
.format(deriv_value.shape, expected_deriv_shape))
if np.issubdtype(deriv_value.dtype, np.complexfloating):
y = y.astype(complex, copy=False)
validated_bc.append((deriv_order, deriv_value))
return validated_bc, y
| bsd-3-clause |
bzamecnik/ml-playground | chord-recognition/convnet_chord_classification_application.py | 2 | 4286 | # Chord classification
#
# The task is to classify chords (or more precisely pitch class sets) based on chromagram features.
#
# We use a single Beatles song with just two chords and silence.
#
# The task is in fact multilabel classification, since each pitch class is generally independent.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import arrow
import os
import scipy.signal
import scipy.misc
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
from sklearn.cross_validation import train_test_split
from sklearn.metrics import hamming_loss, accuracy_score
from keras.models import model_from_yaml
from tfr.reassignment import chromagram
from tfr.signal import SignalFrames
from tfr.spectrogram import create_window
## Load model
model_id = 'model_2016-04-16-20-52-03'
model_dir = '../data/beatles/models/' + model_id
model_arch = model_dir + '/' + model_id + '_arch.yaml'
model_weights = model_dir + '/' + model_id + '_weights.h5'
print('loading model:', model_arch)
model = model_from_yaml(open(model_arch).read())
print('loading model weights:', model_weights)
model.load_weights(model_weights)
## Load data
song = "The_Beatles/03_-_A_Hard_Day's_Night/05_-_And_I_Love_Her"
audio_file = '../data/beatles/audio-cd/' + song + '.wav'
### Chromagram features
# labels_file = '../data/beatles/chord-pcs/4096_2048/'+song+'.pcs'
# features_file = '../data/beatles/chromagram/block=4096_hop=2048_bins=-48,67_div=1/'+song+'.npz'
# data = np.load(features_file)
# features = data['X']
# times = data['times']
### Chord labels
# df_labels = pd.read_csv(labels_file, sep='\t')
# labels_pcs = df_labels[df_labels.columns[1:]].as_matrix()
block_size = 4096
hop_size = 2048
print('loading audio:', audio_file)
print('splitting audio to blocks')
signal_frames = SignalFrames(audio_file, frame_size=block_size, hop_size=hop_size)
x_blocks, x_times, fs = signal_frames.frames, signal_frames.start_times, signal_frames.sample_rate
w = create_window(block_size)
print('computing chromagram')
X_chromagram = chromagram(x_blocks, w, fs, to_log=True)
features = X_chromagram
## Data preprocessing
### Features
print('scaling the input features')
# scaler = MinMaxScaler()
# X = scaler.fit_transform(features).astype('float32')
# TODO: there's a bug: it should be + 120 in both places!
X = (features.astype('float32') - 120) / (features.shape[1] - 120)
# reshape for 1D convolution
def conv_reshape(X):
return X.reshape(X.shape[0], X.shape[1], 1)
X_conv = conv_reshape(X)
# visualization
#
# def plot_labels(l, title, fifths=False, resample=True, exact=False):
# if fifths:
# l = l[:,np.arange(12)*7 % 12]
# l = l.T
#
# # file = model_dir+'/'+model_id+'_'+title+'.png'
#
# if exact:
# pass
# # scipy.misc.imsave(file, l)
# else:
# if resample:
# l = scipy.signal.resample(l, 200, axis=1)
# plt.figure(figsize=(20, 2))
# plt.imshow(l, cmap='gray', interpolation='none')
# plt.tight_layout()
# plt.show()
# # plt.savefig(file)
# predicted labels
# labels_pred_full = model.predict_classes(X_conv)
# plot_labels(labels_pred_full, 'pred')
# plot_labels(labels_pred_full, 'exact_pred', exact=True)
# in case of input features with original time order we can apply median filter:
# medfilt(labels_pred_full, (15, 1))
model.compile(class_mode='binary', loss='binary_crossentropy', optimizer='adam')
y_pred = (model.predict(X_conv) >= 0.5).astype(np.int32)
pred_file = '../data/beatles/chord-pcs-predicted/%d_%d/%s/%s.tsv' % (block_size, hop_size, model_id, song)
pred_dir = os.path.dirname(pred_file)
os.makedirs(pred_dir, exist_ok=True)
np.savetxt(pred_file, y_pred, delimiter='\t', fmt='%d')
# def plot_labels_true_pred_diff():
# def plot2d(x):
# plt.imshow(scipy.signal.resample(x.T, 200, axis=1), cmap='gray', interpolation='none')
# plt.figure(figsize=(20, 6))
# ax = plt.subplot(3,1,1)
# plot2d(labels_pcs)
# ax.set_title('true')
# ax = plt.subplot(3,1,2)
# plot2d(labels_pred_full)
# ax.set_title('predicted')
# ax = plt.subplot(3,1,3)
# plot2d(labels_pred_full - labels_pcs)
# ax.set_title('difference')
# plt.tight_layout()
# plt.show()
#
# plot_labels_true_pred_diff()
| mit |
tbischler/PEAKachu | peakachulib/deseq2.py | 1 | 2710 | import numpy as np
import pandas as pd
from rpy2 import robjects
from rpy2.robjects import r, Formula, pandas2ri
from rpy2.robjects.conversion import localconverter
pandas2ri.activate()
class DESeq2Runner(object):
def __init__(self, count_df):
r("suppressMessages(library(DESeq2))")
self._count_df = count_df
def run_deseq2(self, exp_lib_list, ctr_lib_list, size_factors,
pairwise_replicates):
self._count_df = np.round(self._count_df, decimals=0)
self._count_df = self._count_df.astype(int)
conds = ["exp"] * len(exp_lib_list) + ["ctr"] * len(ctr_lib_list)
if pairwise_replicates:
samples = [str(sample) for sample in (
list(range(1, len(exp_lib_list) + 1)) +
list(range(1, len(ctr_lib_list) + 1)))]
colData = robjects.DataFrame({
"conditions": robjects.StrVector(conds),
"samples": robjects.StrVector(samples)})
design = Formula('~ samples + conditions')
else:
colData = robjects.DataFrame(
{"conditions": robjects.StrVector(conds)})
design = Formula('~ conditions')
r_count_df = robjects.DataFrame(self._count_df)
r_count_df.colnames = robjects.rinterface.NULL
dds = r.DESeqDataSetFromMatrix(countData=r_count_df,
colData=colData, design=design)
if size_factors is None:
dds = r.estimateSizeFactors(dds)
else:
assign_sf = r["sizeFactors<-"]
dds = assign_sf(object=dds, value=robjects.FloatVector(
size_factors))
dds = r.estimateDispersions(dds, quiet=True)
dds = r.nbinomWaldTest(dds, quiet=True)
size_factors = pd.Series(r.sizeFactors(dds),
index=self._count_df.columns)
results = r.results(dds, contrast=robjects.StrVector(
("conditions", "exp", "ctr")), altHypothesis="greater")
with localconverter(robjects.default_converter + pandas2ri.converter):
results_df = robjects.conversion.rpy2py(
r['as.data.frame'](results))
results_df.index = self._count_df.index
return(results_df, size_factors)
def calc_size_factors(self):
self._count_df = np.round(self._count_df, decimals=0)
self._count_df = self._count_df.astype(int)
r_count_df = robjects.DataFrame(self._count_df)
r_count_df.colnames = robjects.rinterface.NULL
r_size_factors = r.estimateSizeFactorsForMatrix(r_count_df)
return pd.Series(r_size_factors, index=self._count_df.columns)
| isc |
zertan/PTR-Pipeline | menace/lib/Community.py | 2 | 13338 | #!/usr/bin/env python
from collections import namedtuple
from os.path import join, exists
from os import makedirs#, chdir,getcwd,fchmod
from glob import glob
import shutil
import numpy as np
#import pandas as pd
import re
#import scipy as sc
#from scipy.stats import norm
#from scipy.stats import expon
import scipy.interpolate as interpolate
import scipy.integrate
#from scipy.fftpack import fft, ifft
from lmfit.models import Model  # needed by fit_signal below
#from lmfit import conf_interval
#import matplotlib
#import matplotlib.pyplot as plt
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
#from Bio.Seq import MutableSeq
#from Bio.Alphabet import IUPAC
import docker
import configparser
# vars
#cli = docker.Client(base_url='unix://var/run/docker.sock')
class Community:
"A community defined by genome references (Biopython SeqRecords) and corresponding growth parameters."
def __init__(self,name,acc,growth_param,td,mapper,image,env,email):
self.name = name
self.conf = local_conf(join(td,name),mapper,email)
self.d_conf = local_conf(join('/mnt/vol',name),mapper,email)
self.args0=['ptr_pipeline.py','-c',join(self.d_conf['node_path'],'project.conf')]
self.env=env
self.image=image
self.create_dirs(True)
save_config(self.conf,self.d_conf)
self.fetch_ref(acc)
self.pop = self.init_population(acc,growth_param)
self.distribution = self.community_distribution()
self.samples = []
def init_population(self,acc,growth_param):
records=open_records(glob(join(self.conf['ref_path'],'Fasta','*.fasta')))
population = namedtuple("population", "B C D l seq cells")
#refId=[x.id for x in records]
#acc=keys(comm)
#refInd=[acc.index(x) for x in refId]
pop = {}
#for i,rec in enumerate(records):
# add [:-2] to a if . removed
for i,a in enumerate(acc):
pop[a]=population( B = growth_param[i][0],C = growth_param[i][1],
D = growth_param[i][2], l = len(records[a]),
seq = records[a], cells=growth_param[i][3])
return pop
def ptr(self):
for i,a in enumerate(self.pop.keys()):
print a+": "+str(2**self.pop[a].C)
def Asnok(R,C,D,l):
return R/(Gekv(C,D) * l)
def community_distribution(self):
d=np.array([Gekv(p.B,p.C)*p.l*p.cells for p in self.pop.values()])
return d/d.sum()
#def ab_(self):
# return
def sample(self,nr_samples):
nr_samples=np.array(nr_samples*self.distribution)
nr_samples=nr_samples.astype(np.int)
samp_run=[]
for i,p in enumerate(self.pop.keys()):
samp_run.append(inverse_transform_sampling(self.pop[p].C,nr_samples[i],self.pop[p].l))
self.samples.append(samp_run)
def write_reads(self):
for i,samp in enumerate(self.samples):
write_reads(samp,self,self.conf['data_path'],self.name+str(i))
def compare_fit(self):
if not self.samples:
print "The community is not sampled, please run community.sample(nr_samples)"
return;
err_hfit=[]
err_pfit=[]
res_fit=[]
for i,samp in enumerate(self.samples):
for acc in self.pop.keys():
try:
depth_file=join(self.conf['output_path'],self.name+str(i),'npy',acc+'.depth.npy')
best_file=join(self.conf['output_path'],self.name+str(i),'npy',acc+'.depth.best.npy')
signal=2**(np.load(depth_file))
signal=signal/(signal.sum()/len(signal))#*self.pop[acc].l)
from_ptr=np.load(best_file)
res=fit_signal(signal,self.pop[acc].l)
res_fit.append(res)
err_hfit.append((self.pop[acc].C-res.best_values['C'])/self.pop[acc].C)
err_pfit.append((self.pop[acc].C-from_ptr[2]+from_ptr[3])/self.pop[acc].C)
print "Simulated value: "+str(self.pop[acc].C)
print "Error from this fit: "+str(err_hfit[-1])+ ' value: ' + str(res.best_values['C'])
print "Error from initial PTR fit "+str(err_pfit[-1])+' value: ' + str(from_ptr[2]-from_ptr[3])
except Exception as Ex:
print Ex
pass
return [res_fit,err_hfit,err_pfit]
def fetch_ref(self,acc=''):
acc_path = join(self.conf['node_path'],"acc")
f = open(acc_path, "w")
if not acc:
f.write("\n".join(self.pop.keys()))
else:
f.write("\n".join(acc))
f.close()
create_mount_run(self.image,td,self.args0+['fetch-references','-s',join(self.d_conf['node_path'],'acc')],self.env)
def build_index(self):
create_mount_run(self.image,td,self.args0+['build-index'],self.env)
def run_pipeline(self):
create_mount_run(self.image,td," ".join(['/bin/bash -c "']+self.args0+['make']+[';']+self.args0+['run"']),self.env)
def collect(self):
create_mount_run(self.image,td,self.args0+['collect'],self.env)
def create_dirs(self,from_init=False):
if exists(self.conf['node_path']) and from_init:
shutil.rmtree(self.conf['node_path'])
for d in [self.conf['node_path'],self.conf['data_path'],self.conf['output_path'],self.conf['ref_path']]:
if not exists(d):
makedirs(d)
def create_mount_run(cli,image,mount_dir,cmd,envs):
if envs:
container = cli.create_container(
image=image, command=cmd, volumes=['/mnt/vol'],
host_config=cli.create_host_config(binds={
mount_dir: {
'bind': '/mnt/vol',
'mode': 'rw',
}
}),
environment=envs
)
else:
container = cli.create_container(
image=image, command=cmd, volumes=['/mnt/vol'],
host_config=cli.create_host_config(binds={
mount_dir: {
'bind': '/mnt/vol',
'mode': 'rw',
}
})
)
ctr=container.get('Id')
cli.start(ctr)
cli.wait(ctr,60*60*24*10)
return cli.logs(ctr)
def local_conf(td,mapper,email,cores):
return {
'project': 'ptr_simulation',
'cluster': '',
'job_name': 'ptr_simulation',
'job_nodes': '1',
'cpu_cores': cores,
'estimated_time': '',
'node_path': td,
'ref_path': join(td,'References'),
'data_path': join(td,'Data'),
'output_path': join(td,'Out'),
'doric_path': join(td,'DoriC'),
'mapper': mapper,
'ref_name': 'sim',
'nr_samples': '1',
'samples_per_node': '1',
'email': email,
'data_prefix': '',
'start_ind': '1',
'job_range': '1',
'ftp_url': '',
'data_url': ''
}
def save_config(lconf,conf):
Config = configparser.ConfigParser()
Config.optionxform = str
cfgfile = open(join(lconf['node_path'],'project.conf'),'w')
Config.add_section('Project')
Config.set('Project','ProjectID',conf['project'])
Config.set('Project','Cluster',conf['cluster'])
Config.set('Project','JobName',conf['job_name'])
Config.set('Project','JobNodes',conf['job_nodes'])
Config.set('Project','CpuCores',conf['cpu_cores'])
Config.set('Project','EstimatedTime',conf['estimated_time'])
Config.add_section('Directories')
Config.set('Directories','Node',conf['node_path'])
Config.set('Directories','References',conf['ref_path'])
Config.set('Directories','Data',conf['data_path'])
Config.set('Directories','Output',conf['output_path'])
Config.set('Directories','DoriC',conf['doric_path'])
Config.add_section('Other')
Config.set('Other','Mapper',conf['mapper'])
Config.set('Other','RefName',conf['ref_name'])
Config.set('Other','NrSamples',conf['nr_samples'])
Config.set('Other','SamplesPerNode',conf['samples_per_node'])
Config.set('Other','Email',conf['email'])
Config.set('Other','DataPrefix',conf['data_prefix'])
Config.set('Other','StartInd',conf['start_ind'])
Config.set('Other','JobRange',conf['job_range'])
Config.set('Other','FtpURL',conf['ftp_url'])
Config.set('Other','DataURL',conf['data_url'])
Config.write(cfgfile)
cfgfile.close()
def write_reads(samples,comm,directory,name):
f1 = open(join(directory,name+"_1.fastq"), "w")
f2 = open(join(directory,name+"_2.fastq"), "w")
for i,p in enumerate(comm.pop.keys()):
for j,pos in enumerate( samples[i].tolist()):
r1,r2 = read_pair(name+"_"+p,str(j+1),comm.pop[p].seq,pos,comm.pop[p].l)
SeqIO.write(r1, f1, "fastq")
SeqIO.write(r2, f2, "fastq")
f1.close()
f2.close()
def open_records(fasta):
records={};
for f in fasta:
handle = open(f, "rU")
tmp=list(SeqIO.parse(handle, "fasta"))
m=re.match('.*(N[CTZ]_([A-Z]{2})*[0-9]{6}).*',tmp[0].id)
records[m.group(0)]=tmp[0].seq
handle.close()
return records
def read_pair(header,ind,seq,pos,l):
def circular_yield(x,pos,sub,l):
if (pos>l):
return x[pos-l:pos-l+sub]
elif (pos+sub>l):
r=pos+sub-l
x2=x[pos:pos+sub-r]
return x2+x[0:r]
else:
return x[pos:pos+sub]
#base_error_rate = .02
#mutation_rate = .001
#nuc=["A","C","G","T"]
# generate a normally distributued insert size of mean 300 and sd 40
#insert_size = int(norm.rvs(size=1,loc=300,scale=30)[0])
insert_size = int(500)
r1 = circular_yield(seq,pos,100,l)
r2 = circular_yield(seq,pos+insert_size,100,l)
# flip reads according to seqs error and mutation rate
#ra=[]
#for r in [r1,r2]:
# rr=np.random.random(100)
# m_ind=[i for i,j in enumerate(rr) if j < base_error_rate]
# char=np.random.choice(nuc,len(m_ind))
# a=list(r)
# for i,ind in enumerate(m_ind):
# a[ind]=char[i]
# ra.append("".join(a))
#r_tmp=r_tmp[:m_ind[0]]
#for i,index in enumerate(m_ind[1:]):
# r_tmp = r_tmp[:index] + char[i] + r[index + 1:]
#[r1,r2]=ra
#nrs=[str(np.random.randint(low=1000,high=2000)),str(np.random.randint(low=10**4,high=2*10**4)),str(np.random.randint(low=10**5,high=2*10**5))]
rec1=SeqRecord(r1,id=header+"_"+str(pos)+"."+str(ind),description="RANDXXLAB"+str(pos)+"_"+str(pos+insert_size+100)+":0:0"+"/1")
rec2=SeqRecord(r2,id=header+"_"+str(pos)+"."+str(ind),description="RANDXXLAB"+str(pos)+"_"+str(pos+insert_size+100)+":0:0"+"/2")
#rec1=SeqRecord(r1,id=header+"."+str(ind),description="FCB00YLABXX:6:"+nrs[0]+":"+nrs[1]+":"+nrs[2]+"/1")
#rec2=SeqRecord(r2,id=header+"."+str(ind),description="FCB00YLABXX:6:"+nrs[0]+":"+nrs[1]+":"+nrs[2]+"/2")
#rec1=SeqRecord(r1,id=header+"_"+str(pos)+"_"+str(pos+insert_size)+"/1",description="")
#rec2=SeqRecord(r2,id=header+"_"+str(pos)+"_"+str(pos+insert_size)+"/2",description="")
rec2=rec2.reverse_complement(id=header+"_"+str(pos)+"."+str(ind),description="RANDXXLAB"+str(pos)+"_"+str(pos+insert_size+100)+":0:0"+"/2")
#rec2=SeqRecord(r2,id=header+"."+str(ind)+"/2",description="")
rec1.letter_annotations['phred_quality']=[17]*100
rec2.letter_annotations['phred_quality']=[17]*100
#rec1.description=
return(rec1,rec2)
def inverse_transform_sampling(C,n_samples,l=1):
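# Inverse transform sampling of read start positions: tabulate the pxn
# density on a grid, integrate it numerically to obtain the CDF on
# [0, 0.5], invert the CDF by interpolation, then map uniform random
# numbers through the inverse CDF. Half of the samples are mirrored
# onto (l/2, l], the density being symmetric about the genome midpoint.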
x = np.linspace(0, float(1)/1.7, 100)
y = np.linspace(pxn(.5,C,1),pxn(0,C,1),100)
pdf=interpolate.interp1d(x,pxn(x,C,1))
cdf = [scipy.integrate.quad(lambda x: pdf(x),0,i)[0] for i in np.linspace(0,.5,100)]
inv_cdf = interpolate.interp1d(cdf, np.linspace(0,.5,100))
r = np.random.rand(n_samples)
v=l*np.round(inv_cdf(r[:int(n_samples/2)]),4)
v2=l*(1-np.round(inv_cdf(r[int(n_samples/2):]),4))
v=np.concatenate([v,v2])
return v.astype(np.int)
def fit_signal(signal,l):
x1=np.linspace(0,1,len(signal))
piecewiseModel=Model(piecewise_prob)
piecewiseModel.set_param_hint('l', value=1,vary=False)
piecewiseModel.set_param_hint('C', vary=True, value=.1,min=0,max=1)
piecewiseModel.make_params()
res=piecewiseModel.fit(signal,x=x1,weights=np.sin(1.5*x1)+1.5)
return res
def piecewise_prob(x,C,l):
conds=[(0 < x) & (x <= float(l)/2), x > float(l)/2]
funcs=[lambda x: pxn(x,C,l)/float(2),lambda x: pxn((l-x),C,l)/float(2)]
return np.piecewise(x,conds,funcs)
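# Gekv(C, D) is the genome-averaged coverage factor implied by the model,
# i.e. the integral of 2**(C*(1 - x) + D) over x in [0, 1]; pxn(x, C, l)
# is the corresponding read-position density, normalised to integrate to
# 1 on [0, l/2] (interpretation inferred from the formulas below).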
def Gekv(C,D):
return 2**(C + D)/(C*np.log(2))*(1 - 2**(-C))
def pxn(x, C, l):
return (2**(1 + C - (2*C*x)/float(l))*C*np.log(2))/(float(l)*(-1 + 2**C))
def mv_filt(L,omega):
return (1/float(L))*(1-np.exp(-omega*1j*L))/(1-np.exp(-omega*1j))
def Asnok(R,C,D,l):
return R/(Gekv(C,D) * l)
| gpl-2.0 |
ZenDevelopmentSystems/scikit-learn | sklearn/tests/test_random_projection.py | 79 | 14035 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrix should produce a transformation matrix
# with zero mean and unit norm for each columns
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
# Check some statistical properties of Gaussian random matrix
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
# Check some statistical properties of sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [[0, 1, 2]])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
zhonghualiu/FaST-LMM | fastlmm/inference/linear_regression.py | 1 | 7274 | """
Created on 2013-08-02
@author: Christian Widmer <chris@shogun-toolbox.org>
@summary: Module for univariate feature selection in the presence of covariates
Motivated by sklearn's linear regression method for feature
selection, we've come up with an extended version that takes
care of covariates
based on sklearn code (f_regression):
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/univariate_selection.py
"""
import numpy as np
from sklearn.utils import safe_sqr, check_array
from scipy import stats
#def TESTBEFOREUSING_get_example_data():
# """
# load plink files
# """
# import fastlmm.pyplink.plink as plink
# import pysnptools.snpreader.bed as Bed
# import fastlmm.util.util as util
# ipheno = 0
# foldIter = 0
# """
# import dataset
# dat = dataset.importDataset("pheno4")
# fn_bed = dat["bedFile"]
# fn_pheno = dat["phenoFile"]
# """
# fn_bed = "../../featureSelection/examples/toydata"
# fn_pheno = "../../featureSelection/examples/toydata.phe"
# pheno = pstpheno.loadPhen(fn_pheno)
# # load data
# bed = plink.Bed(fn_bed)
# indarr = util.intersect_ids([pheno['iid'],bed.iid])
# pheno['iid'] = pheno['iid'][indarr[:,0]]
# pheno['vals'] = pheno['vals'][indarr[:,0]]
# bed = bed[indarr[:,1],:]
# N = pheno['vals'].shape[0]
# y = pheno['vals'][:,ipheno]
# iid = pheno['iid']
# snps = bed.read().standardize()
# return snps, y
def f_regression_block(fun,X,y,blocksize=None,**args):
"""
Runs f_regression for each block separately (saves memory).
-------------------------
fun : method that returns statistics,pval
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will tested sequentially.
y : array of shape(n_samples).
The data matrix
blocksize : number of SNPs per block
"""
if blocksize is None:
return fun(X,y,**args)
idx_start = 0
idx_stop = int(blocksize)
pval = np.zeros(X.shape[1])
stats = np.zeros(X.shape[1])
while idx_start<X.shape[1]:
stats[idx_start:idx_stop], pval[idx_start:idx_stop] = fun(X[:,idx_start:idx_stop],y,**args)
idx_start = idx_stop
idx_stop += blocksize
if idx_stop>X.shape[1]:
idx_stop = X.shape[1]
return stats,pval
def f_regression_cov_alt(X, y, C):
"""
Implementation as derived in tex document
See pg 12 of following document for definition of F-statistic
http://www-stat.stanford.edu/~jtaylo/courses/stats191/notes/simple_diagnostics.pdf
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will tested sequentially.
y : array of shape(n_samples).
The data matrix
c : {array-like, sparse matrix} shape = (n_samples, n_covariates)
The set of covariates.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
"""
# make sure we don't overwrite input data
old_flag_X = X.flags.writeable
old_flag_C = C.flags.writeable
old_flag_y = y.flags.writeable
X.flags.writeable = False
C.flags.writeable = False
y.flags.writeable = False
#X, C, y = check_arrays(X, C, y, dtype=np.float)
y = y.ravel()
# make copy of input data
X = X.copy(order="F")
y = y.copy()
assert C.shape[1] < C.shape[0]
cpinv = np.linalg.pinv(C)
X -= np.dot(C,(np.dot(cpinv, X))) #most expensive line (runtime)
y -= np.dot(C,(np.dot(cpinv, y)))
yS = safe_sqr(y.T.dot(X)) # will create a copy
# Note: (X*X).sum(0) = X.T.dot(X).diagonal(), computed efficiently
# see e.g.: http://stackoverflow.com/questions/14758283/is-there-a-numpy-scipy-dot-product-calculating-only-the-diagonal-entries-of-the
# TODO: make this smarter using either stride tricks or cython
X *= X
denom = X.sum(0) * y.T.dot(y) - yS
F = yS / denom
# degrees of freedom
dof = (X.shape[0] - 1 - C.shape[1]) / (1) #(df_fm / (df_rm - df_fm))
F *= dof
# convert to p-values
pv = stats.f.sf(F, 1, dof)
# restore old state
X.flags.writeable = old_flag_X
C.flags.writeable = old_flag_C
y.flags.writeable = old_flag_y
return F, pv
def f_regression_cov(X, y, C):
"""Univariate linear regression tests
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 3 steps:
1. the regressor of interest and the data are orthogonalized
wrt constant regressors
2. the cross correlation between data and regressors is computed
3. it is converted to an F score then to a p-value
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
        The set of regressors that will be tested sequentially.
y : array of shape(n_samples).
The data matrix
c : {array-like, sparse matrix} shape = (n_samples, n_covariates)
The set of covariates.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
"""
    X = check_array(X, dtype=np.float64)
    C = check_array(C, dtype=np.float64)
    y = check_array(y, dtype=np.float64, ensure_2d=False)
y = y.ravel()
assert C.shape[1] < C.shape[0]
cpinv = np.linalg.pinv(C)
X -= np.dot(C,(np.dot(cpinv, X)))
y -= np.dot(C,(np.dot(cpinv, y)))
# compute the correlation
corr = np.dot(y, X)
corr /= np.asarray(np.sqrt(safe_sqr(X).sum(axis=0))).ravel()
corr /= np.asarray(np.sqrt(safe_sqr(y).sum())).ravel()
# convert to p-value
dof = (X.shape[0] - 1 - C.shape[1]) / (1) #(df_fm / (df_rm - df_fm))
F = corr ** 2 / (1 - corr ** 2) * dof
pv = stats.f.sf(F, 1, dof)
return F, pv
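# --- Hedged usage sketch (not part of the original module) ---
# A minimal synthetic-data illustration of the covariate-adjusted F-test,
# assuming a random design matrix and a covariate block made of an intercept
# plus one confounder; all names and sizes here are purely illustrative.
def _example_f_regression_cov():
    rng = np.random.RandomState(42)
    n_samples, n_features = 200, 50
    X = rng.randn(n_samples, n_features)
    C = np.hstack([np.ones((n_samples, 1)), rng.randn(n_samples, 1)])
    y = X[:, 0] + 0.5 * C[:, 1] + rng.randn(n_samples)
    # copies are passed because both implementations modify their inputs in place
    F_a, p_a = f_regression_cov(X.copy(), y.copy(), C.copy())
    F_b, p_b = f_regression_cov_alt(X.copy(), y.copy(), C.copy())
    np.testing.assert_array_almost_equal(F_a, F_b)
    return F_a, p_a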
def test_bias():
"""
make sure we get the same result for setting C=unitvec
"""
S, y = get_example_data()
C = np.ones((len(y),1))
from sklearn.feature_selection import f_regression
F1, pval1 = f_regression(S, y, center=True)
    F2, pval2 = f_regression_cov(S, y, C)
    F3, pval3 = f_regression_cov_alt(S, y, C)
# make sure values are the same
np.testing.assert_array_almost_equal(F1, F2)
np.testing.assert_array_almost_equal(F2, F3)
np.testing.assert_array_almost_equal(pval1, pval2)
np.testing.assert_array_almost_equal(pval2, pval3)
def test_cov():
"""
compare different implementations, make sure results are the same
"""
S, y = get_example_data()
C = S[:,0:10]
S = S[:,10:]
    F1, pval1 = f_regression_cov(S, y, C)
    F2, pval2 = f_regression_cov_alt(S, y, C)
np.testing.assert_array_almost_equal(F1, F2)
np.testing.assert_array_almost_equal(pval1, pval2)
def main():
test_cov()
test_bias()
if __name__ == "__main__":
main()
| apache-2.0 |
walterreade/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
tgquintela/Mscthesis | set_computationparameters.py | 1 | 3475 |
"""
set computation parameters
--------------------------
Computation parameters which depends now in the path of files.
"""
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.cross_validation import KFold
from pySpatialTools.Retrieve import KRetriever, CircRetriever
from pySpatialTools.utils.perturbations import NonePerturbation,\
PermutationPerturbationGeneration
from pythonUtils.sklearn_tools.cross_validation import KFold_list
from collection_auxiliar_functions import *
from collection_creation_functions import *
## Collection of possible files
_pfeatures_files = ['2016-11-01-raw_finance_-7893940389519280411',
'2016-11-01-raw_type_firms_cnae2_-6727949354442861019',
]
_qvalues_files = ['2016-11-01-tono_mag_diff_-7344402660780134763',
]
## Collection of possible parameters
#models_direct_info =\
# [('rf_reg_nest10', RandomForestRegressor, {"n_estimators": 10}),
# ('rf_reg_nest25', RandomForestRegressor, {"n_estimators": 25}),
## ('rf_reg_nest50', RandomForestRegressor, {"n_estimators": 50}),
## ('rf_reg_nest75', RandomForestRegressor, {"n_estimators": 75})
# ]
#scorer_info = [('r2_score', r2_score, {}, dummy_function_conversion), ]
#perts_info = [('none_perturb1', NonePerturbation, {}, f_pert_null_instantiation),
# ('globalpermut', PermutationPerturbationGeneration,
# {'seed': 0}, f_pert_features_instantiation),
# ('')
# ]
#samplings_info = [('kfold10', KFold, {"n_folds": 10})]
perts_info =\
create_permutation_feature(2, rate_pert=1.0, name=None) +\
create_permutation_feature(2, rate_pert=0.8, name=None)
format_info =\
create_null_format_info()
models_direct_info =\
creation_models(RandomForestRegressor, 'rf_reg',
[("n_estimators", [10], 'nest')])
samplings_info =\
creation_sampling(KFold, 'kfold', [("n_folds", [10], '')], f_stringer=None)
scorer_info =\
creation_scorers(r2_score, 'r2_score', [])
## Collection of possible list of parameters
pars_dir_model0 = (perts_info, format_info, models_direct_info, samplings_info,
scorer_info)
print(pars_dir_model0)
## Final parameter list collection
#pars_directmodel =\
# [((_pfeatures_files[0], _qvalues_files[0], f_filter_finance),
# pars_dir_model0,
# 'finance-mag-DirectModel-None_perturb-None_filter-rf_reg-Kfold-r2_score'),
# ((_pfeatures_files[0], _qvalues_files[0], f_filter_logfinance),
# pars_dir_model0,
# 'financefilt-mag-DirectModel-None_perturb-None_filter-rf_reg-Kfold-r2_score')
# ]
perts_sptemp_info =\
create_permutation_sptemp(2, rate_pert=1.0, name=None) +\
create_permutation_sptemp(2, rate_pert=0.8, name=None)
format_sptemp_info =\
create_null_sptemp_format_info()
models_sptemp_info = [('None_model')]
samplings_sptemp_info =\
creation_sampling(KFold_list, 'Kfold_sptemp', [("n_folds", [3], '')],
f_stringer=None)
scorer_sptemp_info =\
creation_scorers(r2_score, 'r2_score', [])
pars_loc_model0 = (perts_sptemp_info, format_sptemp_info, models_sptemp_info,
samplings_sptemp_info, scorer_sptemp_info)
pars_loconly_model =\
[((_pfeatures_files[0], _qvalues_files[0], f_filter_finance),
pars_loc_model0,
'finance-mag-LocOnlyModel-None_perturb-None_filter-rf_reg-Kfold-r2_score'),
]
| mit |
rs2/pandas | pandas/io/json/_normalize.py | 1 | 12555 | # ---------------------------------------------------------------------
# JSON normalization routines
from collections import defaultdict
import copy
from typing import Any, DefaultDict, Dict, Iterable, List, Optional, Union
import numpy as np
from pandas._libs.writers import convert_json_to_lines
from pandas._typing import Scalar
from pandas.util._decorators import deprecate
import pandas as pd
from pandas import DataFrame
def convert_to_line_delimits(s):
"""
Helper function that converts JSON lists to line delimited JSON.
"""
# Determine we have a JSON list to turn to lines otherwise just return the
# json object, only lists can
if not s[0] == "[" and s[-1] == "]":
return s
s = s[1:-1]
return convert_json_to_lines(s)
def nested_to_record(
ds,
prefix: str = "",
sep: str = ".",
level: int = 0,
max_level: Optional[int] = None,
):
"""
A simplified json_normalize
Converts a nested dict into a flat dict ("record"), unlike json_normalize,
it does not attempt to extract a subset of the data.
Parameters
----------
ds : dict or list of dicts
prefix: the prefix, optional, default: ""
sep : str, default '.'
Nested records will generate names separated by sep,
e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar
level: int, optional, default: 0
The number of levels in the json string.
max_level: int, optional, default: None
The max depth to normalize.
.. versionadded:: 0.25.0
Returns
-------
d - dict or list of dicts, matching `ds`
Examples
--------
IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2),
nested=dict(e=dict(c=1,d=2),d=2)))
Out[52]:
{'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
"""
singleton = False
if isinstance(ds, dict):
ds = [ds]
singleton = True
new_ds = []
for d in ds:
new_d = copy.deepcopy(d)
for k, v in d.items():
# each key gets renamed with prefix
if not isinstance(k, str):
k = str(k)
if level == 0:
newkey = k
else:
newkey = prefix + sep + k
# flatten if type is dict and
# current dict level < maximum level provided and
# only dicts gets recurse-flattened
# only at level>1 do we rename the rest of the keys
if not isinstance(v, dict) or (
max_level is not None and level >= max_level
):
if level != 0: # so we skip copying for top level, common case
v = new_d.pop(k)
new_d[newkey] = v
continue
else:
v = new_d.pop(k)
new_d.update(nested_to_record(v, newkey, sep, level + 1, max_level))
new_ds.append(new_d)
if singleton:
return new_ds[0]
return new_ds
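# --- Hedged illustration (not part of pandas) ---
# A small sketch of how `sep` and `max_level` interact; the record below is
# invented purely for illustration.
def _nested_to_record_example():
    record = {"id": 1, "info": {"owner": {"name": "a"}, "size": 2}}
    full = nested_to_record(record, sep="_")
    # -> {'id': 1, 'info_owner_name': 'a', 'info_size': 2}
    shallow = nested_to_record(record, max_level=1)
    # -> {'id': 1, 'info.owner': {'name': 'a'}, 'info.size': 2}
    return full, shallow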
def _json_normalize(
data: Union[Dict, List[Dict]],
record_path: Optional[Union[str, List]] = None,
meta: Optional[Union[str, List[Union[str, List[str]]]]] = None,
meta_prefix: Optional[str] = None,
record_prefix: Optional[str] = None,
errors: str = "raise",
sep: str = ".",
max_level: Optional[int] = None,
) -> "DataFrame":
"""
Normalize semi-structured JSON data into a flat table.
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects.
record_path : str or list of str, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records.
meta : list of paths (str or list of str), default None
Fields to use as metadata for each record in resulting table.
meta_prefix : str, default None
        If True, prefix records with dotted path, e.g. foo.bar.field if
        meta is ['foo', 'bar'].
record_prefix : str, default None
        If True, prefix records with dotted path, e.g. foo.bar.field if
        path to records is ['foo', 'bar'].
errors : {'raise', 'ignore'}, default 'raise'
Configures error handling.
* 'ignore' : will ignore KeyError if keys listed in meta are not
always present.
* 'raise' : will raise KeyError if keys listed in meta are not
always present.
sep : str, default '.'
Nested records will generate names separated by sep.
e.g., for sep='.', {'foo': {'bar': 0}} -> foo.bar.
max_level : int, default None
Max number of levels(depth of dict) to normalize.
if None, normalizes all levels.
.. versionadded:: 0.25.0
Returns
-------
frame : DataFrame
Normalize semi-structured JSON data into a flat table.
Examples
--------
>>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},
... {'name': {'given': 'Mose', 'family': 'Regner'}},
... {'id': 2, 'name': 'Faye Raker'}]
>>> pd.json_normalize(data)
id name.first name.last name.given name.family name
0 1.0 Coleen Volk NaN NaN NaN
1 NaN NaN NaN Mose Regner NaN
2 2.0 NaN NaN NaN NaN Faye Raker
>>> data = [{'id': 1,
... 'name': "Cole Volk",
... 'fitness': {'height': 130, 'weight': 60}},
... {'name': "Mose Reg",
... 'fitness': {'height': 130, 'weight': 60}},
... {'id': 2, 'name': 'Faye Raker',
... 'fitness': {'height': 130, 'weight': 60}}]
>>> pd.json_normalize(data, max_level=0)
id name fitness
0 1.0 Cole Volk {'height': 130, 'weight': 60}
1 NaN Mose Reg {'height': 130, 'weight': 60}
2 2.0 Faye Raker {'height': 130, 'weight': 60}
Normalizes nested data up to level 1.
>>> data = [{'id': 1,
... 'name': "Cole Volk",
... 'fitness': {'height': 130, 'weight': 60}},
... {'name': "Mose Reg",
... 'fitness': {'height': 130, 'weight': 60}},
... {'id': 2, 'name': 'Faye Raker',
... 'fitness': {'height': 130, 'weight': 60}}]
>>> pd.json_normalize(data, max_level=1)
id name fitness.height fitness.weight
0 1.0 Cole Volk 130 60
1 NaN Mose Reg 130 60
2 2.0 Faye Raker 130 60
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {'governor': 'Rick Scott'},
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': 'Palm Beach', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {'governor': 'John Kasich'},
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> result = pd.json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population state shortname info.governor
0 Dade 12345 Florida FL Rick Scott
1 Broward 40000 Florida FL Rick Scott
2 Palm Beach 60000 Florida FL Rick Scott
3 Summit 1234 Ohio OH John Kasich
4 Cuyahoga 1337 Ohio OH John Kasich
>>> data = {'A': [1, 2]}
>>> pd.json_normalize(data, 'A', record_prefix='Prefix.')
Prefix.0
0 1
1 2
Returns normalized data with columns prefixed with the given string.
"""
def _pull_field(
js: Dict[str, Any], spec: Union[List, str]
) -> Union[Scalar, Iterable]:
"""Internal function to pull field"""
result = js
if isinstance(spec, list):
for field in spec:
result = result[field]
else:
result = result[spec]
return result
def _pull_records(js: Dict[str, Any], spec: Union[List, str]) -> List:
"""
        Internal function to pull field for records; similar to
        _pull_field, but requires the result to be a list: a null value
        yields an empty list, any other non-list value raises a TypeError.
"""
result = _pull_field(js, spec)
# GH 31507 GH 30145, GH 26284 if result is not list, raise TypeError if not
# null, otherwise return an empty list
if not isinstance(result, list):
if pd.isnull(result):
result = []
else:
raise TypeError(
f"{js} has non list value {result} for path {spec}. "
"Must be list or null."
)
return result
if isinstance(data, list) and not data:
return DataFrame()
# A bit of a hackjob
if isinstance(data, dict):
data = [data]
if record_path is None:
if any([isinstance(x, dict) for x in y.values()] for y in data):
# naive normalization, this is idempotent for flat records
# and potentially will inflate the data considerably for
# deeply nested structures:
            # {VeryLong: {b: 1, c: 2}} -> {VeryLong.b: 1, VeryLong.c: 2}
#
# TODO: handle record value which are lists, at least error
# reasonably
data = nested_to_record(data, sep=sep, max_level=max_level)
return DataFrame(data)
elif not isinstance(record_path, list):
record_path = [record_path]
if meta is None:
meta = []
elif not isinstance(meta, list):
meta = [meta]
_meta = [m if isinstance(m, list) else [m] for m in meta]
# Disastrously inefficient for now
records: List = []
lengths = []
meta_vals: DefaultDict = defaultdict(list)
meta_keys = [sep.join(val) for val in _meta]
def _recursive_extract(data, path, seen_meta, level=0):
if isinstance(data, dict):
data = [data]
if len(path) > 1:
for obj in data:
for val, key in zip(_meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1)
else:
for obj in data:
recs = _pull_records(obj, path[0])
recs = [
nested_to_record(r, sep=sep, max_level=max_level)
if isinstance(r, dict)
else r
for r in recs
]
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(_meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
try:
meta_val = _pull_field(obj, val[level:])
except KeyError as e:
if errors == "ignore":
meta_val = np.nan
else:
raise KeyError(
"Try running with errors='ignore' as key "
f"{e} is not always present"
) from e
meta_vals[key].append(meta_val)
records.extend(recs)
_recursive_extract(data, record_path, {}, level=0)
result = DataFrame(records)
if record_prefix is not None:
result = result.rename(columns=lambda x: f"{record_prefix}{x}")
# Data types, a problem
for k, v in meta_vals.items():
if meta_prefix is not None:
k = meta_prefix + k
if k in result:
raise ValueError(
f"Conflicting metadata name {k}, need distinguishing prefix "
)
result[k] = np.array(v, dtype=object).repeat(lengths)
return result
json_normalize = deprecate(
"pandas.io.json.json_normalize", _json_normalize, "1.0.0", "pandas.json_normalize"
)
| bsd-3-clause |
xavierwu/scikit-learn | sklearn/linear_model/ridge.py | 60 | 44642 | """
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
        scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
        The last four solvers support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
iteration performed by the solver.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
The actual number of iteration performed by the solver.
Only returned if `return_n_iter` is True.
Notes
-----
This function won't compute the intercept.
"""
# SAG needs X and y columns to be C-contiguous and np.float64
if solver == 'sag':
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
y = check_array(y, dtype='numeric', ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != 'sag':
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver == 'sag':
# precompute max_squared_sum for all targets
max_squared_sum = get_max_squared_sum(X)
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i,
max_iter, tol, verbose, random_state, False, max_squared_sum,
dict())
coef[i] = coef_
n_iter[i] = n_iter_
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter:
return coef, n_iter
else:
return coef
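# --- Hedged usage sketch (not part of scikit-learn) ---
# A minimal call of the functional interface above on synthetic data; the
# shapes, alpha value and solver choice are illustrative only.
def _example_ridge_regression():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    y = rng.randn(50)
    coef, n_iter = ridge_regression(X, y, alpha=1.0, solver="lsqr",
                                    return_n_iter=True)
    return coef.shape, n_iter  # -> (10,) and the per-target iteration counts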
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
        scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
        The last four solvers support both dense and sparse data.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
        scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is faster than other solvers when both
n_samples and n_features are large.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
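# --- Hedged usage sketch (not part of scikit-learn) ---
# Multi-class use of RidgeClassifier on a toy dataset; internally one ridge
# regression per class is fit against a {-1, +1} indicator target.
def _example_ridge_classifier():
    from sklearn.datasets import load_iris
    iris = load_iris()
    clf = RidgeClassifier(alpha=1.0).fit(iris.data, iris.target)
    return clf.coef_.shape, clf.predict(iris.data[:5])  # coef_ is (3, 4)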
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer want an object that will make the predictions but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
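# --- Hedged numerical sketch (not part of scikit-learn) ---
# Illustrates the identity quoted in the Notes above, looe = c / diag(G) with
# G = (K + alpha*Id)^-1, c = G y and K = X X^T, by comparing it against
# brute-force leave-one-out refits of an intercept-free ridge model on random
# data; sizes and alpha are illustrative only.
def _loo_identity_demo(alpha=1.0):
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = rng.randn(20)
    K = np.dot(X, X.T)
    G = linalg.inv(K + alpha * np.eye(20))
    c = np.dot(G, y)
    looe_closed = c / np.diag(G)
    looe_brute = np.empty(20)
    for i in range(20):
        mask = np.arange(20) != i
        w = linalg.solve(np.dot(X[mask].T, X[mask]) + alpha * np.eye(5),
                         np.dot(X[mask].T, y[mask]))
        looe_brute[i] = y[i] - np.dot(X[i], w)
    return np.allclose(looe_closed, looe_brute)  # -> True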
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` used, else, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
    gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
        'auto' : use svd if n_samples > n_features and X is not a sparse
            matrix, otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
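# --- Hedged usage sketch (not part of scikit-learn) ---
# Selecting the regularization strength by the efficient leave-one-out GCV
# described above; the alpha grid and data are illustrative only.
def _example_ridge_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    y = np.dot(X, rng.randn(5)) + 0.1 * rng.randn(100)
    reg = RidgeCV(alphas=(0.01, 0.1, 1.0, 10.0)).fit(X, y)
    return reg.alpha_, reg.coef_.shape  # chosen alpha and (5,)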
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
Aasmi/scikit-learn | sklearn/decomposition/nmf.py | 15 | 19103 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
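# --- Hedged illustration (not part of scikit-learn) ---
# Hoyer's sparseness is 1.0 for a vector with a single non-zero entry and 0.0
# for a vector whose entries all have equal magnitude.
def _example_sparseness():
    one_hot = np.array([0., 0., 3., 0.])
    flat = np.ones(4)
    return _sparseness(one_hot), _sparseness(flat)  # -> (1.0, 0.0)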
def check_non_negative(X, whom):
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
    n_components : int
        The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
U, S, V = randomized_svd(X, n_components)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
random_state = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
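# --- Hedged usage sketch (not part of scikit-learn) ---
# NNDSVD initialization of a rank-3 factorization of a random non-negative
# matrix; variant 'a' fills the zero entries with the mean of X. Shapes and
# the random seed are illustrative only.
def _example_initialize_nmf():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(30, 20))
    W, H = _initialize_nmf(X, n_components=3, variant='a', random_state=0)
    return W.shape, H.shape  # -> (30, 3) and (3, 20)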
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
        Larger values allow finding a better step size but lead to a longer
        line search. 0.1 is a commonly used value in the optimization
        literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
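# Illustrative sketch (hypothetical data, not part of the original module):
# solve for a non-negative H given fixed V and W and a non-negative start.
#
#   >>> rng = np.random.RandomState(0)
#   >>> V, W = np.abs(rng.randn(5, 4)), np.abs(rng.randn(5, 2))
#   >>> H0 = np.abs(rng.randn(2, 4))
#   >>> H, grad, n_iter = _nls_subproblem(V, W, H0, tol=1e-4, max_iter=200)
#   >>> (H >= 0).all()
#   True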
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
        Number of components; if n_components is not set, all components
        are kept.
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
        Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
        Maximum number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
random_state = self.random_state
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a')
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar')
elif init == "random":
rng = check_random_state(random_state)
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
| bsd-3-clause |
jrcohen02/brainx_archive2 | brainx/recarrutil.py | 4 | 6751 | """Some utilities for manipulating recarrays.
Warning
-------
This module should *never* be imported as 'import *'
"""
import numpy as np
import numpy.testing as nt
import sys
# The functionality in this module is now better provided by
# Pandas' DataFrame -- http://pandas.pydata.org/
sys.stderr.write('brainx.recarrutil will be removed,'
' install pandas instead\n')
# XXX - It's probably OK to import something, but for now let's ban * imports
# altogether.
__all__ = []
#-----------------------------------------------------------------------------
# Functions and public utilities
#-----------------------------------------------------------------------------
def extrude(arr,flatten=False):
"""Create a view of a recarray with one extra 'extruded' dimension.
XXX - document more...
"""
dt = arr.dtype
fieldtypes = [ v[0] for v in dt.fields.values() ]
if len(set(fieldtypes)) > 1:
raise ValueError("dtype of recarray must be uniform")
newdtype = fieldtypes[0]
nfields = len(dt.fields)
# If axis is None, for a normal array this means flatten everything and
# return a single number. In our case, we actually want to keep the last
# dimension (the "extruded" one) alive so that we can reconstruct the
# recarray in the end.
if flatten:
newshape = (arr.size,nfields)
else:
newshape = arr.shape + (nfields,)
# Make the new temp array we'll work with
return np.reshape(arr.view(newdtype),newshape)
def intrude(arr,dtype):
"""Intrude a recarray by 'flattening' its last dimension into a composite
dtype.
XXX - finish doc
"""
outshape = arr.shape[:-1]
return (np.reshape(arr.view(dtype),outshape)).view(np.recarray)
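# Illustrative sketch (not part of the original module): extrude() exposes the
# record fields as one trailing axis and intrude() reverses it.
#
#   >>> dt = np.dtype(dict(names=['x', 'y'], formats=[float, float]))
#   >>> r = np.zeros((2, 3), dt).view(np.recarray)
#   >>> extrude(r).shape                  # extra axis of length nfields == 2
#   (2, 3, 2)
#   >>> intrude(extrude(r), dt).shape     # back to the original shape
#   (2, 3)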
def offset_axis(axis):
"""Axis handling logic that is generic to all reductions."""
flatten = axis is None
if flatten:
axis = 0
else:
if axis < 0:
# The case of a negative input axis needs compensation, because we
# are adding a dimension by ourselves
axis -= 1
return flatten, axis
def reduction_factory(name):
"""Create a reduction operation for a given method name.
"""
def op(arr, axis=None):
# XXX what the hell is this logic?
flatten, axis = offset_axis(axis)
newarr = extrude(arr,flatten)
# Do the operation on the new array
method = getattr(newarr,name)
result = method(axis)
# Make the output back into a recarray of the original dtype
return intrude(result, arr.dtype)
doc = "%s of a recarray, preserving its structure." % name
op.__doc__ = doc
op.func_name = name
return op
# For methods in the array interface that take an axis argument, the pattern is
# always the same: extrude, operate, intrude. So we just auto-generate these
# functions here.
reduction_names = ['mean', 'std', 'var', 'min', 'max',
'sum', 'cumsum', 'prod', 'cumprod' ]
for fname in reduction_names:
exec "%s = reduction_factory('%s')" % (fname, fname)
def binop_factory(func):
    """Create a binary operation wrapping the ufunc *func*."""
    # Derive the display name from the ufunc itself; relying on the module
    # level loop variable 'name' being in scope at call time is fragile.
    name = func.__name__
    def op(a1, a2, out=None):
new_a1 = extrude(a1)
new_a2 = extrude(a2)
if out is not None:
out = extrude(out)
# Do the operation on the new array
if out is None:
result = func(new_a1, new_a2)
else:
result = func(new_a1, new_a2, out)
# Make the output back into a recarray of the original dtype
return intrude(result, a1.dtype)
doc = "Binary %s of two recarrays, preserving their structure." % name
op.__doc__ = doc
op.func_name = name
return op
# Binary operations follow the same extrude/operate/intrude pattern, so these
# wrappers are auto-generated here as well.
binops = [('add', np.add), ('subtract', np.subtract),
('multiply', np.multiply), ('divide', np.divide) ]
#binops = [('add',np.add), np.subtract, np.multiply, np.divide ]
for name, func in binops:
exec "%s = binop_factory(func)" % name
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_mean_zero():
dt = np.dtype(dict(names=['x','y'], formats=[float,float]))
z = np.zeros((2,3), dt)
nt.assert_equal(mean(z),z)
return 1
def mk_xyz():
"""Test utility, make x, y, z arrays."""
dt = np.dtype(dict(names=['x','y'],formats=[float,float]))
x = np.arange(6,dtype=float).reshape(2,3)
y = np.arange(10,16,dtype=float).reshape(2,3)
z = np.empty( (2,3), dt).view(np.recarray)
z.x = x
z.y = y
return x, y, z
def mk_xyzw():
"""Test utility, make x, y, z, w arrays."""
x, y, z = mk_xyz()
w = z.copy()
w.x *= 2
w.y *= 2
return x, y, z, w
def test_reductions():
x, y, z = mk_xyz()
for fname in reduction_names:
reduction = eval(fname)
xmeth = getattr(x, fname)
ymeth = getattr(y, fname)
for axis in [None,0,1,-1,-2]:
zred = reduction(z,axis)
nt.assert_equal(zred.x, xmeth(axis))
nt.assert_equal(zred.y, ymeth(axis))
def test_binops():
x, y, z, w = mk_xyzw()
binop_names = [n for (n, op) in binops]
for fname in binop_names:
op = eval(fname)
npop = getattr(np, fname)
opres = op(z,w)
nt.assert_equal(opres.x, npop(z.x, w.x))
nt.assert_equal(opres.y, npop(z.y, w.y))
# Test support utilities
def eval_tests(testgen):
"""Little utility to consume a nose-compliant test generator.
Returns
-------
The number of executed tests. An exception is raised if any fails."""
return len([ t[0](*t[1:]) for t in testgen() ])
# Mark it as not being a test itself, so nose doesn't try to run it
eval_tests.__test__ = False
def run_test_suite():
"""Call all our tests in sequence.
This lets us run the script as a test suite without needing nose or any
other test runner for simple cases"""
from time import clock
# Initialize counters
ntests = 0
start = clock()
# Call the tests and count them
ntests += test_mean_zero()
ntests += eval_tests(test_reductions)
ntests += eval_tests(test_binops)
# Stop clock and summarize
end = clock()
print '-'*70
print "Ran %s tests in %.3f" % (ntests, end-start)
print '\nOK'
run_test_suite.__test__ = False
# If run as a script, just run all the tests and print summary if successful
if __name__ == '__main__':
run_test_suite()
| bsd-3-clause |
dtaht/ns-3-codel-dev | src/core/examples/sample-rng-plot.py | 188 | 1246 | # -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
rng = ns.core.NormalVariable(100.0, 225.0)
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
| gpl-2.0 |
pslacerda/GromacsWrapper | gromacs/utilities.py | 1 | 21856 | # GromacsWrapper: utilities.py
# Copyright (c) 2009 Oliver Beckstein <orbeckst@gmail.com>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
"""
:mod:`gromacs.utilities` -- Helper functions and classes
========================================================
The module defines some convenience functions and classes that are
used in other modules; they do *not* make use of :mod:`gromacs.tools`
or :mod:`gromacs.cbook` and can be safely imported at any time.
Classes
-------
:class:`FileUtils` provides functions related to filename handling. It
can be used as a base or mixin class. The :class:`gromacs.analysis.Simulation`
class is derived from it.
.. autoclass:: FileUtils
:members:
.. autoclass:: AttributeDict
.. autoclass:: Timedelta
Functions
---------
Some additional convenience functions that deal with files and
directories:
.. function:: openany(directory[,mode='r'])
Context manager to open a compressed (bzip2, gzip) or plain file
(uses :func:`anyopen`).
.. autofunction:: anyopen
.. autofunction:: realpath
.. function:: in_dir(directory[,create=True])
Context manager to execute a code block in a directory.
* The *directory* is created if it does not exist (unless
*create* = ``False`` is set)
* At the end or after an exception code always returns to
the directory that was the current directory before entering
the block.
.. autofunction:: find_first
.. autofunction:: withextsep
Functions that improve list processing and which do *not* treat
strings as lists:
.. autofunction:: iterable
.. autofunction:: asiterable
.. autofunction:: firstof
Functions that help handling Gromacs files:
.. autofunction:: unlink_f
.. autofunction:: unlink_gmx
.. autofunction:: unlink_gmx_backups
.. autofunction:: number_pdbs
Functions that make working with matplotlib_ easier:
.. _matplotlib: http://matplotlib.sourceforge.net/
.. autofunction:: activate_subplot
.. autofunction:: remove_legend
Miscellaneous functions:
.. autofunction:: convert_aa_code
.. autofunction:: autoconvert
Data
----
.. autodata:: amino_acid_codes
"""
from __future__ import absolute_import, with_statement
__docformat__ = "restructuredtext en"
import os
import glob
import fnmatch
import re
import warnings
import errno
import subprocess
from contextlib import contextmanager
import bz2, gzip
import datetime
import logging
logger = logging.getLogger('gromacs.utilities')
from .exceptions import AutoCorrectionWarning
def Property(func):
"""Simple decorator wrapper to make full fledged properties.
See eg http://adam.gomaa.us/blog/2008/aug/11/the-python-property-builtin/
"""
return property(**func())
class AttributeDict(dict):
"""A dictionary with pythonic access to keys as attributes --- useful for interactive work."""
def __getattribute__(self,x):
try:
return super(AttributeDict,self).__getattribute__(x)
except AttributeError:
return self[x]
def __setattr__(self,name,value):
try:
super(AttributeDict,self).__setitem__(name, value)
except KeyError:
super(AttributeDict,self).__setattr__(name, value)
def __getstate__(self):
return self
def __setstate__(self, state):
self.update(state)
def autoconvert(s):
"""Convert input to a numerical type if possible.
1. A non-string object is returned as it is
2. Try conversion to int, float, str.
"""
if not type(s) is str:
return s
for converter in int, float, str: # try them in increasing order of lenience
try:
return converter(s)
except ValueError:
pass
raise ValueError("Failed to autoconvert %r" % s)
@contextmanager
def openany(datasource, mode='r', **kwargs):
"""Open the datasource and close it when the context exits.
:Arguments:
*datasource*
a stream or a filename
*mode*
``'r'`` opens for reading, ``'w'`` for writing ['r']
*kwargs*
additional keyword arguments that are passed through to the
actual handler; if these are not appropriate then an
exception will be raised by the handler
"""
stream, filename = anyopen(datasource, mode=mode, **kwargs)
try:
yield stream
finally:
stream.close()
def anyopen(datasource, mode='r', **kwargs):
"""Open datasource (gzipped, bzipped, uncompressed) and return a stream.
:Arguments:
*datasource*
a stream or a filename
*mode*
``'r'`` opens for reading, ``'w'`` for writing ['r']
*kwargs*
additional keyword arguments that are passed through to the
actual handler; if these are not appropriate then an
exception will be raised by the handler
"""
handlers = {'bz2': bz2.BZ2File, 'gz': gzip.open, '': file}
if mode.startswith('r'):
if hasattr(datasource,'next') or hasattr(datasource,'readline'):
stream = datasource
filename = '(%s)' % stream.name # maybe that does not always work?
else:
stream = None
filename = datasource
for ext in ('bz2', 'gz', ''): # file == '' should be last
openfunc = handlers[ext]
stream = _get_stream(datasource, openfunc, mode=mode, **kwargs)
if not stream is None:
break
if stream is None:
raise IOError("Cannot open %(filename)r in mode=%(mode)r." % vars())
elif mode.startswith('w'):
if hasattr(datasource, 'write'):
stream = datasource
filename = '(%s)' % stream.name # maybe that does not always work?
else:
stream = None
filename = datasource
name, ext = os.path.splitext(filename)
if ext.startswith(os.path.extsep):
ext = ext[1:]
if not ext in ('bz2', 'gz'):
ext = '' # anything else but bz2 or gz is just a normal file
openfunc = handlers[ext]
stream = openfunc(datasource, mode=mode, **kwargs)
if stream is None:
raise IOError("Cannot open %(filename)r in mode=%(mode)r with type %(ext)r." % vars())
else:
raise NotImplementedError("Sorry, mode=%(mode)r is not implemented for %(datasource)r" % vars())
return stream, filename
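# Illustrative sketch (hypothetical file names, not part of the original
# module): openany()/anyopen() provide one interface for plain and compressed
# files, e.g.
#
#   with openany('md.log') as f:              # plain text
#       first_line = f.readline()
#   with openany('energies.xvg.bz2') as f:    # bzip2-compressed, same usage
#       first_line = f.readline()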
def _get_stream(filename, openfunction=file, mode='r'):
try:
stream = openfunction(filename, mode=mode)
except IOError:
return None
try:
stream.readline()
stream.close()
stream = openfunction(filename,'r')
except IOError:
stream.close()
stream = None
return stream
# TODO: make it work for non-default charge state amino acids.
#: translation table for 1-letter codes --> 3-letter codes
#: .. Note: This does not work for HISB and non-default charge state aa!
amino_acid_codes = {'A':'ALA', 'C':'CYS', 'D':'ASP', 'E':'GLU',
'F':'PHE', 'G':'GLY', 'H':'HIS', 'I':'ILE',
'K':'LYS', 'L':'LEU', 'M':'MET', 'N':'ASN',
'P':'PRO', 'Q':'GLN', 'R':'ARG', 'S':'SER',
'T':'THR', 'V':'VAL', 'W':'TRP', 'Y':'TYR'}
inverse_aa_codes = {three: one for one,three in amino_acid_codes.items()}
def convert_aa_code(x):
"""Converts between 3-letter and 1-letter amino acid codes."""
if len(x) == 1:
return amino_acid_codes[x.upper()]
elif len(x) == 3:
return inverse_aa_codes[x.upper()]
else:
raise ValueError("Can only convert 1-letter or 3-letter amino acid codes, "
"not %r" % x)
@contextmanager
def in_dir(directory, create=True):
"""Context manager to execute a code block in a directory.
* The directory is created if it does not exist (unless
create=False is set)
* At the end or after an exception code always returns to
the directory that was the current directory before entering
the block.
"""
startdir = os.getcwd()
try:
try:
os.chdir(directory)
logger.debug("Working in %(directory)r..." % vars())
except OSError, err:
if create and err.errno == errno.ENOENT:
os.makedirs(directory)
os.chdir(directory)
logger.info("Working in %(directory)r (newly created)..." % vars())
else:
logger.exception("Failed to start working in %(directory)r." % vars())
raise
yield os.getcwd()
finally:
os.chdir(startdir)
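# Illustrative sketch (hypothetical directory, not part of the original
# module):
#
#   with in_dir('analysis/rmsd'):    # created on demand
#       ...                          # work relative to analysis/rmsd
#   # execution continues in the original working directory here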
def realpath(*args):
"""Join all args and return the real path, rooted at /.
Expands ``~`` and environment variables such as :envvar:`$HOME`.
Returns ``None`` if any of the args is none.
"""
if None in args:
return None
return os.path.realpath(
os.path.expandvars(os.path.expanduser(os.path.join(*args))))
def find_first(filename, suffices=None):
"""Find first *filename* with a suffix from *suffices*.
:Arguments:
*filename*
base filename; this file name is checked first
*suffices*
list of suffices that are tried in turn on the root of *filename*; can contain the
ext separator (:data:`os.path.extsep`) or not
:Returns: The first match or ``None``.
"""
# struct is not reliable as it depends on qscript so now we just try everything...
root,extension = os.path.splitext(filename)
if suffices is None:
suffices = []
else:
suffices = withextsep(suffices)
extensions = [extension] + suffices # native name is first
for ext in extensions:
fn = root + ext
if os.path.exists(fn):
return fn
return None
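# Illustrative sketch (hypothetical files, not part of the original module):
# with only 'md.tpr' present on disk, the first existing candidate wins:
#
#   find_first('md.xtc', suffices=['trr', 'tpr'])   # -> 'md.tpr'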
def withextsep(extensions):
"""Return list in which each element is guaranteed to start with :data:`os.path.extsep`."""
def dottify(x):
if x.startswith(os.path.extsep):
return x
return os.path.extsep + x
return [dottify(x) for x in asiterable(extensions)]
def find_files(directory, pattern):
"""Find files recursively under *directory*, matching *pattern* (generator).
    *pattern* is a UNIX-style glob pattern as used by :func:`fnmatch.fnmatch`.
Recipe by Bruno Oliveira from
http://stackoverflow.com/questions/2186525/use-a-glob-to-find-files-recursively-in-python
"""
for root, dirs, files in os.walk(directory):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
class FileUtils(object):
"""Mixin class to provide additional file-related capabilities."""
#: Default extension for files read/written by this class.
default_extension = None
def _init_filename(self, filename=None, ext=None):
"""Initialize the current filename :attr:`FileUtils.real_filename` of the object.
Bit of a hack.
- The first invocation must have ``filename != None``; this will set a
default filename with suffix :attr:`FileUtils.default_extension`
unless another one was supplied.
- Subsequent invocations either change the filename accordingly or
ensure that the default filename is set with the proper suffix.
"""
extension = ext or self.default_extension
filename = self.filename(filename, ext=extension, use_my_ext=True, set_default=True)
#: Current full path of the object for reading and writing I/O.
self.real_filename = os.path.realpath(filename)
def filename(self,filename=None,ext=None,set_default=False,use_my_ext=False):
"""Supply a file name for the class object.
Typical uses::
fn = filename() ---> <default_filename>
fn = filename('name.ext') ---> 'name'
fn = filename(ext='pickle') ---> <default_filename>'.pickle'
fn = filename('name.inp','pdf') --> 'name.pdf'
fn = filename('foo.pdf',ext='png',use_my_ext=True) --> 'foo.pdf'
The returned filename is stripped of the extension
(``use_my_ext=False``) and if provided, another extension is
appended. Chooses a default if no filename is given.
Raises a ``ValueError`` exception if no default file name is known.
If ``set_default=True`` then the default filename is also set.
``use_my_ext=True`` lets the suffix of a provided filename take
        priority over a default ``ext``.
.. versionchanged:: 0.3.1
An empty string as *ext* = "" will suppress appending an extension.
"""
if filename is None:
if not hasattr(self,'_filename'):
self._filename = None # add attribute to class
if self._filename:
filename = self._filename
else:
raise ValueError("A file name is required because no default file name was defined.")
my_ext = None
else:
filename, my_ext = os.path.splitext(filename)
if set_default: # replaces existing default file name
self._filename = filename
if my_ext and use_my_ext:
ext = my_ext
if ext is not None:
if ext.startswith(os.extsep):
ext = ext[1:] # strip a dot to avoid annoying mistakes
if ext != "":
filename = filename + os.extsep + ext
return filename
def check_file_exists(self, filename, resolve='exception', force=None):
"""If a file exists then continue with the action specified in ``resolve``.
``resolve`` must be one of
"ignore"
always return ``False``
"indicate"
return ``True`` if it exists
"warn"
indicate and issue a :exc:`UserWarning`
"exception"
raise :exc:`IOError` if it exists
Alternatively, set *force* for the following behaviour (which
ignores *resolve*):
``True``
same as *resolve* = "ignore" (will allow overwriting of files)
``False``
same as *resolve* = "exception" (will prevent overwriting of files)
``None``
ignored, do whatever *resolve* says
"""
def _warn(x):
msg = "File %r already exists." % x
logger.warn(msg)
warnings.warn(msg)
return True
def _raise(x):
msg = "File %r already exists." % x
logger.error(msg)
raise IOError(errno.EEXIST, x, msg)
solutions = {'ignore': lambda x: False, # file exists, but we pretend that it doesn't
'indicate': lambda x: True, # yes, file exists
'warn': _warn,
'warning': _warn,
'exception': _raise,
'raise': _raise,
}
if force is True:
resolve = 'ignore'
elif force is False:
resolve = 'exception'
if not os.path.isfile(filename):
return False
else:
return solutions[resolve](filename)
def infix_filename(self, name, default, infix, ext=None):
"""Unless *name* is provided, insert *infix* before the extension *ext* of *default*."""
if name is None:
p, oldext = os.path.splitext(default)
if ext is None:
ext = oldext
if ext.startswith(os.extsep):
ext = ext[1:]
name = self.filename(p+infix, ext=ext)
return name
def __repr__(self):
fmt = "%s(filename=%%r)" % self.__class__.__name__
try:
fn = self.filename()
except ValueError:
fn = None
return fmt % fn
def iterable(obj):
"""Returns ``True`` if *obj* can be iterated over and is *not* a string."""
if isinstance(obj, basestring):
return False # avoid iterating over characters of a string
if hasattr(obj, 'next'):
return True # any iterator will do
try:
len(obj) # anything else that might work
except TypeError:
return False
return True
def asiterable(obj):
"""Returns obj so that it can be iterated over; a string is *not* treated as iterable"""
if not iterable(obj):
obj = [obj]
return obj
def firstof(obj):
"""Returns the first entry of a sequence or the obj.
Treats strings as single objects.
"""
return asiterable(obj)[0]
# In utilities so that it can be safely used in tools, cbook, ...
def unlink_f(path):
"""Unlink path but do not complain if file does not exist."""
try:
os.unlink(path)
except OSError, err:
if err.errno != errno.ENOENT:
raise
def unlink_gmx(*args):
"""Unlink (remove) Gromacs file(s) and all corresponding backups."""
for path in args:
unlink_f(path)
unlink_gmx_backups(*args)
def unlink_gmx_backups(*args):
"""Unlink (rm) all backup files corresponding to the listed files."""
for path in args:
dirname, filename = os.path.split(path)
fbaks = glob.glob(os.path.join(dirname, '#'+filename+'.*#'))
for bak in fbaks:
unlink_f(bak)
def mkdir_p(path):
"""Create a directory *path* with subdirs but do not complain if it exists.
This is like GNU ``mkdir -p path``.
"""
try:
os.makedirs(path)
except OSError, err:
if err.errno != errno.EEXIST:
raise
def cat(f=None, o=None):
"""Concatenate files *f*=[...] and write to *o*"""
# need f, o to be compatible with trjcat and eneconv
if f is None or o is None:
return
target = o
infiles = asiterable(f)
logger.debug("cat %s > %s " % (" ".join(infiles), target))
with open(target, 'w') as out:
rc = subprocess.call(['cat'] + infiles, stdout=out)
if rc != 0:
msg = "failed with return code %d: cat %r > %r " % (rc, " ".join(infiles), target)
logger.exception(msg)
raise OSError(errno.EIO, msg, target)
# helpers for matplotlib
def activate_subplot(numPlot):
"""Make subplot *numPlot* active on the canvas.
Use this if a simple ``subplot(numRows, numCols, numPlot)``
overwrites the subplot instead of activating it.
"""
# see http://www.mail-archive.com/matplotlib-users@lists.sourceforge.net/msg07156.html
from pylab import gcf, axes
numPlot -= 1 # index is 0-based, plots are 1-based
return axes(gcf().get_axes()[numPlot])
def remove_legend(ax=None):
"""Remove legend for axes or gca.
See http://osdir.com/ml/python.matplotlib.general/2005-07/msg00285.html
"""
from pylab import gca, draw
if ax is None:
ax = gca()
ax.legend_ = None
draw()
# time functions
class Timedelta(datetime.timedelta):
"""Extension of :class:`datetime.timedelta`.
Provides attributes ddays, dhours, dminutes, dseconds to measure
the delta in normal time units.
ashours gives the total time in fractional hours.
"""
@property
def dhours(self):
"""Hours component of the timedelta."""
return self.seconds / 3600
@property
def dminutes(self):
"""Minutes component of the timedelta."""
return self.seconds/60 - 60*self.dhours
@property
def dseconds(self):
"""Seconds component of the timedelta."""
return self.seconds - 3600*self.dhours - 60*self.dminutes
@property
def ashours(self):
"""Timedelta in (fractional) hours."""
return 24*self.days + self.seconds / 3600.0
def strftime(self, fmt="%d:%H:%M:%S"):
"""Primitive string formatter.
The only directives understood are the following:
============ ==========================
Directive meaning
============ ==========================
%d day as integer
%H hour [00-23]
%h hours including days
%M minute as integer [00-59]
%S second as integer [00-59]
============ ==========================
"""
substitutions = {
"%d": str(self.days),
"%H": "%02d" % self.dhours,
"%h": str(24*self.days + self.dhours),
"%M": "%02d" % self.dminutes,
"%S": "%02d" % self.dseconds,
}
s = fmt
for search, replacement in substitutions.items():
s = s.replace(search, replacement)
return s
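# Illustrative sketch (not part of the original module; Python 2 integer
# division semantics assumed, matching the rest of this file):
#
#   >>> td = Timedelta(days=1, hours=2, minutes=3, seconds=4)
#   >>> td.dhours, td.dminutes, td.dseconds
#   (2, 3, 4)
#   >>> td.strftime()
#   '1:02:03:04'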
NUMBERED_PDB = re.compile(r"(?P<PREFIX>.*\D)(?P<NUMBER>\d+)\.(?P<SUFFIX>pdb)")
def number_pdbs(*args, **kwargs):
"""Rename pdbs x1.pdb ... x345.pdb --> x0001.pdb ... x0345.pdb
:Arguments:
- *args*: filenames or glob patterns (such as "pdb/md*.pdb")
- *format*: format string including keyword *num* ["%(num)04d"]
"""
format = kwargs.pop('format', "%(num)04d")
name_format = "%(prefix)s" + format +".%(suffix)s"
    # glob all patterns and concatenate the results into one flat list
    filenames = [f for arg in args for f in glob.glob(arg)]
for f in filenames:
m = NUMBERED_PDB.search(f)
if m is None:
continue
num = int(m.group('NUMBER'))
prefix = m.group('PREFIX')
suffix = m.group('SUFFIX')
newname = name_format % vars()
logger.info("Renaming %(f)r --> %(newname)r" % vars())
try:
os.rename(f, newname)
except OSError:
logger.exception("renaming failed")
| gpl-3.0 |
mxOBS/deb-pkg_trusty_chromium-browser | native_client/pnacl/driver/pnacl-driver.py | 1 | 31126 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import subprocess
from driver_tools import AddHostBinarySearchPath, DefaultOutputName, \
DefaultPCHOutputName, DriverChain, GetArch, ParseArgs, ParseTriple, \
Run, RunDriver, RunWithEnv, TempNameGen, UnrecognizedOption
from driver_env import env
from driver_log import DriverOpen, Log
import filetype
import pathtools
EXTRA_ENV = {
'ALLOW_TRANSLATE': '0', # Allow bitcode translation before linking.
# It doesn't normally make sense to do this.
'ALLOW_NATIVE' : '0', # Allow native objects (.S,.s,.o) to be in the
# linker line for .pexe generation.
# It doesn't normally make sense to do this.
# CXX_EH_MODE specifies how to deal with C++ exception handling:
# * 'none': Strips out use of C++ exception handling.
# * 'sjlj': Enables the setjmp()+longjmp()-based implementation of
# C++ exception handling. This is supported in PNaCl's stable
# ABI.
# * 'zerocost': Enables the zero-cost implementation of C++
# exception handling. This is not supported in PNaCl's stable
# ABI.
'CXX_EH_MODE': 'none',
'FORCE_INTERMEDIATE_LL': '0',
# Produce an intermediate .ll file
# Useful for debugging.
# NOTE: potentially different code paths and bugs
# might be triggered by this
'LANGUAGE' : '', # C or CXX (set by SetTool)
'INCLUDE_CXX_HEADERS': '0', # This is set by RunCC.
# Command-line options
'GCC_MODE' : '', # '' (default), '-E', '-c', or '-S'
'STDINC' : '1', # Include standard headers (-nostdinc sets to 0)
'STDINCCXX' : '1', # Include standard cxx headers (-nostdinc++ sets to 0)
'USE_STDLIB' : '1', # Include standard libraries (-nostdlib sets to 0)
'STDLIB' : '', # C++ Standard Library.
'STDLIB_TRUNC': '', # C++ Standard Library, truncated to pass as -lXXX.
'STDLIB_IDIR' : '', # C++ Standard Library include directory.
                                  # Note: the above C++ Standard Library
                                  # settings use a default if their value
                                  # remains unset.
'DEFAULTLIBS' : '1', # Link with default libraries
'DIAGNOSTIC' : '0', # Diagnostic flag detected
'PIC' : '0', # Generate PIC
# TODO(robertm): Switch the default to 1
'NO_ASM' : '0', # Disallow use of inline assembler
'NEED_DASH_E' : '0', # Used for stdin inputs, which must have an explicit
# type set (using -x) unless -E is specified.
'VERBOSE' : '0', # Verbose (-v)
'SHOW_VERSION': '0', # Version (--version)
'PTHREAD' : '0', # use pthreads?
'INPUTS' : '', # Input files
'OUTPUT' : '', # Output file
'UNMATCHED' : '', # Unrecognized parameters
'BIAS_NONE' : '',
'BIAS_ARM' : '-D__arm__ -D__ARM_ARCH_7A__ -D__ARMEL__',
'BIAS_MIPS32' : '-D__MIPS__ -D__mips__ -D__MIPSEL__',
'BIAS_X8632' : '-D__i386__ -D__i386 -D__i686 -D__i686__ -D__pentium4__',
'BIAS_X8664' : '-D__amd64__ -D__amd64 -D__x86_64__ -D__x86_64 -D__core2__',
'BIAS_ARM_NONSFI': '${BIAS_ARM} -D__native_client_nonsfi__',
'BIAS_X8632_NONSFI': '${BIAS_X8632} -D__native_client_nonsfi__',
'FRONTEND_TRIPLE' : 'le32-unknown-nacl',
'OPT_LEVEL' : '', # Default for most tools is 0, but we need to know
# if it's explicitly set or not when the driver
# is only used for linking + translating.
'CC_FLAGS' : '-O${#OPT_LEVEL ? ${OPT_LEVEL} : 0} ' +
'-fno-vectorize -fno-slp-vectorize ' +
'-fno-common ${PTHREAD ? -pthread} ' +
'-nostdinc ${BIAS_%BIAS%} ' +
# BUG: http://code.google.com/p/nativeclient/issues/detail?id=2345
# it would be better to detect asm use inside clang
# as some uses of asm are borderline legit, e.g.
# <prototype> asm("<function-name>");
'${NO_ASM ? -Dasm=ASM_FORBIDDEN -D__asm__=ASM_FORBIDDEN} ' +
'-target ${FRONTEND_TRIPLE}',
'ISYSTEM' : '${ISYSTEM_USER} ${STDINC ? ${ISYSTEM_BUILTIN}}',
'ISYSTEM_USER' : '', # System include directories specified by
# using the -isystem flag.
'ISYSTEM_BUILTIN':
'${BASE_USR}/usr/include ' +
'${ISYSTEM_CLANG} ' +
'${ISYSTEM_CXX} ' +
'${BASE_USR}/include ' +
'${BASE_SDK}/include ',
'ISYSTEM_CLANG' : '${BASE_LLVM}/lib/clang/${CLANG_VER}/include',
'ISYSTEM_CXX' :
'${INCLUDE_CXX_HEADERS && STDINCCXX ? ${ISYSTEM_CXX_include_paths}}',
'ISYSTEM_CXX_include_paths' :
'${BASE_USR}/include/c++/${STDLIB_IDIR} ' +
'${BASE_USR}/include/c++/${STDLIB_IDIR}/arm-none-linux-gnueabi ' +
'${BASE_USR}/include/c++/${STDLIB_IDIR}/backward',
# Only propagate opt level to linker if explicitly set, so that the
# linker will know if an opt level was explicitly set or not.
'LD_FLAGS' : '${#OPT_LEVEL ? -O${OPT_LEVEL}} -static ' +
'${PIC ? -fPIC} ${@AddPrefix:-L:SEARCH_DIRS} ' +
'--pnacl-exceptions=${CXX_EH_MODE}',
'SEARCH_DIRS' : '', # Directories specified using -L
# Library Strings
'EMITMODE' : '${!USE_STDLIB ? nostdlib : static}',
# This is setup so that LD_ARGS_xxx is evaluated lazily.
'LD_ARGS' : '${LD_ARGS_%EMITMODE%}',
# ${ld_inputs} signifies where to place the objects and libraries
# provided on the command-line.
'LD_ARGS_nostdlib': '-nostdlib ${ld_inputs}',
'LD_ARGS_static':
'${CXX_EH_MODE==zerocost ? -l:crt1_for_eh.x : -l:crt1.x} ' +
'-l:crti.bc -l:crtbegin.bc '
'${CXX_EH_MODE==sjlj ? -l:sjlj_eh_redirect.bc : '
'${CXX_EH_MODE==none ? -l:unwind_stubs.bc}} ' +
'${ld_inputs} ' +
'--start-group ${STDLIBS} --end-group',
'LLVM_PASSES_TO_DISABLE': '',
# Flags for translating to native .o files.
'TRANSLATE_FLAGS' : '-O${#OPT_LEVEL ? ${OPT_LEVEL} : 0}',
'STDLIBS' : '${DEFAULTLIBS ? '
'${LIBSTDCPP} ${LIBPTHREAD} ${LIBNACL} ${LIBC} ${LIBPNACLMM}}',
'LIBSTDCPP' : '${IS_CXX ? -l${STDLIB_TRUNC} -lm }',
'LIBC' : '-lc',
'LIBNACL' : '-lnacl',
'LIBPNACLMM': '-lpnaclmm',
# Enabled/disabled by -pthreads
'LIBPTHREAD': '${PTHREAD ? -lpthread}',
# IS_CXX is set by pnacl-clang and pnacl-clang++ programmatically
'CC' : '${IS_CXX ? ${CLANGXX} : ${CLANG}}',
'RUN_CC': '${CC} ${emit_llvm_flag} ${mode} ${CC_FLAGS} ' +
'${@AddPrefix:-isystem :ISYSTEM} ' +
'-x${typespec} "${infile}" -o ${output}',
}
def AddLLVMPassDisableFlag(*args):
env.append('LLVM_PASSES_TO_DISABLE', *args)
env.append('LD_FLAGS', *args)
def AddLDFlag(*args):
env.append('LD_FLAGS', *args)
def AddTranslatorFlag(*args):
# pass translator args to ld in case we go all the way to .nexe
env.append('LD_FLAGS', *['-Wt,' + a for a in args])
# pass translator args to translator in case we go to .o
env.append('TRANSLATE_FLAGS', *args)
def AddCCFlag(*args):
env.append('CC_FLAGS', *args)
def AddDiagnosticFlag(*args):
env.append('CC_FLAGS', *args)
env.set('DIAGNOSTIC', '1')
def SetTarget(*args):
arch = ParseTriple(args[0])
env.set('FRONTEND_TRIPLE', args[0])
AddLDFlag('--target=' + args[0])
def SetStdLib(*args):
"""Set the C++ Standard Library."""
lib = args[0]
assert lib == 'libc++' or lib == 'libstdc++', (
'Invalid C++ standard library: -stdlib=%s' % lib)
env.set('STDLIB', lib)
env.set('STDLIB_TRUNC', lib[3:])
if lib == 'libc++':
env.set('STDLIB_IDIR', 'v1')
if env.getbool('IS_CXX'):
# libc++ depends on pthread for C++11 features as well as some
# exception handling (which may get removed later by the PNaCl ABI
# simplification) and initialize-once.
env.set('PTHREAD', '1')
elif lib == 'libstdc++':
env.set('STDLIB_IDIR', '4.6.2')
def IsPortable():
return env.getone('FRONTEND_TRIPLE').startswith('le32-')
stdin_count = 0
def AddInputFileStdin():
global stdin_count
# When stdin is an input, -x or -E must be given.
forced_type = filetype.GetForcedFileType()
if not forced_type:
# Only allowed if -E is specified.
forced_type = 'c'
env.set('NEED_DASH_E', '1')
stdin_name = '__stdin%d__' % stdin_count
env.append('INPUTS', stdin_name)
filetype.ForceFileType(stdin_name, forced_type)
stdin_count += 1
def IsStdinInput(f):
return f.startswith('__stdin') and f.endswith('__')
def HandleDashX(arg):
if arg == 'none':
filetype.SetForcedFileType(None)
return
filetype.SetForcedFileType(filetype.GCCTypeToFileType(arg))
def AddVersionFlag(*args):
env.set('SHOW_VERSION', '1')
AddDiagnosticFlag(*args)
def AddBPrefix(prefix):
""" Add a path to the list searched for host binaries and include dirs. """
AddHostBinarySearchPath(prefix)
prefix = pathtools.normalize(prefix)
if pathtools.isdir(prefix) and not prefix.endswith('/'):
prefix += '/'
# Add prefix/ to the library search dir if it exists
if pathtools.isdir(prefix):
env.append('SEARCH_DIRS', prefix)
# Add prefix/include to isystem if it exists
include_dir = prefix + 'include'
if pathtools.isdir(include_dir):
env.append('ISYSTEM_USER', include_dir)
CustomPatterns = [
( '--driver=(.+)', "env.set('CC', pathtools.normalize($0))\n"),
( '--pnacl-allow-native', "env.set('ALLOW_NATIVE', '1')"),
( '--pnacl-allow-translate', "env.set('ALLOW_TRANSLATE', '1')"),
( '--pnacl-frontend-triple=(.+)', SetTarget),
( ('-target','(.+)'), SetTarget),
( ('--target=(.+)'), SetTarget),
( '--pnacl-exceptions=(none|sjlj|zerocost)', "env.set('CXX_EH_MODE', $0)"),
# TODO(mseaborn): Remove "--pnacl-allow-exceptions", which is
# superseded by "--pnacl-exceptions".
( '--pnacl-allow-exceptions', "env.set('CXX_EH_MODE', 'zerocost')"),
( '(--pnacl-allow-nexe-build-id)', AddLDFlag),
( '(--pnacl-disable-abi-check)', AddLDFlag),
( '(--pnacl-disable-pass=.+)', AddLLVMPassDisableFlag),
]
GCCPatterns = [
( '-o(.+)', "env.set('OUTPUT', pathtools.normalize($0))"),
( ('-o', '(.+)'), "env.set('OUTPUT', pathtools.normalize($0))"),
( '-E', "env.set('GCC_MODE', '-E')"),
( '-S', "env.set('GCC_MODE', '-S')"),
( '-c', "env.set('GCC_MODE', '-c')"),
( '-allow-asm', "env.set('NO_ASM', '0')"),
( '-nostdinc', "env.set('STDINC', '0')"),
( '-nostdinc\+\+', "env.set('STDINCCXX', '0')"),
( '-nostdlib', "env.set('USE_STDLIB', '0')"),
( '-nodefaultlibs', "env.set('DEFAULTLIBS', '0')"),
( '-?-stdlib=(.*)', SetStdLib),
( ('-?-stdlib', '(.*)'), SetStdLib),
# Flags to pass to native linker
( '(-Wn,.*)', AddLDFlag),
( '-rdynamic', "env.append('LD_FLAGS', '-export-dynamic')"),
# Flags to pass to pnacl-translate
( '-Wt,(.*)', AddTranslatorFlag),
( ('-Xtranslator','(.*)'), AddTranslatorFlag),
# We don't care about -fPIC, but pnacl-ld and pnacl-translate do.
( '-fPIC', "env.set('PIC', '1')"),
# We must include -l, -Xlinker, and -Wl options into the INPUTS
  # in the order they appeared. This is exactly the behavior of gcc.
# For example: gcc foo.c -Wl,--start-group -lx -ly -Wl,--end-group
#
( '(-l.+)', "env.append('INPUTS', $0)"),
( ('(-l)','(.+)'), "env.append('INPUTS', $0+$1)"),
( ('-Xlinker','(.*)'), "env.append('INPUTS', '-Xlinker=' + $0)"),
( '(-Wl,.*)', "env.append('INPUTS', $0)"),
( '(-Bstatic)', "env.append('INPUTS', $0)"),
( '(-Bdynamic)', "env.append('INPUTS', $0)"),
( '-O([sz])', "env.set('OPT_LEVEL', $0)\n"),
( '-O([0-3])', "env.set('OPT_LEVEL', $0)\n"),
( '-O([0-9]+)', "env.set('OPT_LEVEL', '3')\n"),
( '-O', "env.set('OPT_LEVEL', '1')\n"),
( ('-isystem', '(.*)'),
"env.append('ISYSTEM_USER', pathtools.normalize($0))"),
( '-isystem(.+)',
"env.append('ISYSTEM_USER', pathtools.normalize($0))"),
( ('-I', '(.+)'), "env.append('CC_FLAGS', '-I'+pathtools.normalize($0))"),
( '-I(.+)', "env.append('CC_FLAGS', '-I'+pathtools.normalize($0))"),
# -I is passed through, so we allow -isysroot and pass it through as well.
# However -L is intercepted and interpreted, so it would take more work
# to handle -sysroot w/ libraries.
( ('-isysroot', '(.+)'),
"env.append('CC_FLAGS', '-isysroot ' + pathtools.normalize($0))"),
( '-isysroot(.+)',
"env.append('CC_FLAGS', '-isysroot ' + pathtools.normalize($0))"),
# NOTE: the -iquote =DIR syntax (substitute = with sysroot) doesn't work.
# Clang just says: ignoring nonexistent directory "=DIR"
( ('-iquote', '(.+)'),
"env.append('CC_FLAGS', '-iquote', pathtools.normalize($0))"),
( ('-iquote(.+)'),
"env.append('CC_FLAGS', '-iquote', pathtools.normalize($0))"),
( ('-idirafter', '(.+)'),
"env.append('CC_FLAGS', '-idirafter'+pathtools.normalize($0))"),
( '-idirafter(.+)',
"env.append('CC_FLAGS', '-idirafter'+pathtools.normalize($0))"),
( ('(-include)','(.+)'), AddCCFlag),
( ('(-include.+)'), AddCCFlag),
( '(--relocatable-pch)', AddCCFlag),
( '(-g)', AddCCFlag),
( '(-W.*)', AddCCFlag),
( '(-w)', AddCCFlag),
( '(-std=.*)', AddCCFlag),
( '(-ansi)', AddCCFlag),
( ('(-D)','(.*)'), AddCCFlag),
( '(-D.+)', AddCCFlag),
( ('(-U)','(.*)'), AddCCFlag),
( '(-U.+)', AddCCFlag),
( '(-f.*)', AddCCFlag),
( '(-pedantic)', AddCCFlag),
( '(-pedantic-errors)', AddCCFlag),
( '(-g.*)', AddCCFlag),
( '(-v|--v)', "env.append('CC_FLAGS', $0)\n"
"env.set('VERBOSE', '1')"),
( '(-pthreads?)', "env.set('PTHREAD', '1')"),
# No-op: accepted for compatibility in case build scripts pass it.
( '-static', ""),
( ('-B','(.*)'), AddBPrefix),
( ('-B(.+)'), AddBPrefix),
( ('-L','(.+)'), "env.append('SEARCH_DIRS', pathtools.normalize($0))"),
( '-L(.+)', "env.append('SEARCH_DIRS', pathtools.normalize($0))"),
( '(-Wp,.*)', AddCCFlag),
( '(-Xpreprocessor .*)', AddCCFlag),
( ('(-Xclang)', '(.*)'), AddCCFlag),
# Accept and ignore default flags
( '-m32', ""),
( '-emit-llvm', ""),
( '(-MG)', AddCCFlag),
( '(-MMD)', AddCCFlag),
( '(-MM?)', "env.append('CC_FLAGS', $0)\n"
"env.set('GCC_MODE', '-E')"),
( '(-MP)', AddCCFlag),
( ('(-MQ)','(.*)'), AddCCFlag),
( '(-MD)', AddCCFlag),
( ('(-MT)','(.*)'), AddCCFlag),
( ('(-MF)','(.*)'), "env.append('CC_FLAGS', $0, pathtools.normalize($1))"),
( ('-x', '(.+)'), HandleDashX),
( '-x(.+)', HandleDashX),
( ('(-mllvm)', '(.+)'), AddCCFlag),
# Ignore these gcc flags
( '(-msse)', ""),
( '(-march=armv7-a)', ""),
( '(-pipe)', ""),
( '(-s)', AddLDFlag),
( '(--strip-all)', AddLDFlag),
( '(--strip-debug)', AddLDFlag),
# Ignore these assembler flags
( '(-Qy)', ""),
( ('(--traditional-format)', '.*'), ""),
( '(-gstabs)', ""),
( '(--gstabs)', ""),
( '(-gdwarf2)', ""),
( '(--gdwarf2)', ""),
( '(--fatal-warnings)', ""),
( '(-meabi=.*)', ""),
( '(-mfpu=.*)', ""),
( '(-mfloat-abi=.+)', AddCCFlag),
# GCC diagnostic mode triggers
( '(-print-.*)', AddDiagnosticFlag),
( '(--print.*)', AddDiagnosticFlag),
( '(-dumpspecs)', AddDiagnosticFlag),
( '(--version)', AddVersionFlag),
# These are preprocessor flags which should be passed to the frontend, but
# should not prevent the usual -i flags (which DIAGNOSTIC mode does)
( '(-d[DIMNU])', AddCCFlag),
( '(-d.*)', AddDiagnosticFlag),
# Catch all other command-line arguments
( '(-.+)', "env.append('UNMATCHED', $0)"),
# Standard input
( '-', AddInputFileStdin),
# Input Files
# Call ForceFileType for all input files at the time they are
# parsed on the command-line. This ensures that the gcc "-x"
# setting is correctly applied.
( '(.*)', "env.append('INPUTS', pathtools.normalize($0))\n"
"filetype.ForceFileType(pathtools.normalize($0))"),
]
def CheckSetup():
if not env.has('IS_CXX'):
Log.Fatal('"pnacl-driver" cannot be used directly. '
'Use pnacl-clang or pnacl-clang++.')
def DriverOutputTypes(driver_flag, compiling_to_native):
output_type_map = {
('-E', False) : 'pp',
('-E', True) : 'pp',
('-c', False) : 'po',
('-c', True) : 'o',
('-S', False) : 'll',
('-S', True) : 's',
('', False) : 'pexe',
('', True) : 'nexe',
}
return output_type_map[(driver_flag, compiling_to_native)]
def ReadDriverRevision():
rev_file = env.getone('DRIVER_REV_FILE')
# Might be an SVN version or a GIT hash (depending on the NaCl src client)
nacl_ver = DriverOpen(rev_file, 'rb').readlines()[0]
m = re.search(r'\[SVN\].*/native_client:\s*(\d+)', nacl_ver)
if m:
return m.group(1)
m = re.search(r'\[GIT\].*/native_client.git:\s*(\w+)', nacl_ver)
if m:
return m.group(1)
# fail-fast: if the REV file exists but regex search failed,
# we need to fix the regex to get nacl-version.
if not m:
Log.Fatal('Failed to parse REV file to get nacl-version.')
def main(argv):
env.update(EXTRA_ENV)
CheckSetup()
ParseArgs(argv, CustomPatterns + GCCPatterns)
# "configure", especially when run as part of a toolchain bootstrap
# process, will invoke gcc with various diagnostic options and
# parse the output. In these cases we do not alter the incoming
# commandline. It is also important to not emit spurious messages.
if env.getbool('DIAGNOSTIC'):
if env.getbool('SHOW_VERSION'):
code, stdout, stderr = Run(env.get('CC') + env.get('CC_FLAGS'),
redirect_stdout=subprocess.PIPE)
out = stdout.split('\n')
nacl_version = ReadDriverRevision()
out[0] += ' nacl-version=%s' % nacl_version
stdout = '\n'.join(out)
print stdout,
else:
Run(env.get('CC') + env.get('CC_FLAGS'))
return 0
unmatched = env.get('UNMATCHED')
if len(unmatched) > 0:
UnrecognizedOption(*unmatched)
# If -arch was given, we are compiling directly to native code
compiling_to_native = GetArch() is not None
if env.getbool('ALLOW_NATIVE'):
if not compiling_to_native:
Log.Fatal("--pnacl-allow-native without -arch is not meaningful.")
# For native/mixed links, also bring in the native libgcc and
# libcrt_platform to avoid link failure if pre-translated native
# code needs functions from it.
env.append('LD_FLAGS', env.eval('-L${LIBS_NATIVE_ARCH}'))
env.append('STDLIBS', '-lgcc')
env.append('STDLIBS', '-lcrt_platform')
if not env.get('STDLIB'):
# Default C++ Standard Library.
SetStdLib('libc++')
flags_and_inputs = env.get('INPUTS')
output = env.getone('OUTPUT')
if len(flags_and_inputs) == 0:
if env.getbool('VERBOSE'):
# -v can be invoked without any inputs. Runs the original
# command without modifying the commandline for this case.
Run(env.get('CC') + env.get('CC_FLAGS'))
return 0
else:
Log.Fatal('No input files')
gcc_mode = env.getone('GCC_MODE')
output_type = DriverOutputTypes(gcc_mode, compiling_to_native)
# INPUTS consists of actual input files and a subset of flags like -Wl,<foo>.
# Create a version with just the files.
inputs = [f for f in flags_and_inputs if not IsFlag(f)]
header_inputs = [f for f in inputs
if filetype.IsHeaderType(filetype.FileType(f))]
# Handle PCH case specially (but only for a limited sense...)
if header_inputs and gcc_mode != '-E':
# We only handle doing pre-compiled headers for all inputs or not at
# all at the moment. This is because DriverOutputTypes only assumes
# one type of output, depending on the "gcc_mode" flag. When mixing
# header inputs w/ non-header inputs, some of the outputs will be
# pch while others will be output_type. We would also need to modify
# the input->output chaining for the needs_linking case.
if len(header_inputs) != len(inputs):
Log.Fatal('mixed compiling of headers and source not supported')
CompileHeaders(header_inputs, output)
return 0
needs_linking = (gcc_mode == '')
if env.getbool('NEED_DASH_E') and gcc_mode != '-E':
Log.Fatal("-E or -x required when input is from stdin")
# There are multiple input files and no linking is being done.
# There will be multiple outputs. Handle this case separately.
if not needs_linking:
if output != '' and len(inputs) > 1:
Log.Fatal('Cannot have -o with -c, -S, or -E and multiple inputs: %s',
repr(inputs))
for f in inputs:
intype = filetype.FileType(f)
if not (filetype.IsSourceType(intype) or filetype.IsHeaderType(intype)):
if ((output_type == 'pp' and intype != 'S') or
(output_type == 'll') or
(output_type == 'po' and intype != 'll') or
(output_type == 's' and intype not in ('ll','po','S')) or
(output_type == 'o' and intype not in ('ll','po','S','s'))):
Log.Fatal("%s: Unexpected type of file for '%s'",
pathtools.touser(f), gcc_mode)
if output == '':
f_output = DefaultOutputName(f, output_type)
else:
f_output = output
namegen = TempNameGen([f], f_output)
CompileOne(f, output_type, namegen, f_output)
return 0
# Linking case
assert(needs_linking)
assert(output_type in ('pso','so','pexe','nexe'))
if output == '':
output = pathtools.normalize('a.out')
namegen = TempNameGen(flags_and_inputs, output)
# Compile all source files (c/c++/ll) to .po
for i in xrange(0, len(flags_and_inputs)):
if IsFlag(flags_and_inputs[i]):
continue
intype = filetype.FileType(flags_and_inputs[i])
if filetype.IsSourceType(intype) or intype == 'll':
flags_and_inputs[i] = CompileOne(flags_and_inputs[i], 'po', namegen)
# Compile all .s/.S to .o
if env.getbool('ALLOW_NATIVE'):
for i in xrange(0, len(flags_and_inputs)):
if IsFlag(flags_and_inputs[i]):
continue
intype = filetype.FileType(flags_and_inputs[i])
if intype in ('s','S'):
flags_and_inputs[i] = CompileOne(flags_and_inputs[i], 'o', namegen)
# We should only be left with .po and .o and libraries
for f in flags_and_inputs:
if IsFlag(f):
continue
intype = filetype.FileType(f)
if intype in ('o','s','S') or filetype.IsNativeArchive(f):
if not env.getbool('ALLOW_NATIVE'):
Log.Fatal('%s: Native object files not allowed in link. '
'Use --pnacl-allow-native to override.', pathtools.touser(f))
assert(intype in ('po','o','so','ldscript') or filetype.IsArchive(f))
# Fix the user-specified linker arguments
ld_inputs = []
for f in flags_and_inputs:
if f.startswith('-Xlinker='):
ld_inputs.append(f[len('-Xlinker='):])
elif f.startswith('-Wl,'):
ld_inputs += f[len('-Wl,'):].split(',')
else:
ld_inputs.append(f)
if env.getbool('ALLOW_NATIVE'):
ld_inputs.append('--pnacl-allow-native')
# Invoke the linker
env.set('ld_inputs', *ld_inputs)
ld_args = env.get('LD_ARGS')
ld_flags = env.get('LD_FLAGS')
RunDriver('pnacl-ld', ld_flags + ld_args + ['-o', output])
return 0
def IsFlag(f):
return f.startswith('-')
def CompileHeaders(header_inputs, output):
if output != '' and len(header_inputs) > 1:
Log.Fatal('Cannot have -o <out> and compile multiple header files: %s',
repr(header_inputs))
for f in header_inputs:
f_output = output if output else DefaultPCHOutputName(f)
RunCC(f, f_output, mode='', emit_llvm_flag='')
def CompileOne(infile, output_type, namegen, output = None):
if output is None:
output = namegen.TempNameForInput(infile, output_type)
chain = DriverChain(infile, output, namegen)
SetupChain(chain, filetype.FileType(infile), output_type)
chain.run()
return output
def RunCC(infile, output, mode, emit_llvm_flag='-emit-llvm'):
intype = filetype.FileType(infile)
typespec = filetype.FileTypeToGCCType(intype)
include_cxx_headers = ((env.get('LANGUAGE') == 'CXX') or
(intype in ('c++', 'c++-header')))
env.setbool('INCLUDE_CXX_HEADERS', include_cxx_headers)
if IsStdinInput(infile):
infile = '-'
RunWithEnv("${RUN_CC}", infile=infile, output=output,
emit_llvm_flag=emit_llvm_flag, mode=mode,
typespec=typespec)
def RunLLVMAS(infile, output):
if IsStdinInput(infile):
infile = '-'
# This is a bitcode only step - so get rid of "-arch xxx" which
# might be inherited from the current invocation
RunDriver('pnacl-as', [infile, '-o', output],
suppress_inherited_arch_args=True)
def RunNativeAS(infile, output):
if IsStdinInput(infile):
infile = '-'
RunDriver('pnacl-as', [infile, '-o', output])
def RunTranslate(infile, output, mode):
if not env.getbool('ALLOW_TRANSLATE'):
Log.Fatal('%s: Trying to convert bitcode to an object file before '
'bitcode linking. This is supposed to wait until '
'translation. Use --pnacl-allow-translate to override.',
pathtools.touser(infile))
args = env.get('TRANSLATE_FLAGS') + [mode, '--allow-llvm-bitcode-input',
infile, '-o', output]
if env.getbool('PIC'):
args += ['-fPIC']
RunDriver('pnacl-translate', args)
def RunOpt(infile, outfile, pass_list):
filtered_list = [pass_option for pass_option in pass_list
if pass_option not in env.get('LLVM_PASSES_TO_DISABLE')]
RunDriver('pnacl-opt', filtered_list + [infile, '-o', outfile])
def SetupChain(chain, input_type, output_type):
assert(output_type in ('pp','ll','po','s','o'))
cur_type = input_type
# source file -> pp
if filetype.IsSourceType(cur_type) and output_type == 'pp':
chain.add(RunCC, 'cpp', mode='-E')
cur_type = 'pp'
if cur_type == output_type:
return
# header file -> pre-process
if filetype.IsHeaderType(cur_type) and output_type == 'pp':
chain.add(RunCC, 'cpp', mode='-E')
cur_type = 'pp'
if cur_type == output_type:
return
# source file -> ll
if (filetype.IsSourceType(cur_type) and
(env.getbool('FORCE_INTERMEDIATE_LL') or output_type == 'll')):
chain.add(RunCC, 'll', mode='-S')
cur_type = 'll'
if cur_type == output_type:
return
# ll -> po
if cur_type == 'll':
chain.add(RunLLVMAS, 'po')
cur_type = 'po'
if cur_type == output_type:
return
# source file -> po (we also force native output to go through this phase)
if filetype.IsSourceType(cur_type) and output_type in ('po', 'o', 's'):
chain.add(RunCC, 'po', mode='-c')
cur_type = 'po'
if cur_type == output_type:
return
# po -> o
if (cur_type == 'po' and output_type == 'o'):
# If we aren't using biased bitcode, then at least -expand-byval
# must be run to work with the PPAPI shim calling convention.
if IsPortable():
chain.add(RunOpt, 'expand.po', pass_list=['-expand-byval'])
chain.add(RunTranslate, 'o', mode='-c')
cur_type = 'o'
if cur_type == output_type:
return
# po -> s
if cur_type == 'po':
# If we aren't using biased bitcode, then at least -expand-byval
# must be run to work with the PPAPI shim calling convention.
if IsPortable():
chain.add(RunOpt, 'expand.po', pass_list=['-expand-byval'])
chain.add(RunTranslate, 's', mode='-S')
cur_type = 's'
if cur_type == output_type:
return
# S -> s
if cur_type == 'S':
chain.add(RunCC, 's', mode='-E')
cur_type = 's'
if output_type == 'pp':
return
if cur_type == output_type:
return
# s -> o
if cur_type == 's' and output_type == 'o':
chain.add(RunNativeAS, 'o')
cur_type = 'o'
if cur_type == output_type:
return
Log.Fatal("Unable to compile .%s to .%s", input_type, output_type)
def get_help(argv):
tool = env.getone('SCRIPT_NAME')
if '--help-full' in argv:
# To get ${CC}, etc.
env.update(EXTRA_ENV)
code, stdout, stderr = Run('"${CC}" -help',
redirect_stdout=subprocess.PIPE,
redirect_stderr=subprocess.STDOUT,
errexit=False)
return stdout
else:
return """
This is a "GCC-compatible" driver using clang under the hood.
Usage: %s [options] <inputs> ...
BASIC OPTIONS:
-o <file> Output to <file>.
-E Only run the preprocessor.
-S Generate bitcode assembly.
-c Generate bitcode object.
-I <dir> Add header search path.
-L <dir> Add library search path.
-D<key>[=<val>] Add definition for the preprocessor.
-W<id> Toggle warning <id>.
-f<feature> Enable <feature>.
-Wl,<arg> Pass <arg> to the linker.
-Xlinker <arg> Pass <arg> to the linker.
-Wt,<arg> Pass <arg> to the translator.
-Xtranslator <arg> Pass <arg> to the translator.
-Wp,<arg> Pass <arg> to the preprocessor.
-Xpreprocessor,<arg> Pass <arg> to the preprocessor.
-x <language> Treat subsequent input files as having type <language>.
-static Produce a static executable (the default).
-Bstatic Link subsequent libraries statically.
-Bdynamic Link subsequent libraries dynamically.
-fPIC Ignored (only used by translator backend)
(accepted for compatibility).
-pipe Ignored (for compatibility).
-O<n> Optimization level <n>: 0, 1, 2, 3, 4 or s.
-g Generate complete debug information.
-gline-tables-only Generate debug line-information only
(allowing for stack traces).
-flimit-debug-info Generate limited debug information.
-save-temps Keep intermediate compilation results.
-v Verbose output / show commands.
-h | --help Show this help.
--help-full Show underlying clang driver's help message
(warning: not all options supported).
""" % (tool)
| bsd-3-clause |
ddsc/dikedata-api | dikedata_api/views.py | 1 | 55485 | # (c) Nelen & Schuurmans. MIT licensed, see LICENSE.rst.
from __future__ import unicode_literals
from datetime import datetime
import calendar
import json
import logging
import mimetypes
import numpy as np
import requests
import time
from django.contrib.auth.models import User, Group as Role
from django.core.exceptions import ValidationError
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.db.models import Sum
from django.http import Http404, HttpResponse
from django.db.models import Q
from rest_framework import exceptions as ex, generics
from rest_framework.parsers import JSONParser, FormParser
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from rest_framework.pagination import PaginationSerializer
from rest_framework.exceptions import ParseError
from rest_framework import status
from rest_framework.request import clone_request
from haystack.query import SearchQuerySet
from lizard_security.models import (
DataSet, DataOwner, UserGroup, PermissionMapper
)
from ddsc_core.auth import PERMISSION_CHANGE
from ddsc_core.models import (Alarm, Alarm_Active, Alarm_Item, IdMapping,
Location, LogicalGroup, LogicalGroupEdge, Source,
Timeseries, Manufacturer, StatusCache)
from ddsc_core.models.aquo import Compartment
from ddsc_core.models.aquo import MeasuringDevice
from ddsc_core.models.aquo import MeasuringMethod
from ddsc_core.models.aquo import Parameter
from ddsc_core.models.aquo import ProcessingMethod
from ddsc_core.models.aquo import ReferenceFrame
from ddsc_core.models.aquo import Unit
from dikedata_api import mixins, serializers
from dikedata_api.parsers import CSVParser
from dikedata_api.douglas_peucker import decimate_until
from dikedata_api.renderers import CSVRenderer
from tslib.readers import ListReader
logger = logging.getLogger(__name__)
COLNAME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
COLNAME_FORMAT_MS = '%Y-%m-%dT%H:%M:%S.%fZ' # supports milliseconds
FILENAME_FORMAT = '%Y-%m-%dT%H.%M.%S.%fZ'
GEOSERVER_FORMAT = COLNAME_FORMAT # used in geoserver
mimetypes.init()
BOOL_LOOKUPS = ("isnull",)
INT_LOOKUPS = ("year", "month", "day", "week_day",)
STR_LOOKUPS = (
"contains", "icontains", "startswith", "istartswith", "endswith",
"iendswith", "search", "regex", "iregex",
)
ALL_LOOKUPS = BOOL_LOOKUPS + INT_LOOKUPS + STR_LOOKUPS + (
"exact", "iexact", "gt", "gte", "lt", "lte",
)
class InvalidKey(ParseError):
def __init__(self, key):
message = "Unknown field or lookup: %s." % key
super(ParseError, self).__init__(message)
def customfilter(view, qs, filter_json=None, order_field=None):
"""
Function for adding filters to a queryset.
Set `customfilter_fields` on the view to restrict the allowed fields.
:param view: view (self)
:param qs: queryset
:param filter_json: raw JSON of the filter, e.g. '{"name__contains": "bla", "uuid__startswith": "7"}'
:param order_field: optional field to order by; prefix with '-' for descending order
:return: the filtered (and optionally ordered) queryset
"""
filter_fields = {}
if not view.customfilter_fields == '*':
for item in view.customfilter_fields:
if type(item) == tuple:
filter_fields[item[0]] = item[1]
else:
filter_fields[item] = item
exclude = False
if filter_json:
filter_dict = json.loads(filter_json)
else:
filter_dict = {}
for key, value in filter_dict.items():
# support for dots instead of double underscores
key = key.replace('.', '__')
#get key and lookup
possible_lookup = key.rsplit('__', 1)
if len(possible_lookup) == 2 and possible_lookup[1] in ALL_LOOKUPS:
key = possible_lookup[0]
lookup = possible_lookup[1]
else:
lookup = 'exact'
# check whether this condition is an include or an exclude
# (reset per condition so one exclude does not leak into later conditions)
exclude = False
if type(value) == unicode and value.startswith('#'):
exclude = True
value = value.lstrip('#')
#check if key is allowed
if key in filter_fields.keys():
key = filter_fields[key]
if value:
if exclude:
qs = qs.exclude(**{'%s__%s' % (key, lookup): value})
else:
qs = qs.filter(**{'%s__%s' % (key, lookup): value})
if order_field:
order_field = order_field.replace('.','__')
reverse = False
if order_field.startswith('-'):
reverse = True
order_field = order_field.lstrip('-')
if order_field in filter_fields:
order_field = filter_fields[order_field]
if reverse:
order_field = '-' + order_field
qs = qs.order_by(order_field)
return qs
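# A hypothetical usage sketch of customfilter (the view, model and field
# names are only an illustration), assuming the view declares
# customfilter_fields = ('name', 'uuid'):
#
#   qs = customfilter(self, Timeseries.objects.all(),
#                     filter_json='{"name__icontains": "temp", "uuid": "#7"}',
#                     order_field='-name')
#
# Dots in keys are translated to double underscores, and a value prefixed
# with '#' turns that condition into an exclude().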
def write_events(user, data):
if user is None:
raise ex.NotAuthenticated("User not logged in.")
reader = ListReader(data)
permission = True
locations = {}
series = {}
events = []
total = 0
for (uuid, df) in reader.get_series():
if uuid not in series:
try:
series[uuid] = Timeseries.objects.get(uuid=uuid)
except Timeseries.DoesNotExist:
map = IdMapping.objects.get(user__username=user, remote_id=uuid)
series[uuid] = map.timeseries
locations[series[uuid].location_id] = 1
events.append((uuid, df))
if not user.has_perm(PERMISSION_CHANGE, series[uuid]):
permission = False
if not permission:
raise ex.PermissionDenied("Permission denied")
for (uuid, df) in events:
series[uuid].set_events(df)
total += len(df)
series[uuid].save()
return total, len(series), len(locations)
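# The `data` argument is expected in the shape produced by the event
# serializers below, roughly (values are hypothetical):
#
#   [{"uuid": "<timeseries uuid or supplier remote_id>",
#     "events": [{"datetime": "2013-01-01T00:00:00Z", "value": 1.2}]}]
#
# tslib's ListReader then yields one (uuid, DataFrame) pair per entry.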
def sanitize_filename(fn):
'''strips characters not allowed in a filename'''
# illegal characters in Windows and Linux filenames, such as slashes
filename_badchars = "<>:\"/\\|?*\0"
# build character translation table
filename_badchars_table = {ord(char): None for char in filename_badchars}
if isinstance(fn, unicode): # TODO remove for python 3
# strip characters like ":"
fn = fn.translate(filename_badchars_table)
# remove trailing space or period, which are not allowed in Windows
fn = fn.rstrip(". ")
else:
raise Exception("only unicode strings are supported")
return fn
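# For instance, sanitize_filename(u'a/b:c*.txt ') returns u'abc.txt': the
# slash, colon and asterisk are stripped, as is the trailing space.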
class APIReadOnlyListView(mixins.BaseMixin, mixins.GetListModelMixin,
generics.MultipleObjectAPIView):
customfilter_fields = '*'
select_related = None
def get_queryset(self):
qs = self.model.objects
filter = self.request.QUERY_PARAMS.get('filter', None)
order = self.request.QUERY_PARAMS.get('order', None)
if filter or order:
qs = customfilter(self, qs, filter, order)
if self.select_related:
qs = qs.select_related(*self.select_related)
return qs.distinct()
class APIListView(mixins.PostListModelMixin, APIReadOnlyListView):
pass
class APIDetailView(mixins.BaseMixin, mixins.DetailModelMixin,
generics.SingleObjectAPIView):
select_related = None
def get_queryset(self):
qs = self.model.objects
if self.select_related:
qs = qs.select_related(*self.select_related)
return qs
class Aquo(APIReadOnlyListView):
#model = Parameter
#serializer_class = serializers.Aqu
customfilter_fields = ('id', 'code', 'description', 'visible')
class Parameter(Aquo):
model = Parameter
serializer_class = serializers.ParameterSerializer
customfilter_fields = ('id', 'code', 'description', 'group', 'visible')
class Compartment(Aquo):
model = Compartment
serializer_class = serializers.CompartmentSerializer
class MeasuringDevice(Aquo):
model = MeasuringDevice
serializer_class = serializers.MeasuringDeviceSerializer
class MeasuringMethod(Aquo):
model = MeasuringMethod
serializer_class = serializers.MeasuringMethodSerializer
class ProcessingMethod(Aquo):
model = ProcessingMethod
serializer_class = serializers.ProcessingMethodSerializer
class ReferenceFrame(Aquo):
model = ReferenceFrame
serializer_class = serializers.ReferenceFrameSerializer
class Unit(Aquo):
model = Unit
serializer_class = serializers.UnitSerializer
class ManufacturerList(APIListView):
model = Manufacturer
serializer_class = serializers.ManufacturerSerializer
customfilter_fields = ['code', 'name']
class UserList(mixins.ProtectedListModelMixin, APIReadOnlyListView):
model = User
serializer_class = serializers.UserListSerializer
class UserDetail(mixins.ProtectedDetailModelMixin, APIDetailView):
model = User
serializer_class = serializers.UserDetailSerializer
class UserGroupList(mixins.ProtectedListModelMixin, APIReadOnlyListView):
model = UserGroup
serializer_class = serializers.UserGroupListSerializer
class UserGroupDetail(mixins.ProtectedDetailModelMixin, APIDetailView):
model = UserGroup
serializer_class = serializers.UserGroupDetailSerializer
class PermissionMapperList(mixins.ProtectedListModelMixin, APIReadOnlyListView):
model = PermissionMapper
serializer_class = serializers.PermissionMapperSerializer
class PermissionMapperDetail(mixins.ProtectedDetailModelMixin, APIDetailView):
model = PermissionMapper
serializer_class = serializers.PermissionMapperSerializer
class RoleList(mixins.ProtectedListModelMixin, APIReadOnlyListView):
model = Role
serializer_class = serializers.RoleListSerializer
class RoleDetail(mixins.ProtectedDetailModelMixin, APIDetailView):
model = Role
serializer_class = serializers.RoleDetailSerializer
class DataSetList(APIListView):
model = DataSet
serializer_class = serializers.DataSetListSerializer
select_related = ['owner']
def get_queryset(self):
qs = super(DataSetList, self).get_queryset()
if not self.request.user.is_authenticated():
qs = self.model.objects.none()
elif self.request.user.is_superuser:
qs = qs
elif self.request.QUERY_PARAMS.get('management', False):
qs = qs.filter(owner__data_managers=self.request.user)
else:
qs = qs.filter(permission_mappers__user_group__members=self.request.user)
return qs
class DataSetDetail(APIDetailView):
model = DataSet
serializer_class = serializers.DataSetDetailSerializer
select_related = ['owner']
class DataOwnerList(APIListView):
model = DataOwner
serializer_class = serializers.DataOwnerListSerializer
def get_queryset(self):
qs = super(DataOwnerList, self).get_queryset()
if not self.request.user.is_authenticated():
qs = self.model.objects.none()
elif self.request.user.is_superuser:
qs = qs
elif self.request.QUERY_PARAMS.get('management', False):
qs = qs.filter(data_managers=self.request.user)
else:
qs.filter(dataset__in=DataSet.objects.filter(permission_mappers__user_group__members=self.request.user).distinct())
return qs.distinct()
class DataOwnerDetail(APIDetailView):
model = DataOwner
serializer_class = serializers.DataOwnerDetailSerializer
def get_queryset(self):
qs = super(DataOwnerDetail, self).get_queryset()
if not self.request.user.is_authenticated():
qs = self.model.objects.none()
return qs.distinct()
class LocationList(APIListView):
model = Location
serializer_class = serializers.LocationListSerializer
customfilter_fields = ('id', 'uuid', 'name', ('owner', 'owner__name'), 'point_geometry', 'show_on_map')
def get_queryset(self):
qs = super(LocationList, self).get_queryset()
if not self.request.user.is_authenticated():
# There is much wrong with the query below. It performs a LEFT
# OUTER JOIN, so will also return locations that don't have
# any timeseries, which is not very useful. Moreover, it
# does not return a unique set of Locations. Also, the
# timeseries endpoint is inaccessible to unauthenticated
# users, so even a correct version isn't of much use at
# the moment. For that reason, return an EmptyQuerySet.
##qs = self.model.objects.filter(timeseries__owner=None)
qs = self.model.objects.none()
elif self.request.user.is_superuser:
qs = qs
elif self.request.QUERY_PARAMS.get('management', False):
qs = qs.filter(Q(owner__data_managers=self.request.user)|Q(owner=None))
else:
qs = qs.filter(timeseries__data_set__in=DataSet.objects.filter(permission_mappers__user_group__members=self.request.user).distinct())
#special filters
kwargs = {}
parameter = self.request.QUERY_PARAMS.get('parameter', None)
if parameter:
kwargs['timeseries__parameter__in'] = parameter.split(',')
logicalgroup = self.request.QUERY_PARAMS.get('logicalgroup', None)
if logicalgroup:
kwargs['timeseries__logical_groups__in'] = logicalgroup.split(',')
has_geometry = self.request.QUERY_PARAMS.get('has_geometry', None)
if has_geometry == 'true':
kwargs['point_geometry__isnull'] = False
for_map = self.request.QUERY_PARAMS.get('for_map', None)
if for_map == 'true':
kwargs['show_on_map'] = True
return qs.filter(**kwargs).distinct()
class LocationDetail(APIDetailView):
model = Location
serializer_class = serializers.LocationDetailSerializer
slug_field = 'uuid'
slug_url_kwarg = 'uuid'
def get_queryset(self):
qs = super(LocationDetail, self).get_queryset()
if not self.request.user.is_authenticated():
qs = self.model.objects.filter(timeseries__owner=None)
elif self.request.user.is_superuser:
qs = qs
else:
qs = qs.filter(Q(timeseries__data_set__in=DataSet.objects.filter(permission_mappers__user_group__members=self.request.user).distinct())|
Q(owner__data_managers=self.request.user)|Q(owner=None))
return qs.distinct()
class LocationSearch(APIView):
'''
Hybrid response: geocode results combined with location search results.
'''
def get(self, request):
if not self.request.user.is_authenticated():
qs = []
else:
query = self.request.QUERY_PARAMS.get('q', None)
sqs = Location.objects.filter(name__icontains=query)
qs = []
locations = []
for location in sqs:
if (location not in locations and
(location.owner == None or
self.request.user in location.owner.data_managers.all() or
self.request.user.is_superuser)):
location_json = serializers.LocationListSerializer(location).data
locations.append(location)
location_json['geocode'] = False
qs.append(location_json)
# geocoding.
geocode = requests.get('http://nominatim.openstreetmap.org/'
'search?viewbox=8.668,53.520716,2.07641601,50.8198&'
'bounded=1&addressdetails=0&'
'format=json&limit=1&q=' + str(query))
geocode = geocode.json()
if query:
for result in geocode:
result = {
'point_geometry': [
float(result['lon']),
float(result['lat'])
],
'id': '9999999',
'uuid': 'geocode',
'name': result['display_name'].split(',')[0],
'geocode': True
}
qs.append(result)
return Response(qs, status=status.HTTP_200_OK)
class TimeseriesList(APIListView):
model = Timeseries
serializer_class = serializers.TimeseriesListSerializer
customfilter_fields = ('id', 'uuid', 'name', ('location', 'location__name'), ('parameter', 'parameter__code',),
('unit', 'unit__code',), ('owner', 'owner__name',), ('source', 'source__name',))
select_related = ['location', 'parameter', 'unit', 'owner', 'source']
def get_queryset(self):
qs = super(TimeseriesList, self).get_queryset()
if not self.request.user.is_authenticated():
qs = self.model.objects.none()
elif self.request.user.is_superuser:
qs = qs
elif self.request.QUERY_PARAMS.get('management', False):
qs = qs.filter(Q(owner__data_managers=self.request.user)|Q(owner=None))
# Q(data_set__permission_mappers__in=
# PermissionMapper.objects.filter(permission_group__permissions__codename='change_timeseries',
# user_group__members=self.request.user)
# ))
else:
qs = qs.filter(data_set__in=DataSet.objects.filter(permission_mappers__user_group__members=self.request.user).distinct())
kwargs = {}
logicalgroup = self.request.QUERY_PARAMS.get('logicalgroup', None)
if logicalgroup:
kwargs['logical_groups__in'] = logicalgroup.split(',')
location = self.request.QUERY_PARAMS.get('location', None)
if location:
kwargs['location__uuid__in'] = location.split(',')
parameter = self.request.QUERY_PARAMS.get('parameter', None)
if parameter:
kwargs['parameter__in'] = parameter.split(',')
value_type = self.request.QUERY_PARAMS.get('value_type', None)
if value_type:
kwargs['value_type__in'] = value_type.split(',')
name = self.request.QUERY_PARAMS.get('name', None)
if name:
kwargs['name__icontains'] = name
source = self.request.QUERY_PARAMS.get('source', None)
if source:
kwargs['source__name__icontains'] = source
return qs.filter(**kwargs).distinct()
class TimeseriesDetail(APIDetailView):
model = Timeseries
serializer_class = serializers.TimeseriesDetailSerializer
slug_field = 'uuid'
slug_url_kwarg = 'uuid'
select_related = ['location', 'parameter', 'unit', 'source', 'owner', 'processing_method', 'measuring_method',
'measuring_device', 'compartment', 'reference_frame']
def get_queryset(self):
qs = super(TimeseriesDetail, self).get_queryset()
if not self.request.user.is_authenticated():
qs = self.model.objects.none()
elif self.request.user.is_superuser:
qs = qs
else:
qs = qs.filter(Q(data_set__in=DataSet.objects.filter(permission_mappers__user_group__members=self.request.user).distinct()) |
Q(owner__data_managers=self.request.user)|Q(owner=None))
return qs.distinct()
class TimeseriesBehind(TimeseriesList):
"""Return all timeseries that are late.
Frequency is in seconds. For example, a frequency of 86400 means that the
latest_value_timestamp of a timeseries should be no older than 1 day.
"""
def get_queryset(self):
qs = super(TimeseriesBehind, self).get_queryset()
# NB: without the filter on `frequency`, Django does not understand
# the `where` clause, because this column is in a related table.
qs = qs.filter(source__frequency__isnull=False).extra(where=[
"latest_value_timestamp < now() - frequency * INTERVAL '1'"])
return qs
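# Worked example of the where clause above: PostgreSQL reads a bare
# INTERVAL '1' as one second, so the product is `frequency` seconds.
# With frequency = 86400 a timeseries is considered behind once its
# latest_value_timestamp is more than one day old.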
class TimeseriesSearch(APIListView):
model = Timeseries
serializer_class = serializers.TimeseriesListSerializer
def get_queryset(self):
if not self.request.user.is_authenticated():
qs = []
else:
query = self.request.QUERY_PARAMS.get('q', None)
sqs = SearchQuerySet().models(Timeseries).filter(
content__startswith=query)
sqs = sqs.filter_or(location_name__startswith=query)
sqs = sqs.filter_or(name__startswith=query)
qs = []
for item in sqs:
timeseries = item.object
if (timeseries not in qs and
(timeseries.owner == None or
timeseries.owner == self.request.user)):
qs.append(timeseries)
return qs
class BaseEventView(mixins.BaseMixin, mixins.PostListModelMixin, APIView):
pass
class MultiEventList(BaseEventView):
parser_classes = JSONParser, FormParser, CSVParser
def post(self, request, uuid=None):
start = time.time()
serializer = serializers.MultiEventListSerializer(data=request.DATA)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
e, t, l = write_events(getattr(request, 'user', None), serializer.data)
headers = self.get_success_headers(serializer.data)
elapsed = (time.time() - start) * 1000
logger.info("POST: Wrote %d events for %d timeseries at %d locations " \
"in %d ms for user %s" %
(e, t, l, elapsed, getattr(request, 'user', None)))
return Response(serializer.data, status=201, headers=headers)
class EventList(BaseEventView):
renderer_classes = JSONRenderer, BrowsableAPIRenderer, CSVRenderer
def post(self, request, uuid=None):
start = time.time()
ts = Timeseries.objects.get(uuid=uuid)
if not request.user.has_perm(PERMISSION_CHANGE, ts):
raise ex.PermissionDenied('No change permission on timeseries')
if ts.is_file():
if not isinstance(request.META, dict):
raise ValidationError("Missing request header")
dt = request.META.get('HTTP_DATETIME', None)
if not dt:
raise ValidationError("Missing request header param")
try:
timestamp = datetime.strptime(dt, COLNAME_FORMAT)
except ValueError:
# use the alternative format
timestamp = datetime.strptime(dt, COLNAME_FORMAT_MS)
ts.set_file(timestamp, request.FILES)
data = {'datetime' : dt, 'value' : reverse('event-detail',
args=[uuid, dt], request=request)}
ts.save()
headers = self.get_success_headers(data)
return Response(data, status=201, headers=headers)
serializer = serializers.EventListSerializer(data=request.DATA)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
data = [{"uuid": uuid, "events": serializer.data}]
e, t, l = write_events(getattr(request, 'user', None), data)
headers = self.get_success_headers(serializer.data)
elapsed = (time.time() - start) * 1000
logger.info("POST: Wrote %d events for %d timeseries at %d locations " \
"in %d ms for user %s" %
(e, t, l, elapsed, getattr(request, 'user', None)))
return Response(serializer.data, status=201, headers=headers)
def get(self, request, uuid=None):
qs = Timeseries.objects
if not self.request.user.is_authenticated():
qs = Timeseries.objects.none()
elif self.request.user.is_superuser:
qs = Timeseries.objects
else:
qs = Timeseries.objects.filter(data_set__in=DataSet.objects.filter(permission_mappers__user_group__members=self.request.user).distinct()).distinct()
ts = qs.get(uuid=uuid)
headers = {}
# grab GET parameters
start = self.request.QUERY_PARAMS.get('start', None)
end = self.request.QUERY_PARAMS.get('end', None)
filter = self.request.QUERY_PARAMS.get('filter', None)
format = self.request.QUERY_PARAMS.get('format', None)
eventsformat = self.request.QUERY_PARAMS.get('eventsformat', None)
page_num = self.request.QUERY_PARAMS.get('page', 1)
combine_with = self.request.QUERY_PARAMS.get('combine_with', None)
ignore_rejected = self.request.QUERY_PARAMS.get('ignore_rejected', None)
# parse start and end date
if start is not None:
try:
start = datetime.strptime(start, COLNAME_FORMAT)
except ValueError:
# use the alternative format
start = datetime.strptime(start, COLNAME_FORMAT_MS)
if end is not None:
try:
end = datetime.strptime(end, COLNAME_FORMAT)
except ValueError:
# use the alternative format
end = datetime.strptime(end, COLNAME_FORMAT_MS)
if format == 'csv':
# in case of csv return a dataframe and let the renderer handle it
response = ts.get_events(start=start, end=end, filter=filter)
headers['Content-Disposition'] = "attachment; filename='%s-%s.csv'" \
% (uuid, sanitize_filename(ts.name))
elif eventsformat is None:
df = ts.get_events(start=start, end=end, filter=filter, ignore_rejected=ignore_rejected)
all = self.format_default(request, ts, df)
ps = generics.MultipleObjectAPIView(request=request)
page_size = ps.get_paginate_by(None)
if not page_size:
return Response(all)
paginator = Paginator(all, page_size)
try:
page = paginator.page(page_num)
except PageNotAnInteger:
page = paginator.page(1)
except EmptyPage:
page = paginator.page(paginator.num_pages)
context = {'request':request}
serializer = PaginationSerializer(instance=page, context=context)
response = serializer.data
elif eventsformat == 'flot' and combine_with is not None:
# scatterplot, pad to hourly frequency
other_ts = Timeseries.objects.get(uuid=combine_with)
# returns an object ready for a jQuery scatter plot
df_xaxis = ts.get_events(
start=start,
end=end,
filter=filter,
ignore_rejected=ignore_rejected).asfreq('1H', method='pad')
df_yaxis = other_ts.get_events(
start=start,
end=end,
filter=filter,
ignore_rejected=ignore_rejected).asfreq('1H', method='pad')
response = self.format_flot_scatter(request, df_xaxis, df_yaxis, ts, other_ts, start, end)
elif eventsformat == 'flot':
# only return in jQuery Flot compatible format when requested
timer_start = datetime.now()
df = ts.get_events(
start=start,
end=end,
filter=filter,
ignore_rejected=ignore_rejected)
timer_get_events = datetime.now() - timer_start
response = self.format_flot(request, ts, df, start, end, timer_get_events=timer_get_events)
if len(df) == 0:
# look at db for latest value
if ts.latest_value_timestamp is not None:
ts_start = ts.latest_value_timestamp - (end - start)
else:
ts_start = start
ts_end = ts.latest_value_timestamp
df = ts.get_events(
start=ts_start,
end=ts_end,
filter=filter,
ignore_rejected=ignore_rejected)
response = self.format_flot(request, ts, df, start, end)
return Response(data=response, headers=headers)
@staticmethod
def format_default(request, ts, df):
if (
ts.is_file() and ts.value_type ==
Timeseries.ValueType.GEO_REMOTE_SENSING
):
# GeoTIFFs are published as WMS via GeoServer. Our API cannot
# provide clients with a GetMap request URL here, because we
# don't have a bbox at this point. Return workspace:layer
# for convenience, although strictly it could be deduced from the
# datetime and/or value.
# The uuid, not the remote_id, is used, because the latter
# is not guaranteed to be unique across suppliers.
layer = "{workspace}:{layer}_{{}}".format(
workspace="ddsc", layer=ts.uuid)
events = [
dict([
('datetime', timestamp.strftime(COLNAME_FORMAT_MS)),
('value', reverse('event-detail', args=[
ts.uuid, timestamp.strftime(FILENAME_FORMAT)],
request=request)),
('layer', layer.format(
timestamp.strftime(GEOSERVER_FORMAT)))
]) for timestamp, row in df.iterrows()
]
elif ts.is_file():
events = [
dict([
('datetime', timestamp.strftime(COLNAME_FORMAT_MS)),
('value', reverse('event-detail', args=[
ts.uuid, timestamp.strftime(FILENAME_FORMAT)],
request=request))
]) for timestamp, row in df.iterrows()
]
else:
events = [
dict(
[('datetime', timestamp.strftime(COLNAME_FORMAT_MS))] +
[(colname, row[i]) for i, colname in enumerate(df.columns)]
) for timestamp, row in df.iterrows()
]
return events
@staticmethod
def format_flot_scatter(request, df_xaxis, df_yaxis, ts, other_ts, start, end):
# Drop NaN values. (Recent pandas versions support inplace drop.)
df_xaxis = df_xaxis.dropna(subset=["value"])
df_yaxis = df_yaxis.dropna(subset=["value"])
if len(df_xaxis) > 0 and len(df_yaxis) > 0:
data = zip(df_xaxis['value'].values, df_yaxis['value'].values)
else:
data = []
line = {
'label': '{} vs. {}'.format(ts, other_ts),
'data': data,
# These are added to determine the axis which will be related
# to the graph line.
'axis_label_x': '{}, {} ({})'.format(
str(ts),
str(ts.parameter),
str(ts.unit)
),
'axis_label_y': '{}, {} ({})'.format(
str(other_ts),
str(other_ts.parameter),
str(other_ts.unit)
),
# These are used to reset the graph boundaries when the first
# line is plotted.
'xmin': None,
'xmax': None
}
return line
@staticmethod
def format_flot(request, ts, df, start=None, end=None, timer_get_events=None):
tolerance = request.QUERY_PARAMS.get('tolerance', None)
width = request.QUERY_PARAMS.get('width', None)
height = request.QUERY_PARAMS.get('height', None)
timer_to_js_timestamps = None
timer_douglas_peucker = None
timer_zip = None
# Drop NaN values. (Recent pandas versions support inplace drop.)
df = df.dropna(subset=["value"])
if len(df) > 0:
def to_js_timestamp(dt):
# Both a milliseconds-since-epoch number and an ISO 8601 string can be
# passed directly to Javascript's Date constructor, but older browsers
# only support the former, so milliseconds since epoch are used here.
return float(calendar.timegm(dt.timetuple()) * 1000)
#return dt.strftime(COLNAME_FORMAT_MS)
# Add values to the response.
# Convert event dates to timestamps with milliseconds since epoch.
# TODO see if source timezone / display timezone are relevant
timer_start = datetime.now()
timestamps = [to_js_timestamp(dt) for dt in df.index]
# Decimate only operates on Numpy arrays, so convert our timestamps
# back to one.
timestamps = np.array(timestamps)
timer_to_js_timestamps = datetime.now() - timer_start
values = df['value'].values
# Decimate values (a.k.a. line simplification), using Ramer-Douglas-Peucker.
# Determine tolerance using either the provided value,
# or calculate it using width and height of the graph.
if tolerance is not None:
try:
tolerance = float(tolerance)
except ValueError:
tolerance = None
elif width is not None and height is not None:
# Assume graph scales with min and max of the entire range here.
# Otherwise we need to pass axes min/max as well.
# Disable horizontal tolerance for now.
'''
try:
width = float(width)
if start and end:
# use min and max of the actual requested graph range
tolerance_w_requested = (to_js_timestamp(end) - to_js_timestamp(start)) / width
else:
tolerance_w_requested = 0
# Check with min and max of the entire timeseries, and use
# whichever is higher.
# Timestamps are sorted, so we can just do this.
tolerance_w_possible = (timestamps[-1] - timestamps[0]) / width
tolerance_w = max(tolerance_w_requested, tolerance_w_possible)
except ValueError:
tolerance_w = None
'''
try:
height = float(height)
tolerance_h = (values.max() - values.min()) / height
except ValueError:
tolerance_h = None
# Just use vertical tolerance for now, until we have a better 2D solution.
tolerance = tolerance_h
# Apply the actual line simplification.
# Only possible on 2 or more values.
if tolerance is not None and len(df) > 1:
before = len(values)
timer_start = datetime.now()
timestamps, values = decimate_until(timestamps, values, tolerance)
timer_douglas_peucker = datetime.now() - timer_start
logger.debug('decimate: %s values left of %s, with tol = %s', len(values), before, tolerance)
timer_start = datetime.now()
data = zip(timestamps, values)
timer_zip = datetime.now() - timer_start
xmin = timestamps[-1] # timestamps is sorted
xmax = timestamps[0] # timestamps is sorted
else:
# No events, nothing to return.
data = []
xmin = None
xmax = None
line = {
'label': str(ts),
'data': data,
# These are added to determine the axis which will be related
# to the graph line.
'axis_label': '{} ({})'.format(str(ts.parameter), str(ts.unit)),
'parameter_pk': ts.parameter.pk,
# These are used to reset the graph boundaries when the first
# line is plotted.
'xmin': xmin,
'xmax': xmax,
'timer_get_events': str(timer_get_events),
'timer_to_js_timestamps': str(timer_to_js_timestamps),
'timer_douglas_peucker': str(timer_douglas_peucker),
'timer_zip': str(timer_zip),
}
return line
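# A worked example of the vertical tolerance used in format_flot (numbers
# are only an illustration): with values spanning 0..20 and a graph height
# of 400 px, tolerance_h = (20 - 0) / 400 = 0.05, so the Ramer-Douglas-
# Peucker simplification may drop points as long as the simplified line
# stays within 0.05 of the original.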
class EventDetail(BaseEventView):
def get(self, request, uuid=None, dt=None):
ts = Timeseries.objects.get(uuid=uuid)
if not ts.is_file():
raise ex.MethodNotAllowed(
"GET", detail="Cannot GET single event detail of non-file timeseries.")
timestamp = datetime.strptime(dt, FILENAME_FORMAT)
(file_data, file_mime, file_size) = ts.get_file(timestamp)
response = HttpResponse(file_data, mimetype=file_mime)
if file_mime is not None:
response['Content-Type'] = file_mime
if (ts.value_type == Timeseries.ValueType.FILE):
file_ext = mimetypes.guess_extension(file_mime)
file_name = "%s-%s%s" % (ts.uuid, dt, file_ext)
response['Content-Disposition'] = 'attachment; filename=' + file_name
if (file_size > 0):
response['Content-Length'] = file_size
return response
class SourceList(APIListView):
model = Source
serializer_class = serializers.SourceListSerializer
customfilter_fields = ('id', 'uuid', 'name', ('owner', 'owner__name',),
('manufacturer', 'manufacturer__name',), 'details', 'frequency', 'timeout')
select_related = ['manufacturer', 'owner']
def get_queryset(self):
qs = super(SourceList, self).get_queryset()
if not self.request.user.is_authenticated():
qs = self.model.objects.none()
elif self.request.user.is_superuser:
qs = qs
elif self.request.QUERY_PARAMS.get('management', False):
qs = qs.filter(Q(owner__data_managers=self.request.user)|Q(owner=None))
else:
qs = qs.filter(timeseries__data_set__in=DataSet.objects.filter(permission_mappers__user_group__members=self.request.user).distinct())
return qs.distinct()
class SourceDetail(APIDetailView):
model = Source
serializer_class = serializers.SourceDetailSerializer
slug_field = 'uuid'
slug_url_kwarg = 'uuid'
select_related = ['manufacturer', 'owner']
def get_queryset(self):
qs = super(SourceDetail, self).get_queryset()
if not self.request.user.is_authenticated():
qs = self.model.objects.none()
elif self.request.user.is_superuser:
qs = qs
else:
qs = qs.filter(Q(timeseries__data_set__in=DataSet.objects.filter(permission_mappers__user_group__members=self.request.user).distinct())|
Q(owner__data_managers=self.request.user)|Q(owner=None))
return qs.distinct()
class LogicalGroupList(APIListView):
model = LogicalGroup
serializer_class = serializers.LogicalGroupListSerializer
select_related = ['owner', 'parents']
def get_queryset(self):
qs = super(LogicalGroupList, self).get_queryset()
if not self.request.user.is_authenticated():
qs = self.model.objects.none()
elif self.request.user.is_superuser:
qs = qs
elif self.request.QUERY_PARAMS.get('management', False):
qs = qs.filter(owner__data_managers=self.request.user)
else:
qs = qs.filter(owner__dataset__in=DataSet.objects.filter(permission_mappers__user_group__members=self.request.user).distinct())
#special filters
kwargs = {}
location = self.request.QUERY_PARAMS.get('location', None)
if location:
kwargs['timeseries__location__uuid__in'] = location.split(',')
parameter = self.request.QUERY_PARAMS.get('parameter', None)
if parameter:
kwargs['timeseries__parameter__in'] = parameter.split(',')
return qs.filter(**kwargs).distinct()
def post_save(self, obj, created=True):
"""
Custom function for saving a many-to-many relation to self.
This save method is not transaction-safe and has no validation on the m2m parent relation.
Django REST Framework acts strangely with two connections to the same model, so the model instance is created directly.
"""
cur_parent_links = dict([(item.parent.id, item) for item in obj.parents.all()])
req_parent_links = self.request.DATA.getlist('parents')
for item in req_parent_links:
item = json.loads(item)
if item['parent'] in cur_parent_links and not self.request.method == 'POST':
del cur_parent_links[item['parent']]
elif 'parent' in item and item['parent'] is not None:
#create item
print 'create link'
item['child'] = obj
item['parent'] = LogicalGroup.objects.get(pk=item['parent'])
parent_link = LogicalGroupEdge(**item)
#todo: validation
#errors = parent_link.errors
parent_link.save()
#delete the leftovers
for item in cur_parent_links.values():
item.delete()
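# Note on the request format handled by post_save above: each entry of the
# posted 'parents' list is a JSON string such as '{"parent": 3}' (the id is
# hypothetical). Each one is turned into a LogicalGroupEdge with the saved
# group as child, and previously existing parent links that are not
# re-posted are removed.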
class LogicalGroupDetail(APIDetailView):
model = LogicalGroup
serializer_class = serializers.LogicalGroupDetailSerializer
select_related = ['owner', 'parents']
def get_queryset(self):
qs = super(LogicalGroupDetail, self).get_queryset()
if not self.request.user.is_authenticated():
qs = self.model.objects.none()
elif self.request.user.is_superuser:
qs = qs
else:
qs = qs.filter(Q(owner__dataset__in=DataSet.objects.filter(permission_mappers__user_group__members=self.request.user).distinct())|
Q(owner__data_managers=self.request.user))
return qs.distinct()
def post_save(self, obj, created=True):
"""
Custom function for saving a many-to-many relation to self.
This save method is not transaction-safe and has no validation on the m2m parent relation.
Django REST Framework acts strangely with two connections to the same model, so the model instance is created directly.
"""
cur_parent_links = dict([(item.parent.id, item) for item in obj.parents.all()])
req_parent_links = self.request.DATA.getlist('parents')
for item in req_parent_links:
item = json.loads(item)
if item['parent'] in cur_parent_links and not self.request.method == 'POST':
del cur_parent_links[item['parent']]
elif 'parent' in item and item['parent'] is not None:
#create item
print 'create link'
item['child'] = obj
item['parent'] = LogicalGroup.objects.get(pk=item['parent'])
parent_link = LogicalGroupEdge(**item)
#todo: validation
#errors = parent_link.errors
parent_link.save()
#delete the leftovers
for item in cur_parent_links.values():
item.delete()
class AlarmActiveList(APIListView):
model = Alarm_Active
serializer_class = serializers.Alarm_ActiveListSerializer
select_related = ['alarm']
def get_queryset(self):
qs = super(AlarmActiveList, self).get_queryset()
if not self.request.QUERY_PARAMS.get('all', False):
#only return active alarms
qs = qs.filter(active=True)
if self.request.user.is_superuser:
return qs
else:
return qs.filter(alarm__object_id=self.request.user.id).distinct()
class AlarmActiveDetail(APIDetailView):
model = Alarm_Active
serializer_class = serializers.Alarm_ActiveDetailSerializer
select_related = ['alarm']
def get_queryset(self):
qs = super(AlarmActiveDetail, self).get_queryset()
if self.request.user.is_superuser:
return qs
else:
return qs.filter(alarm__object_id=self.request.user.id).distinct()
class AlarmSettingList(APIListView):
model = Alarm
serializer_class = serializers.AlarmSettingListSerializer
def get_queryset(self):
qs = super(AlarmSettingList, self).get_queryset()
if not self.request.user.is_authenticated():
return self.model.objects.none()
elif self.request.user.is_superuser:
return qs
else:
return qs.filter(object_id=self.request.user.id).distinct()
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.DATA, files=request.FILES)
if serializer.is_valid():
if self.pre_save(serializer.object):
self.object = serializer.save(force_insert=True)
self.post_save(self.object, created=True)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED,
headers=headers)
else:
return Response(self.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def pre_save(self, obj):
if obj.object_id is None:
obj.content_object = self.request.user
self.update_alarm_items = []
self.create_alarm_items = []
self.delete_alarm_items = []
errors = []
error = False
cur_alarm_items = dict([(item.id, item) for item in obj.alarm_item_set.all()])
req_alarm_items = self.request.DATA.getlist('alarm_item_set')
for item in req_alarm_items:
item = json.loads(item)
if self.request.method == 'POST' or not 'id' in item or item['id'] is None:
#create item
item['alarm_id'] = obj.id
alarm_item = serializers.AlarmItemDetailSerializer(None, data=item)
if alarm_item.is_valid():
self.create_alarm_items.append(alarm_item)
else:
errors.append(alarm_item.errors)
error = True
elif item['id'] in cur_alarm_items:
#update
cur_item = cur_alarm_items[item['id']]
alarm_item = serializers.AlarmItemDetailSerializer(cur_item, data=item)
if alarm_item.is_valid():
self.update_alarm_items.append(alarm_item)
else:
errors.append(alarm_item.errors)
error = True
del cur_alarm_items[item['id']]
#delete the leftovers
for alarm_item in cur_alarm_items.values():
self.delete_alarm_items.append(alarm_item)
if error:
self.errors = {'alarm_item_set': errors}
return False
else:
return True
def post_save(self, obj, created=True):
"""
Custom function for saving nested alarm items.
This save method is not transaction-safe and has no validation on the alarm_items.
Please refactor this function when write support is added to Django REST Framework
(work in progress at this moment).
"""
for item in self.update_alarm_items:
item.save()
for item in self.create_alarm_items:
item.object.alarm = obj
item.save()
for item in self.delete_alarm_items:
item.delete()
class AlarmSettingDetail(APIDetailView):
model = Alarm
serializer_class = serializers.AlarmSettingDetailSerializer
select_related = ['alarm_item_set', 'alarm_item_set__alarm_type'] #todo: this doesn't work, find other way
def get_queryset(self):
qs = super(AlarmSettingDetail, self).get_queryset()
if not self.request.user.is_authenticated():
return self.model.objects.none()
elif self.request.user.is_superuser:
return qs
else:
return qs.filter(object_id=self.request.user.id).distinct()
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
self.object = None
try:
self.object = self.get_object()
except Http404:
# If this is a PUT-as-create operation, we need to ensure that
# we have relevant permissions, as if this was a POST request.
self.check_permissions(clone_request(request, 'POST'))
created = True
save_kwargs = {'force_insert': True}
success_status_code = status.HTTP_201_CREATED
else:
created = False
save_kwargs = {'force_update': True}
success_status_code = status.HTTP_200_OK
serializer = self.get_serializer(self.object, data=request.DATA,
files=request.FILES, partial=partial)
if serializer.is_valid():
if self.pre_save(serializer.object):
self.object = serializer.save(**save_kwargs)
self.post_save(self.object, created=created)
return Response(serializer.data, status=success_status_code)
else:
return Response(self.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def pre_save(self, obj):
if obj.object_id is None:
obj.content_object = self.request.user
self.update_alarm_items = []
self.create_alarm_items = []
self.delete_alarm_items = []
errors = []
error = False
cur_alarm_items = dict([(item.id, item) for item in obj.alarm_item_set.all()])
req_alarm_items = self.request.DATA.getlist('alarm_item_set')
for item in req_alarm_items:
item = json.loads(item)
if self.request.method == 'POST' or not 'id' in item or item['id'] is None:
#create item
item['alarm_id'] = obj.id
alarm_item = serializers.AlarmItemDetailSerializer(None, data=item)
if alarm_item.is_valid():
self.create_alarm_items.append(alarm_item)
else:
errors.append(alarm_item.errors)
error = True
elif item['id'] in cur_alarm_items:
#update
cur_item = cur_alarm_items[item['id']]
alarm_item = serializers.AlarmItemDetailSerializer(cur_item, data=item)
if alarm_item.is_valid():
self.update_alarm_items.append(alarm_item)
else:
errors.append(alarm_item.errors)
error = True
del cur_alarm_items[item['id']]
#delete the leftovers
for alarm_item in cur_alarm_items.values():
self.delete_alarm_items.append(alarm_item)
if error:
self.errors = {'alarm_item_set': errors}
return False
else:
return True
def post_save(self, obj, created=True):
"""
Custom function for saving nested alarm items.
This save method is not transaction-safe and has no validation on the alarm_items.
Please refactor this function when write support is added to Django REST Framework
(work in progress at this moment).
"""
for item in self.update_alarm_items:
item.save()
for item in self.create_alarm_items:
item.object.alarm = obj
item.save()
for item in self.delete_alarm_items:
item.delete()
class AlarmItemDetail(APIDetailView):
model = Alarm_Item
serializer_class = serializers.AlarmItemDetailSerializer
class StatusCacheList(APIListView):
model = StatusCache
serializer_class = serializers.StatusCacheListSerializer
customfilter_fields = ('id', 'timeseries__name', ('timeseries__parameter', 'timeseries__parameter__code'),
'nr_of_measurements_total', 'nr_of_measurements_reliable', 'nr_of_measurements_doubtful',
'nr_of_measurements_unreliable', 'min_val', 'max_val', 'mean_val', 'std_val', 'status_date')
select_related = ['timeseries', 'timeseries__parameter']
def get_queryset(self):
qs = super(StatusCacheList, self).get_queryset()
if not self.request.user.is_authenticated():
qs = self.model.objects.none()
elif self.request.user.is_superuser:
qs = qs
else:
qs = qs.filter(timeseries__data_set__in=DataSet.objects.filter(permission_mappers__user_group__members=self.request.user).distinct())
return qs.distinct()
class StatusCacheDetail(APIDetailView):
model = StatusCache
serializer_class = serializers.StatusCacheDetailSerializer
customfilter_fields = ('id', 'timeseries__name', ('timeseries__parameter', 'timeseries__parameter__code'),
'nr_of_measurements_total', 'nr_of_measurements_reliable', 'nr_of_measurements_doubtful',
'nr_of_measurements_unreliable', 'min_val', 'max_val', 'mean_val', 'std_val', 'status_date')
select_related = ['timeseries', 'timeseries__parameter']
def get_queryset(self):
qs = super(StatusCacheDetail, self).get_queryset()
if not self.request.user.is_authenticated():
qs = self.model.objects.none()
elif self.request.user.is_superuser:
qs = qs
else:
qs = qs.filter(timeseries__data_set__in=DataSet.objects.filter(permission_mappers__user_group__members=self.request.user).distinct())
return qs
class Summary(APIReadOnlyListView):
def get(self, request, uuid=None):
if not request.user.is_authenticated():
total = 0
disrupted_timeseries = 0
active_alarms = 0
new_events = 0
else:
ts_manager = Timeseries.objects
aa_manager = Alarm_Active.objects
sc_manager = StatusCache.objects
if not request.user.is_superuser:
ts_manager = ts_manager.filter(data_set__in=DataSet.objects.filter(permission_mappers__user_group__members=request.user))
aa_manager = aa_manager.filter(alarm__object_id=request.user.id)
sc_manager = sc_manager.filter(timeseries__data_set__in=DataSet.objects.filter(permission_mappers__user_group__members=request.user))
total = ts_manager.count()
disrupted_timeseries = ts_manager.values('source__frequency').extra(
where=["latest_value_timestamp < now() - ddsc_core_source" \
".frequency * INTERVAL '1 SECOND'"]).count()
active_alarms = aa_manager.filter(active=True).count()
status = sc_manager.values('date') \
.annotate((Sum('nr_of_measurements_total'))) \
.order_by('-date')[:1]
if len(status) > 0 and 'nr_of_measurements_total__sum' in status[0]:
new_events = status[0]['nr_of_measurements_total__sum']
else:
new_events = 0
data = {
'timeseries' : {
'total' : total,
'disrupted' : disrupted_timeseries,
},
'alarms' : {
'active' : active_alarms,
},
'events' : {
'new' : new_events if new_events else 0,
}
}
return Response(data=data)
| mit |
alex-bauer/kelvin-power-challenge | src/modeling/models.py | 1 | 11023 | import operator
import sys
sys.path.append("../")
import numpy as np
import xgboost as xgb
from keras import callbacks
from keras.layers import TimeDistributed
from keras.layers.advanced_activations import PReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, TimeDistributedDense, Flatten
from keras.layers.recurrent import LSTM
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor
from sklearn.preprocessing import StandardScaler
from utils.utils import *
def slidingWindow(X, windowSize=10, numWindows=-1):
if numWindows == -1:
numWindows = len(X)
print("Generating %d windows" % (numWindows/windowSize))
i = 0
while i < numWindows:
yield list(X.iloc[i:i + windowSize].values)
i += windowSize
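# Hypothetical usage sketch: for a DataFrame with 100 rows and the default
# windowSize of 10, this yields 10 consecutive, non-overlapping windows of
# 10 rows each (each window is a list of row values):
#
#   windows = list(slidingWindow(X_train, windowSize=10))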
def train_predict_reg(X_train, y_train, X_val, params, weights=None):
if weights is None:
weights = np.ones(len(y_train))
features = list(X_train.columns)
X_val = X_val.copy()
X_train = X_train.copy()
if 'reg_skl_etr' in params["model"]:
X_train = X_train.fillna(-999.0)
X_val = X_val.fillna(-999.0)
X_train = X_train.replace(-np.inf, -10000)
X_train = X_train.replace(np.inf, 10000)
X_val = X_val.replace(-np.inf, -10000)
X_val = X_val.replace(np.inf, 10000)
clf = ExtraTreesRegressor(n_estimators=int(params['n_estimators']),
min_samples_leaf=max(1, int(params['min_samples_leaf'])),
max_features=params['max_features'],
max_depth=None if not params.has_key('max_depth') else int(params['max_depth']),
random_state=params['random_state'], n_jobs=params['n_jobs'])
clf.fit(X_train, y_train)
features = list(X_train.columns)
print sorted(zip(features, clf.feature_importances_), key=lambda x: x[1], reverse=True)
y_val_prob = clf.predict(X_val)
return clf, y_val_prob, None
if 'reg_skl_rfr' in params["model"]:
X_train = X_train.fillna(-999.0)
X_val = X_val.fillna(-999.0)
X_train = X_train.replace(-np.inf, -10000)
X_train = X_train.replace(np.inf, 10000)
X_val = X_val.replace(-np.inf, -10000)
X_val = X_val.replace(np.inf, 10000)
clf = RandomForestRegressor(n_estimators=int(params['n_estimators']),
min_samples_leaf=max(1, int(params['min_samples_leaf'])),
max_features=params['max_features'],
max_depth=None if not params.has_key('max_depth') else int(params['max_depth']),
random_state=params['random_state'], n_jobs=params['n_jobs'])
clf.fit(X_train, y_train)
features = list(X_train.columns)
print sorted(zip(features, clf.feature_importances_), key=lambda x: x[1], reverse=True)
y_val_prob = clf.predict(X_val)
return clf, y_val_prob, None
if params["model"] == 'reg_keras_dnn':
X_train = X_train.replace([np.inf, -np.inf], np.nan)
X_val = X_val.replace([np.inf, -np.inf], np.nan)
X_train = X_train.fillna(X_train.mean())
X_val = X_val.fillna(X_val.mean())
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
y_scaler = StandardScaler(with_std=False)
y_train = y_scaler.fit_transform(y_train)
model = Sequential()
# ## input layer
model.add(Dropout(params["input_dropout"], input_shape=[X_train.shape[1]]))
hidden_layers = params['hidden_layers']
units = params["hidden_units"]
while hidden_layers > 0:
model.add(Dense(units, init='glorot_uniform'))
if params["batch_norm"]:
model.add(BatchNormalization())
if params["hidden_activation"] == "prelu":
model.add(PReLU())
else:
model.add(Activation(params['hidden_activation']))
model.add(Dropout(params["hidden_dropout"]))
hidden_layers -= 1
model.add(Dense(33, init='glorot_uniform', activation='sigmoid'))
# ## output layer
model.add(Dense(33, init='glorot_uniform', activation='linear'))
model.compile(loss='mean_squared_error', optimizer='adam')
## to array
X_train_ndarray = X_train
y_train_ndarray = y_train
X_val_ndarray = X_val
X_es_train, X_es_eval, y_es_train, y_es_eval = train_test_split(X_train, y_train,
test_size=0.1,
random_state=0)
if params['early_stopping']:
earlyStopping = callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=2, mode='auto')
## train
model.fit(X_es_train, y_es_train,
nb_epoch=params['nb_epoch'], batch_size=params['batch_size'], callbacks=[earlyStopping],
validation_data=[X_es_eval, y_es_eval], verbose=2)
else:
model.fit(X_train_ndarray, y_train_ndarray,
nb_epoch=params['nb_epoch'], batch_size=params['batch_size'],
validation_split=0.1, verbose=2)
##prediction
pred = model.predict(X_val_ndarray, verbose=0)
pred = y_scaler.inverse_transform(pred)
return model, pred, None
if params["model"] == 'reg_keras_lstm':
scaler = StandardScaler()
X_train = pd.DataFrame(scaler.fit_transform((X_train.fillna(0).values)), columns=X_train.columns,
index=X_train.index)
X_val = pd.DataFrame(scaler.transform(X_val.fillna(0).values), columns=X_val.columns, index=X_val.index)
num_units = params["hidden_units"]
sequence_length = params['sequence_length']
input_dim = X_train.shape[1]
output_dim = y_train.shape[1]
batch_size = params['batch_size']
backwards = params['backwards'] if 'backwards' in params else False
print "SPECS:"
print " num_units (LSTM)", num_units
print " sequence_length", sequence_length
print " input_dim (X)", input_dim
print " output_dim (Y)", output_dim
print " batch_size", batch_size
print "X_train len", len(X_train)
start = len(X_train.index) % (batch_size * sequence_length)
X_train_Window_Generator = slidingWindow(X_train.iloc[start:], sequence_length) # , 10, 1)
Y_train_Window_Generator = slidingWindow(y_train.iloc[start:], sequence_length) # , 10, 1)
print('Build model...')
model = Sequential()
model.add(LSTM(num_units, batch_input_shape=(batch_size, sequence_length, input_dim), return_sequences=True,
stateful=True, go_backwards=backwards))
if "2-lstm" in params:
model.add(TimeDistributed(Dense(num_units, activation='relu')))
model.add(LSTM(num_units, batch_input_shape=(batch_size, sequence_length, input_dim), return_sequences=True,
stateful=True, go_backwards=backwards))
model.add(TimeDistributed(Dense(num_units, activation='relu')))
model.add(Dropout(params['hidden_dropout']))
model.add(TimeDistributed(Dense(32, activation='sigmoid')))
model.add(TimeDistributed(Dense(output_dim, activation='linear')))
model.compile(loss='mse', optimizer='rmsprop')
X_seq = list(X_train_Window_Generator)
Y_seq = list(Y_train_Window_Generator)
if backwards:
X_seq.reverse()
Y_seq.reverse()
model.fit(X_seq, Y_seq,
batch_size=batch_size,
verbose=1,
nb_epoch=params['nb_epoch'],
shuffle=False)
model = model
batch_size = params['batch_size']
sequence_length = params['sequence_length']
# merge the train and the test
X_merged = pd.concat([X_train, X_val])
print len(X_merged.index)
start = len(X_merged.index) % (batch_size * sequence_length)
X_train_Window_Generator = slidingWindow(X_merged.iloc[start:], sequence_length) # , 10, 1)
dataX = list(X_train_Window_Generator)
if backwards:
dataX.reverse()
Y_hat = model.predict(dataX, batch_size=batch_size, verbose=1)
# now get the tail of Y_hat
Y_hat1 = np.vstack(Y_hat)
if backwards:
Y_hat1=Y_hat1[::-1,:]
res = Y_hat1[-len(X_val.index):, :]
return None, res, None
if params["model"] in ['reg_xgb_tree']:
X_trainsets = []
y_train_sets = []
X_testssets = []
for ix, col in enumerate(config.target_cols):
X_train_col = X_train.copy()
X_test_col = X_val.copy()
X_train_col['out'] = ix
X_test_col['out'] = ix
X_testssets.append(X_test_col)
X_trainsets.append(X_train_col)
y_train_sets.append(y_train[col].values)
X_train = pd.concat(X_trainsets)
X_val = pd.concat(X_testssets)
y_train = np.concatenate(y_train_sets)
X_train_xgb = X_train.fillna(-999.0)
X_val_xgb = X_val.fillna(-999.0)
params['num_round'] = max(params['num_round'], 10)
params['nthread'] = params['n_jobs']
params['seed'] = params['random_state']
X_es_train, X_es_eval, y_es_train, y_es_eval = train_test_split(X_train_xgb, y_train,
test_size=0.2,
random_state=0)
dvalid_base = xgb.DMatrix(X_es_eval, label=y_es_eval, feature_names=list(X_es_eval.columns))
dtrain_base = xgb.DMatrix(X_es_train, label=y_es_train,
feature_names=list(X_es_eval.columns))
dtest_base = xgb.DMatrix(X_val_xgb, feature_names=list(X_es_eval.columns))
watchlist = [(dtrain_base, 'train'), (dvalid_base, 'valid')]
if params['early_stopping'] == True:
model = xgb.train(params, dtrain_base, int(params['num_round']), watchlist, early_stopping_rounds=20)
else:
model = xgb.train(params, dtrain_base, int(params['num_round']), watchlist)
importance = model.get_fscore()
importance = sorted(importance.items(), key=operator.itemgetter(1))
print importance
y_val_prob = model.predict(dtest_base)
y_val_prob = y_val_prob.reshape((len(config.target_cols), -1)).T
y_train_prob = None
return model, y_val_prob, y_train_prob
| mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/io/clipboard.py | 7 | 3685 | """ io on the clipboard """
from pandas import compat, get_option, option_context, DataFrame
from pandas.compat import StringIO, PY2
def read_clipboard(**kwargs): # pragma: no cover
"""
Read text from clipboard and pass to read_table. See read_table for the
full argument list
If unspecified, `sep` defaults to '\s+'
Returns
-------
parsed : DataFrame
"""
encoding = kwargs.pop('encoding', 'utf-8')
# only utf-8 is valid for passed value because that's what clipboard
# supports
if encoding is not None and encoding.lower().replace('-', '') != 'utf8':
raise NotImplementedError(
'reading from clipboard only supports utf-8 encoding')
from pandas.util.clipboard import clipboard_get
from pandas.io.parsers import read_table
text = clipboard_get()
# try to decode (if needed on PY3)
# Strange. linux py33 doesn't complain, win py33 does
if compat.PY3:
try:
text = compat.bytes_to_str(
text, encoding=(kwargs.get('encoding') or
get_option('display.encoding'))
)
except:
pass
    # Excel copies into the clipboard with \t separation.
    # Inspect no more than the first 10 lines; if they
    # all contain an equal number (>0) of tabs, infer
    # that this came from Excel and set 'sep' accordingly.
lines = text[:10000].split('\n')[:-1][:10]
# Need to remove leading white space, since read_table
# accepts:
# a b
# 0 1 2
# 1 3 4
counts = set([x.lstrip().count('\t') for x in lines])
if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
kwargs['sep'] = '\t'
if kwargs.get('sep') is None and kwargs.get('delim_whitespace') is None:
kwargs['sep'] = '\s+'
return read_table(StringIO(text), **kwargs)
def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover
"""
Attempt to write text representation of object to the system clipboard
The clipboard can be then pasted into Excel for example.
Parameters
----------
obj : the object to write to the clipboard
excel : boolean, defaults to True
        if True, use the provided separator, writing in a CSV
        format to allow easy pasting into Excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with gtk or PyQt4 modules)
- Windows:
- OS X:
"""
encoding = kwargs.pop('encoding', 'utf-8')
# testing if an invalid encoding is passed to clipboard
if encoding is not None and encoding.lower().replace('-', '') != 'utf8':
raise ValueError('clipboard only supports utf-8 encoding')
from pandas.util.clipboard import clipboard_set
if excel is None:
excel = True
if excel:
try:
if sep is None:
sep = '\t'
buf = StringIO()
# clipboard_set (pyperclip) expects unicode
obj.to_csv(buf, sep=sep, encoding='utf-8', **kwargs)
text = buf.getvalue()
if PY2:
text = text.decode('utf-8')
clipboard_set(text)
return
except:
pass
if isinstance(obj, DataFrame):
# str(df) has various unhelpful defaults, like truncation
with option_context('display.max_colwidth', 999999):
objstr = obj.to_string(**kwargs)
else:
objstr = str(obj)
clipboard_set(objstr)
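# Illustrative round-trip sketch (not part of the original module). It assumes a
# working system clipboard mechanism (e.g. xclip/xsel on Linux) and simply chains
# the two public helpers defined above; the example DataFrame is arbitrary.
def _example_clipboard_roundtrip():  # pragma: no cover
    """Copy a small frame in tab-separated (Excel-friendly) form, then read it back."""
    df = DataFrame({'a': [1, 2], 'b': [3, 4]})
    to_clipboard(df, excel=True, sep='\t', index=False)
    return read_clipboard(sep='\t')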
| gpl-3.0 |
houghb/HDSAviz | doc/conf.py | 2 | 9918 | # -*- coding: utf-8 -*-
#
# savvy documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 15 11:10:06 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import mock
MOCK_MODULES = ['bokeh', 'models.widgets', 'bokeh.models', 'Panel', 'Tabs',
'widgets', 'bokeh.models.widgets', 'show', 'bokeh.plotting',
'bokeh.charts', 'Bar', 'SALib', 'numpy', 'pandas',
'hashtable', 'tslib', 'lib', 'graph_tool', 'matplotlib',
'ipywidgets', 'IPython.html.widgets', 'html.widgets',
'IPython.html', 'html', 'interact', 'fixed',
'BoundedFloatText', 'IntText', 'Checkbox',
'SelectMultiple', 'IPython', ]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../savvy'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'savvy'
copyright = u'2016, houghb, cdf6gc, swapilpaliwal'
author = u'houghb, cdf6gc, swapilpaliwal'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
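# Illustrative example only (not part of the generated file); the keys below are
# assumed alabaster options and should be checked against the theme documentation.
# html_theme_options = {
#     'description': 'Sensitivity analysis visualization for savvy',
#     'fixed_sidebar': True,
# }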
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'savvydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'savvy.tex', u'savvy Documentation',
u'houghb, cdf6gc, swapilpaliwal', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'savvy', u'savvy Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'savvy', u'savvy Documentation',
author, 'savvy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-2-clause |
mne-tools/mne-tools.github.io | stable/_downloads/360a4576e24cd4f883ecd5893fe93cfd/decoding_unsupervised_spatial_filter.py | 29 | 2496 | """
==================================================================
Analysis of evoked response using ICA and PCA reduction techniques
==================================================================
This example computes PCA and ICA of evoked or epochs data. Then the
PCA / ICA components, a.k.a. spatial filters, are used to transform
the channel data to new sources / virtual channels. The output is
visualized on the average of all the epochs.
"""
# Authors: Jean-Remi King <jeanremi.king@gmail.com>
# Asish Panda <asishrocks95@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.decoding import UnsupervisedSpatialFilter
from sklearn.decomposition import PCA, FastICA
print(__doc__)
# Preprocess data
data_path = sample.data_path()
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, fir_design='firwin')
events = mne.read_events(event_fname)
picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=False,
picks=picks, baseline=None, preload=True,
verbose=False)
X = epochs.get_data()
##############################################################################
# Transform data with PCA computed on the average, i.e. the evoked response
pca = UnsupervisedSpatialFilter(PCA(30), average=False)
pca_data = pca.fit_transform(X)
ev = mne.EvokedArray(np.mean(pca_data, axis=0),
mne.create_info(30, epochs.info['sfreq'],
ch_types='eeg'), tmin=tmin)
ev.plot(show=False, window_title="PCA", time_unit='s')
##############################################################################
# Transform data with ICA computed on the raw epochs (no averaging)
ica = UnsupervisedSpatialFilter(FastICA(30), average=False)
ica_data = ica.fit_transform(X)
ev1 = mne.EvokedArray(np.mean(ica_data, axis=0),
mne.create_info(30, epochs.info['sfreq'],
ch_types='eeg'), tmin=tmin)
ev1.plot(show=False, window_title='ICA', time_unit='s')
plt.show()
| bsd-3-clause |
APPIAN-PET/APPIAN | .backup/conf.py | 1 | 6007 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'APPIAN'
copyright = u'2018, Thomas Funck, Kevin Larcher'
author = u'Thomas Funck <tffunck@gmail.com>, Kevin Larcher'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u'0.1.1-alpha'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'aafigure.sphinxext'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'APPIANdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'APPIAN.tex', u'APPIAN Documentation',
u'Thomas Funck, Kevin Larcher', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'appian', u'APPIAN Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'APPIAN', u'APPIAN Documentation',
author, 'APPIAN', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Module mocking ----------------------------------------------------------
autodoc_mock_imports = ["traits_extension", "nipype", "nibabel", "pyminc", "pandas", "matplotlib", "numpy", "scipy", "sklearn", "seaborn", "minc" ]
| mit |
Hiyorimi/scikit-image | doc/examples/segmentation/plot_threshold_adaptive.py | 5 | 1307 | """
=====================
Adaptive Thresholding
=====================
Thresholding is the simplest way to segment objects from a background. If that
background is relatively uniform, then you can use a global threshold value to
binarize the image by pixel-intensity. If there's large variation in the
background intensity, however, adaptive thresholding (a.k.a. local or dynamic
thresholding) may produce better results.
Here, we binarize an image using the `threshold_adaptive` function, which
calculates thresholds in regions of size `block_size` surrounding each pixel
(i.e. local neighborhoods). Each threshold value is the weighted mean of the
local neighborhood minus an offset value.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.filters import threshold_otsu, threshold_adaptive
image = data.page()
global_thresh = threshold_otsu(image)
binary_global = image > global_thresh
block_size = 35
binary_adaptive = threshold_adaptive(image, block_size, offset=10)
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax0, ax1, ax2 = axes
plt.gray()
ax0.imshow(image)
ax0.set_title('Image')
ax1.imshow(binary_global)
ax1.set_title('Global thresholding')
ax2.imshow(binary_adaptive)
ax2.set_title('Adaptive thresholding')
for ax in axes:
ax.axis('off')
plt.show()
| bsd-3-clause |
jastarex/DeepLearningCourseCodes | 01_TF_basics_and_linear_regression/linear_regression_tf.py | 1 | 2356 |
# coding: utf-8
# # TensorFlow Linear Regression Example
# In[1]:
from __future__ import print_function
import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
rng = numpy.random
# In[2]:
# Hyper parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50
# In[3]:
# Training data
train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples = train_X.shape[0]
# In[4]:
# tf Graph input (placeholders for X and Y)
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Set model weights (initialize the model's weight and bias variables)
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# In[5]:
# Construct a linear model: pred = W * X + b
pred = tf.add(tf.multiply(X, W), b)
# In[6]:
# Loss function: mean squared error, cost = sum((pred - Y)**2) / (2 * n_samples)
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
# Optimizer: gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# In[8]:
# Initialize the variables (i.e. assign their default values)
init = tf.global_variables_initializer()
# In[9]:
# Start training
with tf.Session() as sess:
sess.run(init)
# Fit all training data
for epoch in range(training_epochs):
for (x, y) in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
#Display logs per epoch step
if (epoch+1) % display_step == 0:
c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c),"W=", sess.run(W), "b=", sess.run(b))
print("Optimization Finished!")
training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
#Graphic display
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
plt.legend()
plt.show()
# In[1]:
# Regression result
| apache-2.0 |
rosswhitfield/mantid | qt/applications/workbench/workbench/test/test_mainwindow.py | 3 | 16416 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
"""
Defines the QMainWindow of the application and the main() entry point.
"""
import unittest
import sys
from unittest.mock import patch, Mock, MagicMock, call
import matplotlib
from mantidqt.utils.qt.testing import start_qapplication
from mantid.api import FrameworkManager
from qtpy.QtWidgets import QMessageBox, QAction, QMenu
from workbench.utils.recentlyclosedscriptsmenu import RecentlyClosedScriptsMenu # noqa
from io import StringIO
@start_qapplication
class MainWindowTest(unittest.TestCase):
def setUp(self):
from workbench.app.mainwindow import MainWindow
self.main_window = MainWindow()
def tearDown(self):
self.main_window.close()
@patch("workbench.app.mainwindow.find_window")
def test_launch_custom_cpp_gui_creates_interface_if_not_already_open(self, mock_find_window):
mock_find_window.return_value = None
interface_name = 'ISIS Reflectometry'
with patch.object(self.main_window, 'interface_manager') as mock_interface_manager:
self.main_window.launch_custom_cpp_gui(interface_name)
mock_interface_manager.createSubWindow.assert_called_once_with(interface_name)
@patch("workbench.app.mainwindow.find_window")
def test_different_interfaces_simultaneously_created(self, mock_find_window):
mock_find_window.return_value = None
interface_name = 'Data Reduction'
second_interface_name = 'Settings'
with patch.object(self.main_window, 'interface_manager') as mock_interface_manager:
self.main_window.launch_custom_cpp_gui(interface_name)
mock_interface_manager.createSubWindow.assert_called_with(interface_name)
self.main_window.launch_custom_cpp_gui(second_interface_name)
mock_interface_manager.createSubWindow.assert_called_with(second_interface_name)
@patch("workbench.app.mainwindow.FrameworkManager")
@patch("workbench.app.mainwindow.QMessageBox")
def test_clear_all_memory_calls_frameworkmanager_when_user_presses_ok(self, mock_msg_box, mock_fm):
mock_msg_box_instance = MagicMock(spec=QMessageBox)
mock_msg_box.return_value = mock_msg_box_instance
mock_msg_box_instance.exec.return_value = mock_msg_box.Ok
mock_fm_instance = MagicMock(spec=FrameworkManager)
mock_fm.Instance.return_value = mock_fm_instance
self.main_window.clear_all_memory_action()
mock_fm_instance.clear.assert_called_once()
@patch("workbench.app.mainwindow.FrameworkManager")
@patch("workbench.app.mainwindow.QMessageBox")
def test_clear_all_memory_does_not_call_frameworkmanager_when_user_presses_cancel(self, mock_msg_box, mock_fm):
mock_msg_box_instance = MagicMock(spec=QMessageBox)
mock_msg_box.return_value = mock_msg_box_instance
mock_msg_box_instance.exec.return_value = mock_msg_box.Cancel
mock_fm_instance = MagicMock(spec=FrameworkManager)
mock_fm.Instance.return_value = mock_fm_instance
self.main_window.clear_all_memory_action()
mock_fm_instance.clear.assert_not_called()
@patch('workbench.plugins.logmessagedisplay.ORIGINAL_STDOUT', new=StringIO())
@patch('workbench.plugins.logmessagedisplay.ORIGINAL_STDERR', new=StringIO())
@patch('workbench.plugins.logmessagedisplay.MessageDisplay.append_script_notice')
@patch('workbench.plugins.logmessagedisplay.MessageDisplay.append_script_error')
def test_after_setup_stdout_and_stderr_are_captured(self, append_script_notice, append_script_error):
original_stdout = sys.stdout
original_stderr = sys.stderr
try:
matplotlib.use("agg")
self.main_window.setup()
print('test stdout')
print('test stderr', file=sys.stderr)
finally:
# whatever happened, we need to reset these so unittest can report it!
sys.stdout = original_stdout
sys.stderr = original_stderr
append_script_notice.assert_called()
append_script_error.assert_called()
def test_menus_exist(self):
self.main_window.menuBar().addMenu = MagicMock()
expected_calls = [call("&File"), call("&View"), call("&Interfaces"), call("&Help")]
self.main_window.create_menus()
self.main_window.menuBar().addMenu.assert_has_calls(expected_calls, any_order=False)
@patch('workbench.app.mainwindow.add_actions')
def test_file_view_and_help_menus_are_correct(self, mock_add_actions):
def convert_action_to_text(menu_item):
"""Takes an item on a mainwindow menu, and returns a representation of the item
so we can assert whether menus look right to the user. """
if isinstance(menu_item, QAction):
return menu_item.text()
if not menu_item:
return None
return type(menu_item)
self.main_window.editor = Mock()
self.main_window.populate_interfaces_menu = Mock()
expected_file_menu_items = [
'Open Script', 'Open Project', None, 'Save Script', 'Save Script as...', RecentlyClosedScriptsMenu, 'Generate Recovery Script',
None, 'Save Project', 'Save Project as...', None, 'Settings', None, 'Manage User Directories', None, 'Script Repository', None,
'Clear All Memory', None, '&Quit'
]
        # There are no widgets on this instance of MainWindow, so they will not appear on the view menu.
expected_view_menu_items = ['Restore Default Layout', None]
expected_help_menu_items = [
'Mantid Help', 'Mantid Concepts', 'Algorithm Descriptions', None, 'Mantid Homepage', 'Mantid Forum', None,
'About Mantid Workbench'
]
self.main_window.create_menus()
self.main_window.create_actions()
self.main_window.populate_menus()
actual_file_menu_items = list(map(convert_action_to_text, self.main_window.file_menu_actions))
actual_view_menu_items = list(map(convert_action_to_text, self.main_window.view_menu_actions))
actual_help_menu_items = list(map(convert_action_to_text, self.main_window.help_menu_actions))
self.assertEqual(expected_file_menu_items, actual_file_menu_items)
self.assertEqual(expected_view_menu_items, actual_view_menu_items)
self.assertEqual(expected_help_menu_items, actual_help_menu_items)
mock_add_actions.assert_has_calls([
call(self.main_window.file_menu, self.main_window.file_menu_actions),
call(self.main_window.view_menu, self.main_window.view_menu_actions),
call(self.main_window.help_menu, self.main_window.help_menu_actions),
])
@patch('workbench.app.mainwindow.add_actions')
def test_interfaces_menu_texts_are_correct(self, _):
interface_dir = './interfaces/'
example_interfaces = {
'General': ['TOFCalculator'],
'Direct': ['DGS_Reduction.py', 'DGSPlanner.py', 'PyChop.py', 'MSlice.py', 'ALF View']
}
with patch('workbench.app.mainwindow.ConfigService',
new={
'interfaces.categories.hidden': '',
'mantidqt.python_interfaces_directory': interface_dir
}):
self.main_window._discover_python_interfaces = Mock(return_value=(example_interfaces, {}))
self.main_window._discover_cpp_interfaces = Mock()
self.main_window.create_menus()
self.main_window.populate_interfaces_menu()
expected_menu_texts = ['Direct', 'General'] # Alphabetical order
actual_menu_texts = [action.text() for action in self.main_window.interfaces_menu.actions()]
self.assertEqual(expected_menu_texts, actual_menu_texts)
expected_direct_texts = ['ALF View', 'DGSPlanner', 'DGS Reduction', 'MSlice', 'PyChop']
expected_general_texts = ['TOFCalculator']
submenus = list(filter(lambda child: isinstance(child, QMenu), self.main_window.interfaces_menu.children()))
actual_direct_texts = [action.text() for action in submenus[0].actions()]
actual_general_texts = [action.text() for action in submenus[1].actions()]
self.assertEqual(expected_direct_texts, actual_direct_texts)
self.assertEqual(expected_general_texts, actual_general_texts)
@patch('workbench.app.mainwindow.add_actions')
def test_that_populate_interfaces_menu_discovers_interfaces(self, _):
interface_dir = './interfaces/'
interfaces = {'category': ['interface.py']}
self.main_window._discover_python_interfaces = Mock(return_value=(interfaces, {}))
self.main_window._discover_cpp_interfaces = Mock()
with patch('workbench.app.mainwindow.ConfigService',
new={
'interfaces.categories.hidden': '',
'mantidqt.python_interfaces_directory': interface_dir
}):
self.main_window.create_menus()
self.main_window.populate_interfaces_menu()
self.main_window._discover_python_interfaces.assert_called_with(interface_dir)
self.main_window._discover_cpp_interfaces.assert_called_with(interfaces)
def test_that_populate_interfaces_menu_ignores_hidden_interfaces(self):
interface_dir = './interfaces/'
self.main_window._discover_python_interfaces = Mock(return_value=({
'category1': ['interface1.py'],
'category2': ['interface2.py']
}, {}))
self.main_window._discover_cpp_interfaces = Mock()
self.main_window.interfaces_menu = Mock()
ConfigService_dict = {'interfaces.categories.hidden': 'category1;category2', 'mantidqt.python_interfaces_directory': interface_dir}
with patch.object(self.main_window, 'interfaces_menu') as mock_interfaces_menu:
with patch('workbench.app.mainwindow.ConfigService', new=ConfigService_dict):
self.main_window.populate_interfaces_menu()
mock_interfaces_menu.addMenu.assert_not_called()
def test_main_window_does_not_close_when_project_is_saving(self):
mock_event = Mock()
mock_project = Mock()
mock_project.is_saving = True
self.main_window.project = mock_project
self.main_window.closeEvent(mock_event)
mock_event.ignore.assert_called()
mock_project.inform_user_not_possible.assert_called()
def test_main_window_does_not_close_when_project_is_loading(self):
mock_event = Mock()
mock_project = Mock()
mock_project.is_loading = True
self.main_window.project = mock_project
self.main_window.closeEvent(mock_event)
mock_event.ignore.assert_called()
mock_project.inform_user_not_possible.assert_called()
def test_main_window_does_not_close_if_project_not_saved_and_user_cancels_project_save(self):
mock_event = Mock()
mock_project = Mock()
        mock_project.is_saving, mock_project.is_loading, mock_project.saved = False, False, False
mock_project.offer_save = Mock(return_value=True) # user cancels when save offered
self.main_window.project = mock_project
self.main_window.closeEvent(mock_event)
mock_project.offer_save.assert_called()
mock_event.ignore.assert_called()
@patch('workbench.app.mainwindow.ConfigService')
@patch('workbench.app.mainwindow.QApplication')
@patch('matplotlib.pyplot.close')
def test_main_window_close_behavior_correct_when_workbench_able_to_be_closed(self, mock_plt_close, mock_QApplication,
mock_ConfigService):
mock_event = Mock()
mock_project = Mock()
mock_project.is_saving, mock_project.is_loading, mock_project.saved = False, False, True
mock_editor = Mock()
mock_editor.app_closing = Mock(return_value=True) # Editors can be closed
mock_project_recovery = Mock()
mock_project_recovery.stop_recovery_thread, mock_project_recovery.remove_current_pid_folder =\
Mock(), Mock()
self.main_window.editor = mock_editor
self.main_window.writeSettings = Mock()
self.main_window.project_recovery = mock_project_recovery
self.main_window.interface_manager = Mock()
self.main_window.writeSettings = Mock()
self.main_window.project = mock_project
self.main_window.closeEvent(mock_event)
mock_ConfigService.saveConfig.assert_called_with(mock_ConfigService.getUserFilename())
self.main_window.writeSettings.assert_called()
mock_plt_close.assert_called_with('all')
mock_QApplication.instance().closeAllWindows.assert_called()
self.main_window.project_recovery.assert_has_calls([call.stop_recovery_thread(), call.remove_current_pid_folder()])
self.assertTrue(self.main_window.project_recovery.closing_workbench)
self.main_window.interface_manager.closeHelpWindow.assert_called()
mock_event.accept.assert_called()
@patch('workbench.app.mainwindow.logger')
@patch('os.path.exists')
def test_python_interfaces_are_discovered_correctly(self, mock_os_path_exists, _):
interfaces = ['Muon/Frequency_Domain_Analysis.py', 'ILL/Drill.py']
interfaces_str = " ".join(interfaces) # config service returns them as a whole string.
mock_os_path_exists.return_value = lambda path: path in interfaces
self.main_window.PYTHON_GUI_BLACKLIST = []
with patch('workbench.app.mainwindow.ConfigService', new={'mantidqt.python_interfaces': interfaces_str}):
returned_interfaces, registration_files = self.main_window._discover_python_interfaces('')
expected_interfaces = {'Muon': ['Frequency_Domain_Analysis.py'], 'ILL': ['Drill.py']}
self.assertDictEqual(expected_interfaces, returned_interfaces)
self.assertDictEqual({}, registration_files)
@patch('workbench.app.mainwindow.logger')
@patch('os.path.exists')
def test_that_non_existent_python_interface_is_ignored_gracefully(self, mock_os_path_exists, mock_logger):
interface_str = 'fake/interface.py'
mock_os_path_exists.return_value = False
self.main_window.PYTHON_GUI_BLACKLIST = []
with patch('workbench.app.mainwindow.ConfigService', new={'mantidqt.python_interfaces': interface_str}):
returned_interfaces, registration_files = self.main_window._discover_python_interfaces('')
self.assertDictEqual({}, returned_interfaces)
self.assertDictEqual({}, registration_files)
mock_logger.warning.assert_called()
@patch('workbench.app.mainwindow.UserSubWindowFactory')
def test_cpp_interfaces_are_discovered_correctly(self, mock_UserSubWindowFactory):
"""Assuming we have already found some python interfaces, test that
cpp interfaces are discovered correctly using the Direct interfaces as an example."""
cpp_interface_factory = Mock()
cpp_interface_factory.keys.return_value = ['ALF View', 'TOFCalculator']
cpp_interface_factory.categories.side_effect = lambda name: ['Direct'] if name == 'ALF View' else []
mock_UserSubWindowFactory.Instance.return_value = cpp_interface_factory
all_interfaces = self.main_window._discover_cpp_interfaces(
{'Direct': ['DGS_Reduction.py', 'DGSPlanner.py', 'PyChop.py', 'MSlice.py']})
expected_interfaces = {
'Direct': ['DGS_Reduction.py', 'DGSPlanner.py', 'PyChop.py', 'MSlice.py', 'ALF View'],
'General': ['TOFCalculator']
}
self.assertDictEqual(expected_interfaces, all_interfaces)
@patch('workbench.app.mainwindow.input_qinputdialog')
def test_override_python_input_replaces_input_with_qinputdialog(self, mock_input):
self.main_window.override_python_input()
input("prompt")
mock_input.assert_called_with("prompt")
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
jreback/pandas | pandas/io/clipboard/__init__.py | 1 | 21542 | """
Pyperclip
A cross-platform clipboard module for Python,
with copy & paste functions for plain text.
By Al Sweigart al@inventwithpython.com
BSD License
Usage:
import pyperclip
pyperclip.copy('The text to be copied to the clipboard.')
spam = pyperclip.paste()
if not pyperclip.is_available():
print("Copy functionality unavailable!")
On Windows, no additional modules are needed.
On Mac, the pyobjc module is used, falling back to the pbcopy and pbpaste cli
commands. (These commands should come with OS X.)
On Linux, install xclip or xsel via package manager. For example, in Debian:
sudo apt-get install xclip
sudo apt-get install xsel
Otherwise on Linux, you will need the PyQt5 modules installed.
This module does not work with PyGObject yet.
Cygwin is currently not supported.
Security Note: This module runs programs with these names:
- which
- where
- pbcopy
- pbpaste
- xclip
- xsel
- klipper
- qdbus
A malicious user could rename or add programs with these names, tricking
Pyperclip into running them with whatever permissions the Python process has.
"""
__version__ = "1.7.0"
import contextlib
import ctypes
from ctypes import c_size_t, c_wchar, c_wchar_p, get_errno, sizeof
import distutils.spawn
import os
import platform
import subprocess
import time
import warnings
# `import PyQt4` sys.exit()s if DISPLAY is not in the environment.
# Thus, we need to detect the presence of $DISPLAY manually
# and not load PyQt4 if it is absent.
HAS_DISPLAY = os.getenv("DISPLAY", False)
EXCEPT_MSG = """
Pyperclip could not find a copy/paste mechanism for your system.
For more information, please visit
https://pyperclip.readthedocs.io/en/latest/introduction.html#not-implemented-error
"""
ENCODING = "utf-8"
# The "which" unix command finds where a command is.
if platform.system() == "Windows":
WHICH_CMD = "where"
else:
WHICH_CMD = "which"
def _executable_exists(name):
return (
subprocess.call(
[WHICH_CMD, name], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
== 0
)
# Exceptions
class PyperclipException(RuntimeError):
pass
class PyperclipWindowsException(PyperclipException):
def __init__(self, message):
message += f" ({ctypes.WinError()})"
super().__init__(message)
def _stringifyText(text) -> str:
acceptedTypes = (str, int, float, bool)
if not isinstance(text, acceptedTypes):
raise PyperclipException(
f"only str, int, float, and bool values "
f"can be copied to the clipboard, not {type(text).__name__}"
)
return str(text)
def init_osx_pbcopy_clipboard():
def copy_osx_pbcopy(text):
text = _stringifyText(text) # Converts non-str values to str.
p = subprocess.Popen(["pbcopy", "w"], stdin=subprocess.PIPE, close_fds=True)
p.communicate(input=text.encode(ENCODING))
def paste_osx_pbcopy():
p = subprocess.Popen(["pbpaste", "r"], stdout=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
return stdout.decode(ENCODING)
return copy_osx_pbcopy, paste_osx_pbcopy
def init_osx_pyobjc_clipboard():
def copy_osx_pyobjc(text):
"""Copy string argument to clipboard"""
text = _stringifyText(text) # Converts non-str values to str.
newStr = Foundation.NSString.stringWithString_(text).nsstring()
newData = newStr.dataUsingEncoding_(Foundation.NSUTF8StringEncoding)
board = AppKit.NSPasteboard.generalPasteboard()
board.declareTypes_owner_([AppKit.NSStringPboardType], None)
board.setData_forType_(newData, AppKit.NSStringPboardType)
def paste_osx_pyobjc():
"""Returns contents of clipboard"""
board = AppKit.NSPasteboard.generalPasteboard()
content = board.stringForType_(AppKit.NSStringPboardType)
return content
return copy_osx_pyobjc, paste_osx_pyobjc
def init_qt_clipboard():
global QApplication
# $DISPLAY should exist
# Try to import from qtpy, but if that fails try PyQt5 then PyQt4
try:
from qtpy.QtWidgets import QApplication
except ImportError:
try:
from PyQt5.QtWidgets import QApplication
except ImportError:
from PyQt4.QtGui import QApplication
app = QApplication.instance()
if app is None:
app = QApplication([])
def copy_qt(text):
text = _stringifyText(text) # Converts non-str values to str.
cb = app.clipboard()
cb.setText(text)
def paste_qt() -> str:
cb = app.clipboard()
return str(cb.text())
return copy_qt, paste_qt
def init_xclip_clipboard():
DEFAULT_SELECTION = "c"
PRIMARY_SELECTION = "p"
def copy_xclip(text, primary=False):
text = _stringifyText(text) # Converts non-str values to str.
selection = DEFAULT_SELECTION
if primary:
selection = PRIMARY_SELECTION
p = subprocess.Popen(
["xclip", "-selection", selection], stdin=subprocess.PIPE, close_fds=True
)
p.communicate(input=text.encode(ENCODING))
def paste_xclip(primary=False):
selection = DEFAULT_SELECTION
if primary:
selection = PRIMARY_SELECTION
p = subprocess.Popen(
["xclip", "-selection", selection, "-o"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
stdout, stderr = p.communicate()
# Intentionally ignore extraneous output on stderr when clipboard is empty
return stdout.decode(ENCODING)
return copy_xclip, paste_xclip
def init_xsel_clipboard():
DEFAULT_SELECTION = "-b"
PRIMARY_SELECTION = "-p"
def copy_xsel(text, primary=False):
text = _stringifyText(text) # Converts non-str values to str.
selection_flag = DEFAULT_SELECTION
if primary:
selection_flag = PRIMARY_SELECTION
p = subprocess.Popen(
["xsel", selection_flag, "-i"], stdin=subprocess.PIPE, close_fds=True
)
p.communicate(input=text.encode(ENCODING))
def paste_xsel(primary=False):
selection_flag = DEFAULT_SELECTION
if primary:
selection_flag = PRIMARY_SELECTION
p = subprocess.Popen(
["xsel", selection_flag, "-o"], stdout=subprocess.PIPE, close_fds=True
)
stdout, stderr = p.communicate()
return stdout.decode(ENCODING)
return copy_xsel, paste_xsel
def init_klipper_clipboard():
def copy_klipper(text):
text = _stringifyText(text) # Converts non-str values to str.
p = subprocess.Popen(
[
"qdbus",
"org.kde.klipper",
"/klipper",
"setClipboardContents",
text.encode(ENCODING),
],
stdin=subprocess.PIPE,
close_fds=True,
)
p.communicate(input=None)
def paste_klipper():
p = subprocess.Popen(
["qdbus", "org.kde.klipper", "/klipper", "getClipboardContents"],
stdout=subprocess.PIPE,
close_fds=True,
)
stdout, stderr = p.communicate()
# Workaround for https://bugs.kde.org/show_bug.cgi?id=342874
# TODO: https://github.com/asweigart/pyperclip/issues/43
clipboardContents = stdout.decode(ENCODING)
# even if blank, Klipper will append a newline at the end
assert len(clipboardContents) > 0
# make sure that newline is there
assert clipboardContents.endswith("\n")
if clipboardContents.endswith("\n"):
clipboardContents = clipboardContents[:-1]
return clipboardContents
return copy_klipper, paste_klipper
def init_dev_clipboard_clipboard():
def copy_dev_clipboard(text):
text = _stringifyText(text) # Converts non-str values to str.
if text == "":
warnings.warn(
"Pyperclip cannot copy a blank string to the clipboard on Cygwin."
"This is effectively a no-op."
)
if "\r" in text:
warnings.warn("Pyperclip cannot handle \\r characters on Cygwin.")
with open("/dev/clipboard", "wt") as fo:
fo.write(text)
def paste_dev_clipboard() -> str:
with open("/dev/clipboard") as fo:
content = fo.read()
return content
return copy_dev_clipboard, paste_dev_clipboard
def init_no_clipboard():
class ClipboardUnavailable:
def __call__(self, *args, **kwargs):
raise PyperclipException(EXCEPT_MSG)
def __bool__(self) -> bool:
return False
return ClipboardUnavailable(), ClipboardUnavailable()
# Windows-related clipboard functions:
class CheckedCall:
def __init__(self, f):
super().__setattr__("f", f)
def __call__(self, *args):
ret = self.f(*args)
if not ret and get_errno():
raise PyperclipWindowsException("Error calling " + self.f.__name__)
return ret
def __setattr__(self, key, value):
setattr(self.f, key, value)
def init_windows_clipboard():
global HGLOBAL, LPVOID, DWORD, LPCSTR, INT
global HWND, HINSTANCE, HMENU, BOOL, UINT, HANDLE
from ctypes.wintypes import (
BOOL,
DWORD,
HANDLE,
HGLOBAL,
HINSTANCE,
HMENU,
HWND,
INT,
LPCSTR,
LPVOID,
UINT,
)
windll = ctypes.windll
msvcrt = ctypes.CDLL("msvcrt")
safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA)
safeCreateWindowExA.argtypes = [
DWORD,
LPCSTR,
LPCSTR,
DWORD,
INT,
INT,
INT,
INT,
HWND,
HMENU,
HINSTANCE,
LPVOID,
]
safeCreateWindowExA.restype = HWND
safeDestroyWindow = CheckedCall(windll.user32.DestroyWindow)
safeDestroyWindow.argtypes = [HWND]
safeDestroyWindow.restype = BOOL
OpenClipboard = windll.user32.OpenClipboard
OpenClipboard.argtypes = [HWND]
OpenClipboard.restype = BOOL
safeCloseClipboard = CheckedCall(windll.user32.CloseClipboard)
safeCloseClipboard.argtypes = []
safeCloseClipboard.restype = BOOL
safeEmptyClipboard = CheckedCall(windll.user32.EmptyClipboard)
safeEmptyClipboard.argtypes = []
safeEmptyClipboard.restype = BOOL
safeGetClipboardData = CheckedCall(windll.user32.GetClipboardData)
safeGetClipboardData.argtypes = [UINT]
safeGetClipboardData.restype = HANDLE
safeSetClipboardData = CheckedCall(windll.user32.SetClipboardData)
safeSetClipboardData.argtypes = [UINT, HANDLE]
safeSetClipboardData.restype = HANDLE
safeGlobalAlloc = CheckedCall(windll.kernel32.GlobalAlloc)
safeGlobalAlloc.argtypes = [UINT, c_size_t]
safeGlobalAlloc.restype = HGLOBAL
safeGlobalLock = CheckedCall(windll.kernel32.GlobalLock)
safeGlobalLock.argtypes = [HGLOBAL]
safeGlobalLock.restype = LPVOID
safeGlobalUnlock = CheckedCall(windll.kernel32.GlobalUnlock)
safeGlobalUnlock.argtypes = [HGLOBAL]
safeGlobalUnlock.restype = BOOL
wcslen = CheckedCall(msvcrt.wcslen)
wcslen.argtypes = [c_wchar_p]
wcslen.restype = UINT
GMEM_MOVEABLE = 0x0002
CF_UNICODETEXT = 13
@contextlib.contextmanager
def window():
"""
Context that provides a valid Windows hwnd.
"""
# we really just need the hwnd, so setting "STATIC"
# as predefined lpClass is just fine.
hwnd = safeCreateWindowExA(
0, b"STATIC", None, 0, 0, 0, 0, 0, None, None, None, None
)
try:
yield hwnd
finally:
safeDestroyWindow(hwnd)
@contextlib.contextmanager
def clipboard(hwnd):
"""
Context manager that opens the clipboard and prevents
other applications from modifying the clipboard content.
"""
# We may not get the clipboard handle immediately because
# some other application is accessing it (?)
# We try for at least 500ms to get the clipboard.
t = time.time() + 0.5
success = False
while time.time() < t:
success = OpenClipboard(hwnd)
if success:
break
time.sleep(0.01)
if not success:
raise PyperclipWindowsException("Error calling OpenClipboard")
try:
yield
finally:
safeCloseClipboard()
def copy_windows(text):
# This function is heavily based on
# http://msdn.com/ms649016#_win32_Copying_Information_to_the_Clipboard
text = _stringifyText(text) # Converts non-str values to str.
with window() as hwnd:
# http://msdn.com/ms649048
# If an application calls OpenClipboard with hwnd set to NULL,
# EmptyClipboard sets the clipboard owner to NULL;
# this causes SetClipboardData to fail.
# => We need a valid hwnd to copy something.
with clipboard(hwnd):
safeEmptyClipboard()
if text:
# http://msdn.com/ms649051
# If the hMem parameter identifies a memory object,
# the object must have been allocated using the
# function with the GMEM_MOVEABLE flag.
count = wcslen(text) + 1
handle = safeGlobalAlloc(GMEM_MOVEABLE, count * sizeof(c_wchar))
locked_handle = safeGlobalLock(handle)
ctypes.memmove(
c_wchar_p(locked_handle),
c_wchar_p(text),
count * sizeof(c_wchar),
)
safeGlobalUnlock(handle)
safeSetClipboardData(CF_UNICODETEXT, handle)
def paste_windows():
with clipboard(None):
handle = safeGetClipboardData(CF_UNICODETEXT)
if not handle:
# GetClipboardData may return NULL with errno == NO_ERROR
# if the clipboard is empty.
# (Also, it may return a handle to an empty buffer,
# but technically that's not empty)
return ""
return c_wchar_p(handle).value
return copy_windows, paste_windows
def init_wsl_clipboard():
def copy_wsl(text):
text = _stringifyText(text) # Converts non-str values to str.
p = subprocess.Popen(["clip.exe"], stdin=subprocess.PIPE, close_fds=True)
p.communicate(input=text.encode(ENCODING))
def paste_wsl():
p = subprocess.Popen(
["powershell.exe", "-command", "Get-Clipboard"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
stdout, stderr = p.communicate()
# WSL appends "\r\n" to the contents.
return stdout[:-2].decode(ENCODING)
return copy_wsl, paste_wsl
# Automatic detection of clipboard mechanisms
# and importing is done in determine_clipboard():
def determine_clipboard():
"""
Determine the OS/platform and set the copy() and paste() functions
accordingly.
"""
global Foundation, AppKit, qtpy, PyQt4, PyQt5
# Setup for the CYGWIN platform:
if (
"cygwin" in platform.system().lower()
): # Cygwin has a variety of values returned by platform.system(),
# such as 'CYGWIN_NT-6.1'
# FIXME: pyperclip currently does not support Cygwin,
# see https://github.com/asweigart/pyperclip/issues/55
if os.path.exists("/dev/clipboard"):
warnings.warn(
"Pyperclip's support for Cygwin is not perfect,"
"see https://github.com/asweigart/pyperclip/issues/55"
)
return init_dev_clipboard_clipboard()
# Setup for the WINDOWS platform:
elif os.name == "nt" or platform.system() == "Windows":
return init_windows_clipboard()
if platform.system() == "Linux":
if distutils.spawn.find_executable("wslconfig.exe"):
return init_wsl_clipboard()
# Setup for the MAC OS X platform:
if os.name == "mac" or platform.system() == "Darwin":
try:
import AppKit
import Foundation # check if pyobjc is installed
except ImportError:
return init_osx_pbcopy_clipboard()
else:
return init_osx_pyobjc_clipboard()
# Setup for the LINUX platform:
if HAS_DISPLAY:
if _executable_exists("xsel"):
return init_xsel_clipboard()
if _executable_exists("xclip"):
return init_xclip_clipboard()
if _executable_exists("klipper") and _executable_exists("qdbus"):
return init_klipper_clipboard()
try:
# qtpy is a small abstraction layer that lets you write applications
# using a single api call to either PyQt or PySide.
# https://pypi.python.org/project/QtPy
import qtpy # check if qtpy is installed
except ImportError:
# If qtpy isn't installed, fall back on importing PyQt4.
try:
import PyQt5 # check if PyQt5 is installed
except ImportError:
try:
import PyQt4 # check if PyQt4 is installed
except ImportError:
pass # We want to fail fast for all non-ImportError exceptions.
else:
return init_qt_clipboard()
else:
return init_qt_clipboard()
else:
return init_qt_clipboard()
return init_no_clipboard()
def set_clipboard(clipboard):
"""
Explicitly sets the clipboard mechanism. The "clipboard mechanism" is how
the copy() and paste() functions interact with the operating system to
implement the copy/paste feature. The clipboard parameter must be one of:
- pbcopy
- pbobjc (default on Mac OS X)
- qt
- xclip
- xsel
- klipper
- windows (default on Windows)
- no (this is what is set when no clipboard mechanism can be found)
"""
global copy, paste
clipboard_types = {
"pbcopy": init_osx_pbcopy_clipboard,
"pyobjc": init_osx_pyobjc_clipboard,
"qt": init_qt_clipboard, # TODO - split this into 'qtpy', 'pyqt4', and 'pyqt5'
"xclip": init_xclip_clipboard,
"xsel": init_xsel_clipboard,
"klipper": init_klipper_clipboard,
"windows": init_windows_clipboard,
"no": init_no_clipboard,
}
if clipboard not in clipboard_types:
allowed_clipboard_types = [repr(_) for _ in clipboard_types.keys()]
raise ValueError(
f"Argument must be one of {', '.join(allowed_clipboard_types)}"
)
# Sets pyperclip's copy() and paste() functions:
copy, paste = clipboard_types[clipboard]()
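# Minimal usage sketch (illustrative, not part of the vendored module); it assumes
# an environment where the chosen mechanism is actually available (here xclip on a
# Linux machine with a running X display).
def _example_set_clipboard():  # pragma: no cover
    """Explicitly select a mechanism, then round-trip a string through it."""
    set_clipboard("xclip")
    copy("Hello, clipboard!")
    return paste()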
def lazy_load_stub_copy(text):
"""
A stub function for copy(), which will load the real copy() function when
called so that the real copy() function is used for later calls.
This allows users to import pyperclip without having determine_clipboard()
automatically run, which will automatically select a clipboard mechanism.
This could be a problem if it selects, say, the memory-heavy PyQt4 module
but the user was just going to immediately call set_clipboard() to use a
different clipboard mechanism.
The lazy loading this stub function implements gives the user a chance to
call set_clipboard() to pick another clipboard mechanism. Or, if the user
simply calls copy() or paste() without calling set_clipboard() first,
    pyperclip will fall back on whatever clipboard mechanism
    determine_clipboard() automatically chooses.
"""
global copy, paste
copy, paste = determine_clipboard()
return copy(text)
def lazy_load_stub_paste():
"""
A stub function for paste(), which will load the real paste() function when
called so that the real paste() function is used for later calls.
    This allows users to import pyperclip without having determine_clipboard()
    run automatically and select a clipboard mechanism at import time.
This could be a problem if it selects, say, the memory-heavy PyQt4 module
but the user was just going to immediately call set_clipboard() to use a
different clipboard mechanism.
The lazy loading this stub function implements gives the user a chance to
call set_clipboard() to pick another clipboard mechanism. Or, if the user
simply calls copy() or paste() without calling set_clipboard() first,
    pyperclip will fall back on whatever clipboard mechanism
    determine_clipboard() automatically chooses.
"""
global copy, paste
copy, paste = determine_clipboard()
return paste()
def is_available() -> bool:
return copy != lazy_load_stub_copy and paste != lazy_load_stub_paste
# Initially, copy() and paste() are set to lazy loading wrappers which will
# set `copy` and `paste` to real functions the first time they're used, unless
# set_clipboard() or determine_clipboard() is called first.
copy, paste = lazy_load_stub_copy, lazy_load_stub_paste
__all__ = ["copy", "paste", "set_clipboard", "determine_clipboard"]
# pandas aliases
clipboard_get = paste
clipboard_set = copy
| bsd-3-clause |
fja05680/pinkfish | pinkfish/portfolio.py | 1 | 22823 | """
Portfolio backtesting.
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn
import pinkfish as pf
class Portfolio:
"""
A portfolio or collection of securities.
Methods
-------
- fetch_timeseries()
Read time series data for symbols.
- add_technical_indicator()
Add a technical indicator for each symbol in the portfolio.
- calendar()
Add calendar columns.
- finalize_timeseries()
Finalize timeseries.
- get_price()
Return price given row, symbol, and field.
- get_prices()
Return dict of prices for all symbols given row and fields.
- shares()
Return number of shares for given symbol in portfolio.
- positions
Gets the active symbols in portfolio as a list.
- share_percent()
Return share value of symbol as a percentage of `total_funds`.
- adjust_percent()
Adjust symbol to a specified weight (percent) of portfolio.
- print_holdings()
Print snapshot of portfolio holding and values.
- init_trade_logs()
Add a trade log for each symbol.
- record_daily_balance()
Append to daily balance list.
- get_logs()
Return raw tradelog, tradelog, and daily balance log.
- performance_per_symbol()
Returns performance per symbol data, also plots performance.
- correlation_map()
Show correlation map between symbols.
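    Examples
    --------
    A minimal, illustrative sketch of the intended call sequence (not a
    complete backtest; assumes price data for the symbols is available):
    >>> import datetime
    >>> import pinkfish as pf
    >>> portfolio = pf.Portfolio()
    >>> start = datetime.datetime(2015, 1, 1)
    >>> end = datetime.datetime(2020, 1, 1)
    >>> ts = portfolio.fetch_timeseries(['SPY', 'TLT'], start, end)
    >>> ts = portfolio.calendar(ts)
    >>> portfolio.init_trade_logs(ts)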
"""
def __init__(self):
"""
Initialize instance variables.
Attributes
----------
_l : list of tuples
The list of daily balance tuples.
_ts : pd.DataFrame
The timeseries of the portfolio.
symbols : list
The symbols that constitute the portfolio.
"""
self._l = []
self._ts = None
self.symbols = []
####################################################################
# TIMESERIES (fetch, add_technical_indicator, calender, finalize)
def _add_symbol_columns(self, ts, symbol, symbol_ts, fields):
"""
Add column with field suffix for symbol, i.e. SPY_close.
"""
for field in fields:
column = symbol + '_' + field
ts[column] = symbol_ts[field]
return ts
def fetch_timeseries(self, symbols, start, end,
fields=['open', 'high', 'low', 'close'],
dir_name='data',
use_cache=True, use_adj=True,
stock_market_calendar=True):
"""
Read time series data for symbols.
Parameters
----------
symbols : list
The list of symbols to fetch timeseries.
start : datetime.datetime
The desired start date for the strategy.
end : datetime.datetime
The desired end date for the strategy.
fields : list, optional
The list of fields to use for each symbol (default is
['open', 'high', 'low', 'close']).
        dir_name : str, optional
            The directory name passed to `pf.fetch_timeseries` for locating
            cached data (default is 'data').
        use_cache : bool, optional
            True to use data cache. False to retrieve from the
            internet (default is True).
        use_adj : bool, optional
            True to adjust prices for dividends and splits
            (default is True).
stock_market_calendar : bool, optional
True forces use of stock market calendar on timeseries.
            Set to False only if ALL the investments in your portfolio have
            prices for every day of the week, e.g. all cryptocurrencies
            in portfolio (default is True).
Returns
-------
pd.DataFrame
The timeseries of the symbols.
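        Examples
        --------
        An illustrative sketch (here ``portfolio`` is a ``pf.Portfolio``
        instance and data for the symbols can be fetched or read from cache):
        >>> import datetime
        >>> start = datetime.datetime(2018, 1, 1)
        >>> end = datetime.datetime(2020, 1, 1)
        >>> ts = portfolio.fetch_timeseries(['SPY', 'TLT'], start, end)
        >>> sorted(c for c in ts.columns if c.endswith('_close'))
        ['SPY_close', 'TLT_close']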
"""
symbols = list(set(symbols))
for i, symbol in enumerate(symbols):
if i == 0:
ts = pf.fetch_timeseries(symbol, dir_name=dir_name, use_cache=use_cache)
ts = pf.select_tradeperiod(ts, start, end, use_adj=use_adj,
stock_market_calendar=stock_market_calendar)
self._add_symbol_columns(ts, symbol, ts, fields)
ts.drop(columns=['open', 'high', 'low', 'close', 'volume', 'adj_close'],
inplace=True)
else:
# Add another symbol.
_ts = pf.fetch_timeseries(symbol, dir_name=dir_name, use_cache=use_cache)
_ts = pf.select_tradeperiod(_ts, start, end, use_adj=use_adj,
stock_market_calendar=stock_market_calendar)
self._add_symbol_columns(ts, symbol, _ts, fields)
ts.dropna(inplace=True)
self.symbols = symbols
return ts
def add_technical_indicator(self, ts, ta_func, ta_param, output_column_suffix,
input_column_suffix='close'):
"""
Add a technical indicator for each symbol in the portfolio.
A new column will be added for each symbol. The name of the
new column will be the symbol name, an underscore, and the
`output_column_suffix`. For example, 'SPY_MA30' is the symbol
SPY with `output_column_suffix` equal to MA30.
ta_func is a wrapper for a technical analysis function. The
actual technical analysis function could be from ta-lib,
pandas, pinkfish indicator, or a custom user function.
        ta_param is used to pass one parameter to ta_func. Other
        parameters could be passed to the technical indicator within
        ta_func. If you need to pass more than one parameter to ta_func,
        you could make ta_param a dict.
Parameters
----------
ts : pd.DataFrame
The timeseries of the portfolio.
ta_func : function
A wrapper for a technical analysis function.
ta_param : object
The parameter for ta_func (typically an int).
output_column_suffix : str
Output column suffix to use for technical indicator.
input_column_suffix : str, {'close', 'open', 'high', 'low'}
Input column suffix to use for price
(default is 'close').
Returns
-------
ts : pd.DataFrame
Timeseries with new column for technical indicator.
Examples
--------
>>> # Add technical indicator: X day high
        >>> def period_high(ts, ta_param, input_column):
        ...     return pd.Series(ts[input_column]).rolling(ta_param).max()
        >>> ts = portfolio.add_technical_indicator(
        ...     ts, ta_func=period_high, ta_param=period,
        ...     output_column_suffix='period_high'+str(period),
        ...     input_column_suffix='close')
"""
for symbol in self.symbols:
input_column = symbol + '_' + input_column_suffix
output_column = symbol + '_' + output_column_suffix
ts[output_column] = ta_func(ts, ta_param, input_column)
return ts
def calendar(self, ts):
"""
Add calendar columns.
"""
return pf.calendar(ts)
def finalize_timeseries(self, ts, start):
"""
Finalize timeseries.
"""
return pf.finalize_timeseries(ts, start)
####################################################################
# GET PRICES (get_price, get_prices)
def get_price(self, row, symbol, field='close'):
"""
Return price given row, symbol, and field.
Parameters
----------
row : pd.Series
The row of data from the timeseries of the portfolio.
symbol : str
The symbol for a security.
field : str, optional {'close', 'open', 'high', 'low'}
The price field (default is 'close').
Returns
-------
price : float
The current price.
"""
symbol += '_' + field
try:
price = getattr(row, symbol)
except AttributeError:
# This method is slower, but handles column names that
# don't conform to variable name rules, and thus aren't
# attributes.
date = row.Index.to_pydatetime()
price = self._ts.loc[date, symbol]
return price
def get_prices(self, row, fields=['open', 'high', 'low', 'close']):
"""
Return dict of prices for all symbols given row and fields.
Parameters
----------
row : pd.Series
A row of data from the timeseries of the portfolio.
fields : list, optional
The list of fields to use for each symbol (default is
['open', 'high', 'low', 'close']).
Returns
-------
d : dict of floats
The price indexed by symbol and field.
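        Examples
        --------
        An illustrative sketch (``row`` is a namedtuple yielded by
        ``ts.itertuples()`` inside the backtest loop):
        >>> d = portfolio.get_prices(row, fields=['close'])
        >>> spy_close = d['SPY']['close']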
"""
d = {}
for symbol in self.symbols:
d[symbol] = {}
for field in fields:
value = self.get_price(row, symbol, field)
d[symbol][field] = value
return d
####################################################################
# ADJUST POSITION (adjust_shares, adjust_value, adjust_percent, print_holdings)
def _share_value(self, row):
"""
Return total share value in portfolio.
"""
value = 0
for symbol, tlog in pf.TradeLog.instance.items():
close = self.get_price(row, symbol)
value += tlog.share_value(close)
return value
def _total_value(self, row):
"""
Return total_value = share_value + cash (if cash > 0).
"""
total_value = self._share_value(row)
if pf.TradeLog.cash > 0:
total_value += pf.TradeLog.cash
return total_value
def _equity(self, row):
"""
Return equity = total_value - loan (loan is negative cash)
"""
equity = self._total_value(row)
if pf.TradeLog.cash < 0:
equity += pf.TradeLog.cash
return equity
def _leverage(self, row):
"""
Return the leverage factor of the position.
"""
return self._total_value(row) / self._equity(row)
def _total_funds(self, row):
"""
Return total account funds for trading.
"""
return self._equity(row) * pf.TradeLog.margin
def shares(self, symbol):
"""
Return number of shares for given symbol in portfolio.
Parameters
----------
symbol : str
The symbol for a security.
Returns
-------
tlog.shares : int
The number of shares for a given symbol.
"""
tlog = pf.TradeLog.instance[symbol]
return tlog.shares
@property
def positions(self):
"""
Return the active symbols in portfolio as a list.
This returns only those symbols that currently have shares
allocated to them, either long or short.
Parameters
----------
None
Returns
-------
list of str
The active symbols in portfolio.
"""
return [symbol for symbol in self.symbols if self.shares(symbol) > 0]
def share_percent(self, row, symbol):
"""
Return share value of symbol as a percentage of `total_funds`.
Parameters
----------
row : pd.Series
A row of data from the timeseries of the portfolio.
symbol : str
The symbol for a security.
Returns
-------
float
The share value as a percent.
"""
close = self.get_price(row, symbol)
tlog = pf.TradeLog.instance[symbol]
value = tlog.share_value(close)
return value / self._total_funds(row) * 100
def _calc_buying_power(self, row):
"""
Return the buying power.
"""
buying_power = (pf.TradeLog.cash * pf.TradeLog.margin
+ self._share_value(row) * (pf.TradeLog.margin -1))
return buying_power
def _adjust_shares(self, date, price, shares, symbol, row, direction):
"""
Adjust shares.
"""
tlog = pf.TradeLog.instance[symbol]
pf.TradeLog.buying_power = self._calc_buying_power(row)
shares = tlog.adjust_shares(date, price, shares, direction)
pf.TradeLog.buying_power = None
return shares
def _adjust_value(self, date, price, value, symbol, row, direction):
"""
Adjust value.
"""
total_funds = self._total_funds(row)
shares = int(min(total_funds, value) / price)
shares = self._adjust_shares(date, price, shares, symbol, row, direction)
return shares
def adjust_percent(self, date, price, weight, symbol, row,
direction=pf.Direction.LONG):
"""
Adjust symbol to a specified weight (percent) of portfolio.
Parameters
----------
date : str
The current date.
price : float
The current price of the security.
weight : float
The requested weight for the symbol.
symbol : str
The symbol for a security.
row : pd.Series
A row of data from the timeseries of the portfolio.
direction : pf.Direction, optional
The direction of the trade (default is `pf.Direction.LONG`).
Returns
-------
int
The number of shares bought or sold.
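        Examples
        --------
        An illustrative sketch (``date`` and ``row`` come from iterating over
        the portfolio timeseries; the weight may be given as a fraction or as
        a percent, e.g. 0.25 or 25):
        >>> price = portfolio.get_price(row, 'SPY')
        >>> shares = portfolio.adjust_percent(date, price, 0.25, 'SPY', row)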
"""
weight = weight if weight <= 1 else weight/100
total_funds = self._total_funds(row)
value = total_funds * weight
shares = self._adjust_value(date, price, value, symbol, row, direction)
return shares
def adjust_percents(self, date, prices, weights, row, directions=None):
"""
Adjust symbols to a specified weight (percent) of portfolio.
        This function assumes all positions are LONG and that weights
        are given for all symbols in the portfolio.
        The ordering of the prices and weights dicts is unimportant.
        They are both indexed by symbol.
Parameters
----------
date : str
The current date.
prices : dict of floats
Dict of key value pair of symbol:price.
weights : dict of floats
Dict of key value pair of symbol:weight.
row : pd.Series
A row of data from the timeseries of the portfolio.
directions : dict of pf.Direction, optional
The direction of the trades (default is None, which implies
that all positions are `pf.Direction.LONG`).
Returns
-------
w : dict of floats
Dict of key value pair of symbol:weight.
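        Examples
        --------
        An illustrative sketch that rebalances to fixed weights on each bar
        (``date`` and ``row`` come from iterating over the timeseries):
        >>> prices = {symbol: d['close']
        ...           for symbol, d in portfolio.get_prices(row).items()}
        >>> weights = {'SPY': 0.60, 'TLT': 0.40}
        >>> w = portfolio.adjust_percents(date, prices, weights, row)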
"""
w = {}
# Get current weights
for symbol in self.symbols:
w[symbol] = self.share_percent(row, symbol)
        # If directions is None, set all positions to pf.Direction.LONG.
if directions is None:
directions = {symbol:pf.Direction.LONG for symbol in self.symbols}
        # Reverse sort by weights. We want current positions first so that
        # if they need to be reduced or closed out, cash is freed for
        # other symbols.
w = pf.sort_dict(w, reverse=True)
# Update weights with new values.
w.update(weights)
        # Call adjust_percent() for each symbol.
for symbol in self.symbols:
price = prices[symbol]
weight = w[symbol]
direction = directions[symbol]
self.adjust_percent(date, price, weight, symbol, row, direction)
return w
def print_holdings(self, date, row):
"""
Print snapshot of portfolio holding and values.
Includes all symbols regardless of whether a symbol has shares
currently allocated to it.
Parameters
----------
date : str
The current date.
row : pd.Series
A row of data from the timeseries of the portfolio.
Returns
-------
None
"""
# 2010-02-01 SPY: 54 TLT: 59 GLD: 9 cash: 84.20 total: 9,872.30
print(date.strftime('%Y-%m-%d'), end=' ')
for symbol, tlog in pf.TradeLog.instance.items():
print('{}:{:3}'.format(symbol, tlog.shares), end=' ')
print('cash: {:8,.2f}'.format(pf.TradeLog.cash), end=' ')
print('total: {:9,.2f}'.format(self._equity(row)))
####################################################################
# LOGS (init_trade_logs, record_daily_balance, get_logs)
def init_trade_logs(self, ts):
"""
Add a trade log for each symbol.
Parameters
----------
ts : pd.DataFrame
The timeseries of the portfolio.
Returns
-------
None
"""
pf.TradeLog.seq_num = 0
pf.TradeLog.instance.clear()
self._ts = ts
for symbol in self.symbols:
pf.TradeLog(symbol, False)
def record_daily_balance(self, date, row):
"""
Append to daily balance list.
The portfolio version of this function uses closing values
for the daily high, low, and close.
Parameters
----------
date : str
The current date.
row : pd.Series
A row of data from the timeseries of the portfolio.
Returns
-------
None
"""
        # Calculate daily balance values: date, high, low, close,
        # shares, cash, leverage.
equity = self._equity(row)
leverage = self._leverage(row)
shares = 0
for tlog in pf.TradeLog.instance.values():
shares += tlog.shares
t = (date, equity, equity, equity, shares,
pf.TradeLog.cash, leverage)
self._l.append(t)
def get_logs(self):
"""
Return raw tradelog, tradelog, and daily balance log.
Parameters
----------
None
Returns
-------
rlog : pd.DataFrame
The raw trade log.
tlog : pd.DataFrame
The trade log.
dbal : pd.DataFrame
The daily balance log.
"""
tlogs = []; rlogs = []
for tlog in pf.TradeLog.instance.values():
rlogs.append(tlog.get_log_raw())
tlogs.append(tlog.get_log(merge_trades=False))
rlog = pd.concat([r for r in rlogs]).sort_values(['seq_num'],
ignore_index=True)
tlog = pd.concat([t for t in tlogs]).sort_values(['entry_date', 'exit_date'],
ignore_index=True)
tlog['cumul_total'] = tlog['pl_cash'].cumsum()
dbal = pf.DailyBal()
dbal._l = self._l
dbal = dbal.get_log(tlog)
return rlog, tlog, dbal
####################################################################
# PERFORMANCE ANALYSIS (performance_per_symbol, correlation_map)
def performance_per_symbol(self, weights):
"""
Returns performance per symbol data, also plots performance.
Parameters
----------
weights : dict of floats
A dictionary of weights with symbol as key.
Returns
-------
df : pd.DataFrame
The dataframe contains performance for each symbol in the
portfolio.
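        Examples
        --------
        An illustrative sketch (the weights should match those used in the
        backtest):
        >>> df = portfolio.performance_per_symbol(
        ...     weights={'SPY': 0.60, 'TLT': 0.40})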
"""
def _weight(row, weights):
return weights[row.name]
def _currency(row):
return pf.currency(row['cumul_total'])
def _plot(df):
df = df[:-1]
# Make new figure and set the size.
fig = plt.figure(figsize=(12, 8))
axes = fig.add_subplot(111, ylabel='Percentages')
axes.set_title('Performance by Symbol')
df.plot(kind='bar', ax=axes)
axes.set_xticklabels(df.index, rotation=60)
plt.legend(loc='best')
# Convert dict to series.
s = pd.Series(dtype='object')
for symbol, tlog in pf.TradeLog.instance.items():
s[symbol] = tlog.cumul_total
# Convert series to dataframe.
df = pd.DataFrame(s.values, index=s.index, columns=['cumul_total'])
# Add weight column.
df['weight'] = df.apply(_weight, weights=weights, axis=1)
# Add percent column.
df['pct_cumul_total'] = df['cumul_total'] / df['cumul_total'].sum()
        # Add relative performance.
df['relative_performance'] = df['pct_cumul_total'] / df['weight']
# Add TOTAL row.
new_row = pd.Series(name='TOTAL',
data={'cumul_total':df['cumul_total'].sum(),
'pct_cumul_total': 1.00, 'weight': 1.00,
'relative_performance': 1.00})
df = df.append(new_row, ignore_index=False)
# Format as currency.
df['cumul_total'] = df.apply(_currency, axis=1)
# Plot bar graph of performance.
_plot(df)
return df
def correlation_map(self, ts, method='log', days=None):
"""
Show correlation map between symbols.
Parameters
----------
ts : pd.DataFrame
The timeseries of the portfolio.
method : str, optional {'price', 'log', 'returns'}
Analysis done based on specified method (default is 'log').
        days : int, optional
            How many days to use for correlation (default is None,
            which implies all days).
Returns
-------
df : pd.DataFrame
The dataframe contains the correlation data for each symbol
in the portfolio.
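        Examples
        --------
        An illustrative sketch using roughly one year of daily data:
        >>> df = portfolio.correlation_map(ts, method='log', days=252)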
"""
        # Filter column names for '_close'; drop the '_close' suffix.
        df = ts.filter(regex='_close')
        # Remove the suffix text only (str.strip('_close') would strip any of
        # those characters from both ends of the column names).
        df.columns = [col.replace('_close', '') for col in df.columns]
# Default is all days.
if days is None:
            days = 0
df = df[-days:]
if method == 'price':
pass
elif method == 'log':
df = np.log(df.pct_change()+1)
elif method == 'returns':
df = df.pct_change()
df = df.corr(method='pearson')
        # After corr(), the symbols are already the index, so no reset is needed.
# Take the bottom triangle since it repeats itself.
mask = np.zeros_like(df)
mask[np.triu_indices_from(mask)] = True
# Generate plot.
        seaborn.heatmap(df, cmap='RdYlGn', vmax=1.0, vmin=-1.0,
                        mask=mask, linewidths=2.5)
plt.yticks(rotation=0)
plt.xticks(rotation=90)
return df
| mit |
MSeifert04/numpy | numpy/core/code_generators/ufunc_docstrings.py | 3 | 100671 | """
Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
numpy/core/code_generators/generate_umath.py to generate the docstrings
for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
from __future__ import division, absolute_import, print_function
import textwrap
docdict = {}
def get(name):
return docdict.get(name)
# common parameter text to all ufuncs
subst = {
'PARAMS': textwrap.dedent("""
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
where : array_like, optional
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value.
Note that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
""").strip(),
'BROADCASTABLE_2': ("If ``x1.shape != x2.shape``, they must be "
"broadcastable to a common shape (which becomes the "
"shape of the output)."),
'OUT_SCALAR_1': "This is a scalar if `x` is a scalar.",
'OUT_SCALAR_2': "This is a scalar if both `x1` and `x2` are scalars.",
}
def add_newdoc(place, name, doc):
doc = textwrap.dedent(doc).strip()
skip = (
# gufuncs do not use the OUT_SCALAR replacement strings
'matmul',
# clip has 3 inputs, which is not handled by this
'clip',
)
if name[0] != '_' and name not in skip:
if '\nx :' in doc:
assert '$OUT_SCALAR_1' in doc, "in {}".format(name)
elif '\nx2 :' in doc or '\nx1, x2 :' in doc:
assert '$OUT_SCALAR_2' in doc, "in {}".format(name)
else:
assert False, "Could not detect number of inputs in {}".format(name)
for k, v in subst.items():
doc = doc.replace('$' + k, v)
docdict['.'.join((place, name))] = doc
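# Illustrative note: a call such as
#     add_newdoc('numpy.core.umath', 'absolute', "... $OUT_SCALAR_1 ...")
# stores the text under docdict['numpy.core.umath.absolute'] with the
# '$OUT_SCALAR_1' placeholder replaced by subst['OUT_SCALAR_1'].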
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
``np.abs`` is a shorthand for this function.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
$OUT_SCALAR_1
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(start=-10, stop=10, num=101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10], cmap='gray')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. $BROADCASTABLE_2
$PARAMS
Returns
-------
add : ndarray or scalar
The sum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
$PARAMS
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi].
$OUT_SCALAR_1
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
$OUT_SCALAR_1
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
$PARAMS
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``.
$OUT_SCALAR_1
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Array of the same shape as `x`.
$OUT_SCALAR_1
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
    For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
$PARAMS
Returns
-------
out : ndarray or scalar
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
$OUT_SCALAR_1
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. $BROADCASTABLE_2
$PARAMS
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
$OUT_SCALAR_2
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Array of the same shape as `x`.
$OUT_SCALAR_1
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return
the `z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function
that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
    therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True])
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
    The number 13 has the binary representation ``00001101``. Likewise,
    16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
    then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32),
... np.array([4, 4, 4, 2147483647], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True])
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
$OUT_SCALAR_1
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
$OUT_SCALAR_1
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
$PARAMS
Returns
-------
y : ndarray
        The complex conjugate of `x`, with the same dtype as `x`.
$OUT_SCALAR_1
Notes
-----
`conj` is an alias for `conjugate`:
>>> np.conj is np.conjugate
True
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine element-wise.
Parameters
----------
x : array_like
Input array in radians.
$PARAMS
Returns
-------
y : ndarray
The corresponding cosine values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array of same shape as `x`.
$OUT_SCALAR_1
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
$PARAMS
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
$OUT_SCALAR_1
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
>>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
$PARAMS
Returns
-------
y : ndarray
The corresponding angle in degrees.
$OUT_SCALAR_1
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'heaviside',
"""
Compute the Heaviside step function.
The Heaviside step function is defined as::
                              0   if x1 < 0
        heaviside(x1, x2) =  x2   if x1 == 0
                              1   if x1 > 0
where `x2` is often taken to be 0.5, but 0 and 1 are also sometimes used.
Parameters
----------
x1 : array_like
Input values.
x2 : array_like
The value of the function when x1 is 0. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The output array, element-wise Heaviside step function of `x1`.
$OUT_SCALAR_2
Notes
-----
.. versionadded:: 1.13.0
References
----------
.. Wikipedia, "Heaviside step function",
https://en.wikipedia.org/wiki/Heaviside_step_function
Examples
--------
>>> np.heaviside([-1.5, 0, 2.0], 0.5)
array([ 0. , 0.5, 1. ])
>>> np.heaviside([-1.5, 0, 2.0], 1)
array([ 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The quotient ``x1/x2``, element-wise.
$OUT_SCALAR_2
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and
division by zero.
Notes
-----
Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting.
Behavior on division by zero can be changed using ``seterr``.
In Python 2, when both ``x1`` and ``x2`` are of an integer type,
``divide`` will behave like ``floor_divide``. In Python 3, it behaves
like ``true_divide``.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types (Python 2 only):
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic (again,
Python 2 only), and does not raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using ``seterr``:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False])
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True])
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential of `x`.
$OUT_SCALAR_1
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
    then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with
magnitude 1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
https://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='gray')
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='hsv')
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise 2 to the power `x`.
$OUT_SCALAR_1
See Also
--------
power
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise exponential minus one: ``out = exp(x) - 1``.
$OUT_SCALAR_1
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
$PARAMS
Returns
-------
y : ndarray or scalar
The absolute values of `x`, the returned values are always floats.
$OUT_SCALAR_1
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The floor of each element in `x`.
$OUT_SCALAR_1
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where `floor(-2.5) == -3`.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
It is equivalent to the Python ``//`` operator and pairs with the
    Python ``%`` (`remainder`) function so that ``a = a % b + b * (a // b)``
up to roundoff.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
$OUT_SCALAR_2
See Also
--------
remainder : Remainder complementary to floor_divide.
divmod : Simultaneous floor division and remainder.
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function fmod, the
remainder has the same sign as the dividend `x1`. It is equivalent to
the Matlab(TM) ``rem`` function and should not be confused with the
Python modulus operator ``x1 % x2``.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
$OUT_SCALAR_2
See Also
--------
remainder : Equivalent to the Python ``%`` operator.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors
is bound by conventions. For `fmod`, the sign of result is the sign of
the dividend, while for `remainder` the sign of the result is the sign
of the divisor. The `fmod` function is equivalent to the Matlab(TM)
``rem`` function.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False])
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : bool or ndarray of bool
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False])
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s). $BROADCASTABLE_2
$PARAMS
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
$OUT_SCALAR_2
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned. In a
two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
    representing signed integers on computers [1]_. An N-bit
two's-complement system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_1
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
https://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
The result depends on the bit-width:
>>> x = np.invert(np.array(13, dtype=np.uint16))
>>> x
65522
>>> np.binary_repr(x, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(np.array([True, False]))
array([False, True])
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray, bool
True where ``x`` is not positive infinity, negative infinity,
or NaN; false otherwise.
$OUT_SCALAR_1
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754). This means that Not a Number is not equivalent to infinity,
    and that positive infinity is not equivalent to negative infinity. But
    infinity is equivalent to positive infinity. Errors result if the
second argument is also supplied when `x` is a scalar input, or if
first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Returns a boolean array of the same shape as `x`, True where ``x ==
+/-inf``, otherwise False.
Parameters
----------
x : array_like
Input values
$PARAMS
Returns
-------
y : bool (scalar) or boolean ndarray
True where ``x`` is positive or negative infinity, false otherwise.
$OUT_SCALAR_1
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray or bool
True where ``x`` is NaN, false otherwise.
$OUT_SCALAR_1
See Also
--------
isinf, isneginf, isposinf, isfinite, isnat
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False])
""")
add_newdoc('numpy.core.umath', 'isnat',
"""
Test element-wise for NaT (not a time) and return result as a boolean array.
.. versionadded:: 1.13.0
Parameters
----------
x : array_like
Input array with datetime or timedelta data type.
$PARAMS
Returns
-------
y : ndarray or bool
True where ``x`` is NaT, false otherwise.
$OUT_SCALAR_1
See Also
--------
isnan, isinf, isneginf, isposinf, isfinite
Examples
--------
>>> np.isnat(np.datetime64("NaT"))
True
>>> np.isnat(np.datetime64("2016-01-01"))
False
>>> np.isnat(np.array(["NaT", "2016-01-01"], dtype="datetime64[ns]"))
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
$OUT_SCALAR_2
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
    Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True])
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : array_like
Input value.
$PARAMS
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
$OUT_SCALAR_1
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
$OUT_SCALAR_1
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it.
`log10` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., nan])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
$OUT_SCALAR_1
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values. $BROADCASTABLE_2
$PARAMS
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
$OUT_SCALAR_2
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small as
to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values. $BROADCASTABLE_2
$PARAMS
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
$OUT_SCALAR_2
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
$OUT_SCALAR_1
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it.
`log1p` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or bool
        Boolean result of the logical AND operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False])
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False])
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
$PARAMS
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
$OUT_SCALAR_1
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False])
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True])
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or bool
Boolean result of the logical OR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False])
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True])
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False])
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True])
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]])
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
minimum :
Element-wise minimum of two arrays, propagates NaNs.
fmax :
Element-wise maximum of two arrays, ignores NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
fmin, amin, nanmin
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when
neither x1 nor x2 are nans, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([nan, nan, nan])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
maximum :
Element-wise maximum of two arrays, propagates NaNs.
fmin :
Element-wise minimum of two arrays, ignores NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
fmax, amax, nanmax
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([nan, nan, nan])
>>> np.minimum(-np.Inf, 1)
-inf
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
fmin :
Element-wise minimum of two arrays, ignores NaNs.
maximum :
Element-wise maximum of two arrays, propagates NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
minimum, amin, nanmin
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([ 2., 5., 4.])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., nan])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
fmax :
Element-wise maximum of two arrays, ignores NaNs.
minimum :
Element-wise minimum of two arrays, propagates NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
maximum, amax, nanmax
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., nan])
""")
add_newdoc('numpy.core.umath', 'clip',
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Equivalent to but faster than ``np.minimum(np.maximum(a, a_min), a_max)``.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min : array_like
Minimum value.
a_max : array_like
Maximum value.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
$PARAMS
See Also
--------
numpy.clip :
Wrapper that makes the ``a_min`` and ``a_max`` arguments optional,
dispatching to one of `~numpy.core.umath.clip`,
`~numpy.core.umath.minimum`, and `~numpy.core.umath.maximum`.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
""")
add_newdoc('numpy.core.umath', 'matmul',
"""
Matrix product of two arrays.
Parameters
----------
x1, x2 : array_like
Input arrays, scalars not allowed.
out : ndarray, optional
A location into which the result is stored. If provided, it must have
a shape that matches the signature `(n,k),(k,m)->(n,m)`. If not
provided or `None`, a freshly-allocated array is returned.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
.. versionadded:: 1.16
Now handles ufunc kwargs
Returns
-------
y : ndarray
The matrix product of the inputs.
This is a scalar only when both x1, x2 are 1-d vectors.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
If a scalar value is passed in.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
dot : alternative matrix product with different broadcasting rules.
Notes
-----
The behavior depends on the arguments in the following way.
- If both arguments are 2-D they are multiplied like conventional
matrices.
- If either argument is N-D, N > 2, it is treated as a stack of
matrices residing in the last two indexes and broadcast accordingly.
- If the first argument is 1-D, it is promoted to a matrix by
prepending a 1 to its dimensions. After matrix multiplication
the prepended 1 is removed.
- If the second argument is 1-D, it is promoted to a matrix by
appending a 1 to its dimensions. After matrix multiplication
the appended 1 is removed.
``matmul`` differs from ``dot`` in two important ways:
- Multiplication by scalars is not allowed, use ``*`` instead.
- Stacks of matrices are broadcast together as if the matrices
were elements, respecting the signature ``(n,k),(k,m)->(n,m)``:
>>> a = np.ones([9, 5, 7, 4])
>>> c = np.ones([9, 5, 4, 3])
>>> np.dot(a, c).shape
(9, 5, 7, 9, 5, 3)
>>> np.matmul(a, c).shape
(9, 5, 7, 3)
>>> # n is 7, k is 4, m is 3
The matmul function implements the semantics of the `@` operator introduced
in Python 3.5 following PEP465.
Examples
--------
For 2-D arrays it is the matrix product:
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([[4, 1],
... [2, 2]])
>>> np.matmul(a, b)
array([[4, 1],
[2, 2]])
    For 2-D mixed with 1-D, the result is the usual matrix-vector product.
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([1, 2])
>>> np.matmul(a, b)
array([1, 2])
>>> np.matmul(b, a)
array([1, 2])
Broadcasting is conventional for stacks of arrays
>>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
>>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
>>> np.matmul(a,b).shape
(2, 2, 2)
>>> np.matmul(a, b)[0, 1, 1]
98
>>> sum(a[0, 1, :] * b[0 , :, 1])
98
Vector, vector returns the scalar inner product, but neither argument
is complex-conjugated:
>>> np.matmul([2j, 3j], [2j, 3j])
(-13+0j)
Scalar multiplication raises an error.
>>> np.matmul([1,2], 3)
Traceback (most recent call last):
...
ValueError: matmul: Input operand 1 does not have enough dimensions ...
.. versionadded:: 1.10.0
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y1 : ndarray
Fractional part of `x`.
$OUT_SCALAR_1
y2 : ndarray
Integral part of `x`.
$OUT_SCALAR_1
Notes
-----
For integer input the return values are floats.
See Also
--------
divmod : ``divmod(x, 1)`` is equivalent to ``modf`` with the return values
switched, except it always has a positive remainder.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Numerical negative, element-wise.
Parameters
----------
x : array_like or scalar
Input array.
$PARAMS
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
$OUT_SCALAR_1
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'positive',
"""
Numerical positive, element-wise.
.. versionadded:: 1.13.0
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = +x`.
$OUT_SCALAR_1
Notes
-----
Equivalent to `x.copy()`, but only defined for types that support
arithmetic.
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True])
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]])
""")
add_newdoc('numpy.core.umath', '_ones_like',
"""
This function used to be the numpy.ones_like, but now a specific
function for that has been written for consistency with the other
*_like functions. It is only used internally in a limited fashion now.
See Also
--------
ones_like
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape. Note that an
integer type raised to a negative integer power will raise a ValueError.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
$OUT_SCALAR_2
See Also
--------
float_power : power function that promotes integers to float
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'float_power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in `x2`.
`x1` and `x2` must be broadcastable to the same shape. This differs from
the power function in that integers, float16, and float32 are promoted to
floats with a minimum precision of float64 so that the result is always
inexact. The intent is that the function will return a usable result for
negative powers and seldom overflow for positive powers.
.. versionadded:: 1.12.0
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
$OUT_SCALAR_2
See Also
--------
power : power function that preserves type
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.float_power(x1, 3)
array([ 0., 1., 8., 27., 64., 125.])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.float_power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.float_power(x1, x2)
array([[ 0., 1., 8., 27., 16., 5.],
[ 0., 1., 8., 27., 16., 5.]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
$PARAMS
Returns
-------
y : ndarray
The corresponding radian values.
$OUT_SCALAR_1
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
$PARAMS
Returns
-------
y : ndarray
The corresponding angle in radians.
$OUT_SCALAR_1
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
Return array.
$OUT_SCALAR_1
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes the remainder complementary to the `floor_divide` function. It is
    equivalent to the Python modulus operator ``x1 % x2`` and has the same sign
as the divisor `x2`. The MATLAB function equivalent to ``np.remainder``
is ``mod``.
.. warning::
This should not be confused with:
* Python 3.7's `math.remainder` and C's ``remainder``, which
computes the IEEE remainder, which are the complement to
``round(x1 / x2)``.
        * The MATLAB ``rem`` function and/or the C ``%`` operator, which is the
complement to ``int(x1 / x2)``.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The element-wise remainder of the quotient ``floor_divide(x1, x2)``.
$OUT_SCALAR_2
See Also
--------
floor_divide : Equivalent of Python ``//`` operator.
divmod : Simultaneous floor division and remainder.
fmod : Equivalent of the MATLAB ``rem`` function.
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of)
integers.
``mod`` is an alias of ``remainder``.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'divmod',
"""
Return element-wise quotient and remainder simultaneously.
.. versionadded:: 1.13.0
``np.divmod(x, y)`` is equivalent to ``(x // y, x % y)``, but faster
because it avoids redundant work. It is used to implement the Python
built-in function ``divmod`` on NumPy arrays.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array. $BROADCASTABLE_2
$PARAMS
Returns
-------
out1 : ndarray
Element-wise quotient resulting from floor division.
$OUT_SCALAR_2
out2 : ndarray
Element-wise remainder from floor division.
$OUT_SCALAR_2
See Also
--------
floor_divide : Equivalent to Python's ``//`` operator.
remainder : Equivalent to Python's ``%`` operator.
modf : Equivalent to ``divmod(x, 1)`` for positive ``x`` with the return
values switched.
Examples
--------
>>> np.divmod(np.arange(5), 3)
(array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1]))
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
    Bits are shifted to the right by `x2` places. Because the internal
representation of numbers is in binary format, this operation is
equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
$OUT_SCALAR_2
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array is same shape and type as `x`.
$OUT_SCALAR_1
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. nan
is returned for nan inputs.
For complex inputs, the `sign` function returns
``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``.
complex(nan, 0) is returned for complex nan inputs.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
The sign of `x`.
$OUT_SCALAR_1
Notes
-----
There is more than one definition of sign in common use for complex
numbers. The definition used here is equivalent to :math:`x/\\sqrt{x*x}`
which is different from a common alternative, :math:`x/|x|`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
>>> np.sign(5-2j)
(1+0j)
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
$PARAMS
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
$OUT_SCALAR_1
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False])
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If `x2` is a scalar, its sign will be copied to all elements of `x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The values of `x1` with the sign of `x2`.
$OUT_SCALAR_2
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next floating-point value after x1 towards x2, element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The next representable values of `x1` in the direction of `x2`.
$OUT_SCALAR_2
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True])
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x : array_like
Values to find the spacing of.
$PARAMS
Returns
-------
out : ndarray or scalar
The spacing of values of `x`.
$OUT_SCALAR_1
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and NaN is NaN.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
$PARAMS
Returns
-------
y : array_like
The sine of each element of x.
$OUT_SCALAR_1
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry (the
mathematical study of triangles). Consider a circle of radius 1
centered on the origin. A ray comes in from the :math:`+x` axis, makes
an angle at the origin (measured counter-clockwise from that axis), and
departs from the origin. The :math:`y` coordinate of the outgoing
ray's intersection with the unit circle is the sine of that angle. It
ranges from -1 for :math:`x=3\\pi / 2` to +1 for :math:`\\pi / 2.` The
function has zeroes where the angle is a multiple of :math:`\\pi`.
Sines of angles between :math:`\\pi` and :math:`2\\pi` are negative.
The numerous properties of the sine and related functions are included
in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the non-negative square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
$PARAMS
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
$OUT_SCALAR_1
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, np.inf])
array([ 2., nan, inf])
""")
add_newdoc('numpy.core.umath', 'cbrt',
"""
Return the cube-root of an array, element-wise.
.. versionadded:: 1.10.0
Parameters
----------
x : array_like
The values whose cube-roots are required.
$PARAMS
Returns
-------
y : ndarray
        An array of the same shape as `x`, containing the
        cube-root of each element in `x`.
If `out` was provided, `y` is a reference to it.
$OUT_SCALAR_1
Examples
--------
>>> np.cbrt([1,8,27])
array([ 1., 2., 3.])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise `x*x`, of the same shape and dtype as `x`.
$OUT_SCALAR_1
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding tangent values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
https://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
    Instead of Python's traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array. $BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
$OUT_SCALAR_2
Notes
-----
The floor division operator ``//`` was added in Python 2.2 making
``//`` and ``/`` equivalent operators. The default floor division
operation of ``/`` can be replaced by true division with ``from
__future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
add_newdoc('numpy.core.umath', 'frexp',
"""
Decompose the elements of x into mantissa and twos exponent.
    Returns (`mantissa`, `exponent`), where ``x = mantissa * 2**exponent``.
    The mantissa lies in the open interval (-1, 1), while the twos
exponent is a signed integer.
Parameters
----------
x : array_like
Array of numbers to be decomposed.
out1 : ndarray, optional
Output array for the mantissa. Must have the same shape as `x`.
out2 : ndarray, optional
Output array for the exponent. Must have the same shape as `x`.
$PARAMS
Returns
-------
mantissa : ndarray
Floating values between -1 and 1.
$OUT_SCALAR_1
exponent : ndarray
Integer exponents of 2.
$OUT_SCALAR_1
See Also
--------
ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
Examples
--------
>>> x = np.arange(9)
>>> y1, y2 = np.frexp(x)
>>> y1
array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
0.5 ])
>>> y2
array([0, 1, 2, 2, 3, 3, 3, 3, 4])
>>> y1 * 2**y2
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
add_newdoc('numpy.core.umath', 'ldexp',
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : array_like
Array of multipliers.
x2 : array_like, int
Array of twos exponents. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
$OUT_SCALAR_2
See Also
--------
frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
    `ldexp` is useful as the inverse of `frexp`; if used by itself it is
    clearer to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.], dtype=float16)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
array([ 0., 1., 2., 3., 4., 5.])
""")
add_newdoc('numpy.core.umath', 'gcd',
"""
Returns the greatest common divisor of ``|x1|`` and ``|x2|``
Parameters
----------
x1, x2 : array_like, int
Arrays of values. $BROADCASTABLE_2
Returns
-------
y : ndarray or scalar
The greatest common divisor of the absolute value of the inputs
$OUT_SCALAR_2
See Also
--------
lcm : The lowest common multiple
Examples
--------
>>> np.gcd(12, 20)
4
>>> np.gcd.reduce([15, 25, 35])
5
>>> np.gcd(np.arange(6), 20)
array([20, 1, 2, 1, 4, 5])
""")
add_newdoc('numpy.core.umath', 'lcm',
"""
Returns the lowest common multiple of ``|x1|`` and ``|x2|``
Parameters
----------
x1, x2 : array_like, int
Arrays of values. $BROADCASTABLE_2
Returns
-------
y : ndarray or scalar
The lowest common multiple of the absolute value of the inputs
$OUT_SCALAR_2
See Also
--------
gcd : The greatest common divisor
Examples
--------
>>> np.lcm(12, 20)
60
>>> np.lcm.reduce([3, 12, 20])
60
>>> np.lcm.reduce([40, 12, 20])
120
>>> np.lcm(np.arange(6), 20)
array([ 0, 20, 20, 60, 20, 20])
""")
| bsd-3-clause |
thangbui/geepee | sandbox/hodgkin_huxley.py | 1 | 6391 | import matplotlib
matplotlib.use('Agg')
import scipy as sp
import matplotlib.pylab as plt
from scipy.integrate import odeint
import numpy as np
from matplotlib.font_manager import FontProperties
fontP = FontProperties()
fontP.set_size('small')
class HodgkinHuxley():
"""Full Hodgkin-Huxley Model implemented in Python"""
C_m = 1.0
"""membrane capacitance, in uF/cm^2"""
g_Na = 120.0
"""Sodium (Na) maximum conductances, in mS/cm^2"""
g_K = 36.0
"""Postassium (K) maximum conductances, in mS/cm^2"""
g_L = 0.3
"""Leak maximum conductances, in mS/cm^2"""
E_Na = 50.0
"""Sodium (Na) Nernst reversal potentials, in mV"""
E_K = -77.0
"""Postassium (K) Nernst reversal potentials, in mV"""
E_L = -54.387
"""Leak Nernst reversal potentials, in mV"""
t = sp.arange(0.0, 200.0, 0.1)
# t = sp.arange(0.0, 100.0, 0.1)
""" The time to integrate over """
def alpha_m(self, V):
"""Channel gating kinetics. Functions of membrane voltage"""
return 0.1*(V+40.0)/(1.0 - sp.exp(-(V+40.0) / 10.0))
def beta_m(self, V):
"""Channel gating kinetics. Functions of membrane voltage"""
return 4.0*sp.exp(-(V+65.0) / 18.0)
def alpha_h(self, V):
"""Channel gating kinetics. Functions of membrane voltage"""
return 0.07*sp.exp(-(V+65.0) / 20.0)
def beta_h(self, V):
"""Channel gating kinetics. Functions of membrane voltage"""
return 1.0/(1.0 + sp.exp(-(V+35.0) / 10.0))
def alpha_n(self, V):
"""Channel gating kinetics. Functions of membrane voltage"""
return 0.01*(V+55.0)/(1.0 - sp.exp(-(V+55.0) / 10.0))
def beta_n(self, V):
"""Channel gating kinetics. Functions of membrane voltage"""
return 0.125*sp.exp(-(V+65) / 80.0)
def I_Na(self, V, m, h):
"""
Membrane current (in uA/cm^2)
Sodium (Na = element name)
| :param V:
| :param m:
| :param h:
| :return:
"""
return self.g_Na * m**3 * h * (V - self.E_Na)
def I_K(self, V, n):
"""
Membrane current (in uA/cm^2)
Potassium (K = element name)
| :param V:
        | :param n:
| :return:
"""
return self.g_K * n**4 * (V - self.E_K)
# Leak
def I_L(self, V):
"""
Membrane current (in uA/cm^2)
Leak
| :param V:
| :return:
"""
return self.g_L * (V - self.E_L)
# def I_inj(self, t):
# """
# External Current
# | :param t: time
# | :return: step up to 10 uA/cm^2 at t>100
# | step down to 0 uA/cm^2 at t>200
# | step up to 35 uA/cm^2 at t>300
# | step down to 0 uA/cm^2 at t>400
# """
# return 15*(t>5) - 15*(t>80) + 40*(t>115) - 40*(t>160)
def I_inj(self, t):
"""
External Current
| :param t: time
        | :return: injected current ramp: rises linearly from 0 to 30 uA/cm^2
        |          between t=10 and t=70, drops back to 0, then rises from
        |          0 to 20 uA/cm^2 between t=110 and t=190 before returning to 0
"""
return (t-10)/2*(t>10) - (t-10)/2*(t>70) + (t-110.0)/4*(t>110) - (t-110.0)/4*(t>190)
# def I_inj(self, t):
# """
# External Current
# | :param t: time
# | :return: step up to 10 uA/cm^2 at t>100
# | step down to 0 uA/cm^2 at t>200
# | step up to 35 uA/cm^2 at t>300
# | step down to 0 uA/cm^2 at t>400
# """
# return 15*(t>5) - 15*(t>35) + 40*(t>65) - 40*(t>95)
# def I_inj(self, t):
# """
# External Current
# | :param t: time
# | :return: step up to 10 uA/cm^2 at t>100
# | step down to 0 uA/cm^2 at t>200
# | step up to 35 uA/cm^2 at t>300
# | step down to 0 uA/cm^2 at t>400
# """
# return 10*(t>100) - 10*(t>200) + 35*(t>300) - 35*(t>400)
@staticmethod
def dALLdt(X, t, self):
"""
Integrate
| :param X:
| :param t:
| :return: calculate membrane potential & activation variables
"""
V, m, h, n = X
dVdt = (self.I_inj(t) - self.I_Na(V, m, h) - self.I_K(V, n) - self.I_L(V)) / self.C_m
dmdt = self.alpha_m(V)*(1.0-m) - self.beta_m(V)*m
dhdt = self.alpha_h(V)*(1.0-h) - self.beta_h(V)*h
dndt = self.alpha_n(V)*(1.0-n) - self.beta_n(V)*n
return dVdt, dmdt, dhdt, dndt
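    # Note: dALLdt integrates the standard Hodgkin-Huxley system,
    #   C_m * dV/dt = I_inj(t) - I_Na(V, m, h) - I_K(V, n) - I_L(V)
    #   dx/dt       = alpha_x(V)*(1 - x) - beta_x(V)*x   for x in {m, h, n}
    # using the rate functions alpha_*/beta_* defined above.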
def Main(self):
"""
Main demo for the Hodgkin Huxley neuron model
"""
X = odeint(self.dALLdt, [-65, 0.05, 0.6, 0.32], self.t, args=(self,))
V = X[:,0]
m = X[:,1]
h = X[:,2]
n = X[:,3]
ina = self.I_Na(V, m, h)
ik = self.I_K(V, n)
il = self.I_L(V)
plt.figure()
plt.subplot(3,1,1)
plt.title('Hodgkin-Huxley Neuron')
plt.plot(self.t, V, 'k')
plt.ylabel('V (mV)')
plt.xticks([])
# plt.subplot(4,1,2)
# plt.plot(self.t, ina, 'c', label='$I_{Na}$')
# plt.plot(self.t, ik, 'y', label='$I_{K}$')
# plt.plot(self.t, il, 'm', label='$I_{L}$')
# plt.ylabel('Current')
# plt.xticks([])
# plt.legend(loc='upper center', ncol=3, prop=fontP)
plt.subplot(3,1,2)
plt.plot(self.t, m, 'r', label='m')
plt.plot(self.t, h, 'g', label='h')
plt.plot(self.t, n, 'b', label='n')
plt.ylabel('Gating Value')
plt.xticks([])
plt.legend(loc='upper center', ncol=3, prop=fontP)
plt.subplot(3,1,3)
i_inj_values = [self.I_inj(t) for t in self.t]
plt.plot(self.t, i_inj_values, 'k')
plt.xlabel('t (ms)')
plt.ylabel('$I_{inj}$ ($\\mu{A}/cm^2$)')
plt.ylim(-2, 42)
plt.savefig('/tmp/hh_data_all.pdf')
plt.figure()
plt.plot(V, n, 'ok', alpha=0.2)
plt.xlabel('V')
plt.ylabel('n')
np.savetxt('hh_data.txt',
np.vstack((V, m, n, h, np.array(i_inj_values))).T,
fmt='%.5f')
plt.show()
plt.savefig('/tmp/hh_data_V_n.pdf')
if __name__ == '__main__':
runner = HodgkinHuxley()
runner.Main() | mit |
thekerrlab/netpyne | netpyne/analysis/lfp.py | 1 | 16820 | """
analysis/lfp.py
Functions to plot and analyze LFP-related results
Contributors: salvadordura@gmail.com
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import range
from builtins import round
from builtins import str
try:
basestring
except NameError:
basestring = str
from future import standard_library
standard_library.install_aliases()
from netpyne import __gui__
if __gui__:
import matplotlib.pyplot as plt
from matplotlib import mlab
import numpy as np
from numbers import Number
from .utils import colorList, exception, _saveFigData, _showFigure, _smooth1d
# -------------------------------------------------------------------------------------------------------------------
## Plot LFP (time-resolved, power spectral density, time-frequency and 3D locations)
# -------------------------------------------------------------------------------------------------------------------
@exception
def plotLFP (electrodes = ['avg', 'all'], plots = ['timeSeries', 'PSD', 'spectrogram', 'locations'], timeRange=None, NFFT=256, noverlap=128,
nperseg=256, minFreq=1, maxFreq=100, stepFreq=1, smooth=0, separation=1.0, includeAxon=True, logx=False, logy=False, norm=False, dpi=200, overlay=False, filtFreq = False, filtOrder=3, detrend=False, specType='morlet', fontSize=14, colors = None, maxPlots=8, lineWidth=1.5, figSize = (8,8), saveData = None, saveFig = None, showFig = True):
'''
Plot LFP
- electrodes (list): List of electrodes to include; 'avg'=avg of all electrodes; 'all'=each electrode separately (default: ['avg', 'all'])
        - plots (list): list of plot types to show (default: ['timeSeries', 'PSD', 'spectrogram', 'locations'])
- timeRange ([start:stop]): Time range of spikes shown; if None shows all (default: None)
- NFFT (int, power of 2): Number of data points used in each block for the PSD and time-freq FFT (default: 256)
- noverlap (int, <nperseg): Number of points of overlap between segments for PSD and time-freq (default: 128)
        - minFreq (float): Minimum frequency shown in plot for PSD and time-freq (default: 1 Hz)
        - maxFreq (float): Maximum frequency shown in plot for PSD and time-freq (default: 100 Hz)
        - stepFreq (float): Step size of the frequency axis for the time-freq plot (default: 1 Hz)
- nperseg (int): Length of each segment for time-freq (default: 256)
- smooth (int): Window size for smoothing LFP; no smoothing if 0 (default: 0)
- separation (float): Separation factor between time-resolved LFP plots; multiplied by max LFP value (default: 1.0)
- includeAxon (boolean): Whether to show the axon in the location plot (default: True)
- logx (boolean)
- logy (boolean)
- norm (boolean)
- filtFreq (float)
- filtOrder (int)
        - detrend (boolean)
- specType ('morlet'|'fft')
- overlay (boolean)
- dpi (int)
- colors
- maxPlots
- lineWidth
        - figSize ((width, height)): Size of figure (default: (8,8))
- saveData (None|True|'fileName'): File name where to save the final data used to generate the figure;
if set to True uses filename from simConfig (default: None)
- saveFig (None|True|'fileName'): File name where to save the figure;
if set to True uses filename from simConfig (default: None)
- showFig (True|False): Whether to show the figure or not (default: True)
- Returns figure handles
'''
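    # Illustrative usage (a sketch only, not executed here; the electrode
    # indices and time range below are hypothetical values chosen for
    # illustration): after running a simulation with LFP recording enabled,
    # a call along these lines plots the average and the first two electrodes:
    #   sim.analysis.plotLFP(electrodes=['avg', 0, 1],
    #                        plots=['timeSeries', 'PSD'],
    #                        timeRange=[100, 1000], saveFig=True)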
from .. import sim
from ..support.scalebar import add_scalebar
print('Plotting LFP ...')
if not colors: colors = colorList
# set font size
plt.rcParams.update({'font.size': fontSize})
# time range
if timeRange is None:
timeRange = [0,sim.cfg.duration]
lfp = np.array(sim.allSimData['LFP'])[int(timeRange[0]/sim.cfg.recordStep):int(timeRange[1]/sim.cfg.recordStep),:]
if filtFreq:
from scipy import signal
fs = 1000.0/sim.cfg.recordStep
nyquist = fs/2.0
if isinstance(filtFreq, list): # bandpass
Wn = [filtFreq[0]/nyquist, filtFreq[1]/nyquist]
b, a = signal.butter(filtOrder, Wn, btype='bandpass')
elif isinstance(filtFreq, Number): # lowpass
Wn = filtFreq/nyquist
b, a = signal.butter(filtOrder, Wn)
for i in range(lfp.shape[1]):
lfp[:,i] = signal.filtfilt(b, a, lfp[:,i])
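    # Note: the block above applies a Butterworth filter to each LFP channel
    # with zero-phase filtering (filtfilt); a two-element filtFreq gives a
    # bandpass, a single number gives a lowpass with that cutoff.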
if detrend:
from scipy import signal
for i in range(lfp.shape[1]):
lfp[:,i] = signal.detrend(lfp[:,i])
if norm:
for i in range(lfp.shape[1]):
offset = min(lfp[:,i])
if offset <= 0:
lfp[:,i] += abs(offset)
lfp[:,i] /= max(lfp[:,i])
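    # Note: the normalization above shifts each channel to be non-negative
    # (when its minimum is <= 0) and rescales it so its maximum is 1.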
# electrode selection
if 'all' in electrodes:
electrodes.remove('all')
electrodes.extend(list(range(int(sim.net.recXElectrode.nsites))))
# plotting
figs = []
#maxPlots = 8.0
data = {'lfp': lfp} # returned data
# time series -----------------------------------------
if 'timeSeries' in plots:
ydisp = np.absolute(lfp).max() * separation
offset = 1.0*ydisp
t = np.arange(timeRange[0], timeRange[1], sim.cfg.recordStep)
if figSize:
figs.append(plt.figure(figsize=figSize))
for i,elec in enumerate(electrodes):
if elec == 'avg':
lfpPlot = np.mean(lfp, axis=1)
color = 'k'
lw=1.0
elif isinstance(elec, Number) and elec <= sim.net.recXElectrode.nsites:
lfpPlot = lfp[:, elec]
color = colors[i%len(colors)]
lw=1.0
plt.plot(t, -lfpPlot+(i*ydisp), color=color, linewidth=lw)
if len(electrodes) > 1:
plt.text(timeRange[0]-0.07*(timeRange[1]-timeRange[0]), (i*ydisp), elec, color=color, ha='center', va='top', fontsize=fontSize, fontweight='bold')
ax = plt.gca()
data['lfpPlot'] = lfpPlot
data['ydisp'] = ydisp
data['t'] = t
# format plot
if len(electrodes) > 1:
plt.text(timeRange[0]-0.14*(timeRange[1]-timeRange[0]), (len(electrodes)*ydisp)/2.0, 'LFP electrode', color='k', ha='left', va='bottom', fontsize=fontSize, rotation=90)
plt.ylim(-offset, (len(electrodes))*ydisp)
else:
plt.suptitle('LFP Signal', fontsize=fontSize, fontweight='bold')
ax.invert_yaxis()
plt.xlabel('time (ms)', fontsize=fontSize)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
plt.subplots_adjust(bottom=0.1, top=1.0, right=1.0)
# calculate scalebar size and add scalebar
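# round_to_n(x, n, m): round x to n significant figures, then round up to the nearest
# multiple of m (e.g. round_to_n(0.7342, 1, 10) -> 10); used below to pick a 'nice' scalebar length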
round_to_n = lambda x, n, m: int(np.ceil(round(x, -int(np.floor(np.log10(abs(x)))) + (n - 1)) / m)) * m
scaley = 1000.0 # values in mV but want to convert to uV
m = 10.0
sizey = 100/scaley
while sizey > 0.25*ydisp:
try:
sizey = round_to_n(0.2*ydisp*scaley, 1, m) / scaley
except Exception:
sizey /= 10.0
m /= 10.0
labely = r'%.3g $\mu$V' % (sizey*scaley)
if len(electrodes) > 1:
add_scalebar(ax,hidey=True, matchy=False, hidex=False, matchx=False, sizex=0, sizey=-sizey, labely=labely, unitsy='$\mu$V', scaley=scaley,
loc=3, pad=0.5, borderpad=0.5, sep=3, prop=None, barcolor="black", barwidth=2)
else:
add_scalebar(ax, hidey=True, matchy=False, hidex=True, matchx=True, sizex=None, sizey=-sizey, labely=labely, unitsy='$\mu$V', scaley=scaley,
unitsx='ms', loc=3, pad=0.5, borderpad=0.5, sep=3, prop=None, barcolor="black", barwidth=2)
# save figure
if saveFig:
if isinstance(saveFig, basestring):
filename = saveFig
else:
filename = sim.cfg.filename+'_'+'lfp.png'
plt.savefig(filename, dpi=dpi)
# PSD ----------------------------------
if 'PSD' in plots:
if overlay:
figs.append(plt.figure(figsize=figSize))
else:
numCols = 1# np.round(len(electrodes) / maxPlots) + 1
figs.append(plt.figure(figsize=(figSize[0]*numCols, figSize[1])))
#import seaborn as sb
allFreqs = []
allSignal = []
data['allFreqs'] = allFreqs
data['allSignal'] = allSignal
for i,elec in enumerate(electrodes):
if not overlay:
plt.subplot(np.ceil(len(electrodes)/numCols), numCols,i+1)
if elec == 'avg':
lfpPlot = np.mean(lfp, axis=1)
color = 'k'
elif isinstance(elec, Number) and elec <= sim.net.recXElectrode.nsites:
lfpPlot = lfp[:, elec]
color = colors[i%len(colors)]
Fs = int(1000.0/sim.cfg.recordStep)
power = mlab.psd(lfpPlot, Fs=Fs, NFFT=NFFT, detrend=mlab.detrend_none, window=mlab.window_hanning,
noverlap=noverlap, pad_to=None, sides='default', scale_by_freq=None)
if smooth:
signal = _smooth1d(10*np.log10(power[0]), smooth)
else:
signal = 10*np.log10(power[0])
freqs = power[1]
allFreqs.append(freqs)
allSignal.append(signal)
plt.plot(freqs[freqs<maxFreq], signal[freqs<maxFreq], linewidth=lineWidth, color=color, label='Electrode %s'%(str(elec)))
plt.xlim([0, maxFreq])
if len(electrodes) > 1 and not overlay:
plt.title('Electrode %s'%(str(elec)), fontsize=fontSize)
plt.ylabel('dB/Hz', fontsize=fontSize)
# ALTERNATIVE PSD CALCULATION USING WELCH
# from http://joelyancey.com/lfp-python-practice/
# from scipy import signal as spsig
# Fs = int(1000.0/sim.cfg.recordStep)
# maxFreq=100
# f, psd = spsig.welch(lfpPlot, Fs, nperseg=100)
# plt.semilogy(f,psd,'k')
# sb.despine()
# plt.xlim((0,maxFreq))
# plt.yticks(size=fontsiz)
# plt.xticks(size=fontsiz)
# plt.ylabel('$uV^{2}/Hz$',size=fontsiz)
# format plot
plt.xlabel('Frequency (Hz)', fontsize=fontSize)
if overlay:
plt.legend(fontsize=fontSize)
plt.tight_layout()
plt.suptitle('LFP Power Spectral Density', fontsize=fontSize, fontweight='bold') # add yaxis in opposite side
plt.subplots_adjust(bottom=0.08, top=0.92)
if logx:
pass
#from IPython import embed; embed()
# save figure
if saveFig:
if isinstance(saveFig, basestring):
filename = saveFig
else:
filename = sim.cfg.filename+'_'+'lfp_psd.png'
plt.savefig(filename, dpi=dpi)
# Spectrogram ------------------------------
if 'spectrogram' in plots:
import matplotlib.cm as cm
numCols = 1 #np.round(len(electrodes) / maxPlots) + 1
figs.append(plt.figure(figsize=(figSize[0]*numCols, figSize[1])))
#t = np.arange(timeRange[0], timeRange[1], sim.cfg.recordStep)
if specType == 'morlet':
from ..support.morlet import MorletSpec, index2ms
spec = []
for i,elec in enumerate(electrodes):
if elec == 'avg':
lfpPlot = np.mean(lfp, axis=1)
elif isinstance(elec, Number) and elec <= sim.net.recXElectrode.nsites:
lfpPlot = lfp[:, elec]
fs = int(1000.0 / sim.cfg.recordStep)
t_spec = np.linspace(0, index2ms(len(lfpPlot), fs), len(lfpPlot))
spec.append(MorletSpec(lfpPlot, fs, freqmin=minFreq, freqmax=maxFreq, freqstep=stepFreq))
f = np.array(range(minFreq, maxFreq+1, stepFreq)) # only used as output for user
vmin = np.array([s.TFR for s in spec]).min()
vmax = np.array([s.TFR for s in spec]).max()
for i,elec in enumerate(electrodes):
plt.subplot(np.ceil(len(electrodes) / numCols), numCols, i + 1)
T = timeRange
F = spec[i].f
if norm:
spec[i].TFR = spec[i].TFR / vmax
S = spec[i].TFR
vc = [0, 1]
else:
S = spec[i].TFR
vc = [vmin, vmax]
plt.imshow(S, extent=(np.amin(T), np.amax(T), np.amin(F), np.amax(F)), origin='lower', interpolation='None', aspect='auto', vmin=vc[0], vmax=vc[1], cmap=plt.get_cmap('viridis'))
plt.colorbar(label='Power')
plt.ylabel('Hz')
plt.tight_layout()
if len(electrodes) > 1:
plt.title('Electrode %s' % (str(elec)), fontsize=fontSize - 2)
elif specType == 'fft':
from scipy import signal as spsig
spec = []
for i,elec in enumerate(electrodes):
if elec == 'avg':
lfpPlot = np.mean(lfp, axis=1)
elif isinstance(elec, Number) and elec <= sim.net.recXElectrode.nsites:
lfpPlot = lfp[:, elec]
# creates spectrogram over a range of data
# from: http://joelyancey.com/lfp-python-practice/
fs = int(1000.0/sim.cfg.recordStep)
f, t_spec, x_spec = spsig.spectrogram(lfpPlot, fs=fs, window='hanning',
detrend=mlab.detrend_none, nperseg=nperseg, noverlap=noverlap, nfft=NFFT, mode='psd')
x_mesh, y_mesh = np.meshgrid(t_spec*1000.0, f[f<maxFreq])
spec.append(10*np.log10(x_spec[f<maxFreq]))
vmin = np.array(spec).min()
vmax = np.array(spec).max()
for i,elec in enumerate(electrodes):
plt.subplot(np.ceil(len(electrodes)/numCols), numCols, i+1)
plt.pcolormesh(x_mesh, y_mesh, spec[i], cmap=cm.viridis, vmin=vmin, vmax=vmax)
plt.colorbar(label='dB/Hz', ticks=[np.ceil(vmin), np.floor(vmax)])
if logy:
plt.yscale('log')
plt.ylabel('Log-frequency (Hz)')
if isinstance(logy, list):
yticks = tuple(logy)
plt.yticks(yticks, yticks)
else:
plt.ylabel('(Hz)')
if len(electrodes) > 1:
plt.title('Electrode %s'%(str(elec)), fontsize=fontSize-2)
plt.xlabel('time (ms)', fontsize=fontSize)
plt.tight_layout()
plt.suptitle('LFP spectrogram', size=fontSize, fontweight='bold')
plt.subplots_adjust(bottom=0.08, top=0.90)
# save figure
if saveFig:
if isinstance(saveFig, basestring):
filename = saveFig
else:
filename = sim.cfg.filename+'_'+'lfp_timefreq.png'
plt.savefig(filename, dpi=dpi)
# locations ------------------------------
if 'locations' in plots:
cvals = [] # used to store total transfer resistance
for cell in sim.net.compartCells:
trSegs = list(np.sum(sim.net.recXElectrode.getTransferResistance(cell.gid)*1e3, axis=0)) # convert from Mohm to kilohm
if not includeAxon:
i = 0
for secName, sec in cell.secs.items():
nseg = sec['hObj'].nseg #.geom.nseg
if 'axon' in secName:
del trSegs[i:i+nseg]  # remove axon segments in one slice; deleting element-by-element shifts the remaining indices
else:
i += nseg  # only advance the index over sections that are kept
cvals.extend(trSegs)
includePost = [c.gid for c in sim.net.compartCells]
fig = sim.analysis.plotShape(includePost=includePost, showElectrodes=electrodes, cvals=cvals, includeAxon=includeAxon, dpi=dpi,
fontSize=fontSize, saveFig=saveFig, showFig=showFig, figSize=figSize)[0]
figs.append(fig)
outputData = {'LFP': lfp, 'electrodes': electrodes, 'timeRange': timeRange, 'saveData': saveData, 'saveFig': saveFig, 'showFig': showFig}
if 'PSD' in plots:
outputData.update({'allFreqs': allFreqs, 'allSignal': allSignal})
if 'spectrogram' in plots:
outputData.update({'spec': spec, 't': t_spec*1000.0, 'freqs': f[f<=maxFreq]})
#save figure data
if saveData:
figData = outputData
_saveFigData(figData, saveData, 'lfp')
# show fig
if showFig: _showFigure()
return figs, outputData | mit |
PatrickOReilly/scikit-learn | sklearn/feature_extraction/text.py | 6 | 50885 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
normalized = unicodedata.normalize('NFKD', s)
if normalized == s:
return s
else:
return ''.join([c for c in normalized if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
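# Example behaviour of the two accent strippers above (illustrative input):
# strip_accents_unicode(u'caf\xe9') -> u'cafe'
# strip_accents_ascii(u'caf\xe9') -> u'cafe'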
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
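# Example (illustrative): with ngram_range=(2, 3), _char_wb_ngrams("hi you") pads each word
# with spaces and yields ' h', 'hi', 'i ', ' hi', 'hi ' for "hi" and
# ' y', 'yo', 'ou', 'u ', ' yo', 'you', 'ou ' for "you"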
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that
can be of type string or bytes, which are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
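# Minimal usage sketch for the HashingVectorizer defined above (illustrative; the corpus is hypothetical):
# hv = HashingVectorizer(n_features=2 ** 18)
# X = hv.transform(['the quick brown fox', 'jumped over the lazy dog'])
# X is then a sparse (2, 2 ** 18) matrix; no vocabulary is stored, so transform is stateless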
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that
can be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
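# Minimal usage sketch for the CountVectorizer defined above (illustrative; the corpus is hypothetical):
# cv = CountVectorizer()
# X = cv.fit_transform(['the cat sat', 'the dog sat'])
# cv.get_feature_names() -> ['cat', 'dog', 'sat', 'the']
# X.toarray() -> [[1, 0, 1, 1], [0, 1, 1, 1]]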
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The formula that is used to compute the tf-idf of term t is
tf-idf(d, t) = tf(t) * idf(d, t), and the idf is computed as
idf(d, t) = log [ n / df(d, t) ] + 1 (if ``smooth_idf=False``),
where n is the total number of documents and df(d, t) is the
document frequency; the document frequency is the number of documents d
that contain term t. The effect of adding "1" to the idf in the equation
above is that terms with zero idf, i.e., terms that occur in all documents
in a training set, will not be entirely ignored.
(Note that the idf formula above differs from the standard
textbook notation that defines the idf as
idf(d, t) = log [ n / (df(d, t) + 1) ]).
If ``smooth_idf=True`` (the default), the constant "1" is added to the
numerator and denominator of the idf as if an extra document was seen
containing every term in the collection exactly once, which prevents
zero divisions: idf(d, t) = log [ (1 + n) / (1 + df(d, t)) ] + 1.
Furthermore, the formulas used to compute tf and idf depend
on parameter settings that correspond to the SMART notation used in IR
as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when
``sublinear_tf=True``.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when ``norm='l2'``, "n" (none)
when ``norm=None``.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schütze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
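# Worked example (illustrative): with smooth_idf=True, 4 raw documents and a term
# present in 2 of them, the smoothed values are n_samples=5 and df=3, so
# idf = log(5 / 3) + 1 ~= 1.51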
self._idf_diag = sp.spdiags(idf, diags=0, m=n_features,
n=n_features, format='csr')
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that
can be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
| bsd-3-clause |
aspilotros/YouTube_views_forecasting | Clustering_ucrdtw.py | 1 | 3331 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 22 17:18:29 2017
K-means clustering of time series using the DTW distance (ucrdtw) from the paper
Thanawin Rakthanmanon, Bilson Campana, Abdullah Mueen, Gustavo Batista,
Brandon Westover, Qiang Zhu, Jesin Zakaria, Eamonn Keogh (2012).
Searching and Mining Trillions of Time Series Subsequences under Dynamic
Time Warping SIGKDD 2012.
Usage
import _ucrdtw
import numpy as np
import matplotlib.pyplot as plt
data = np.cumsum(np.random.uniform(-0.5, 0.5, 1000000))
query = np.cumsum(np.random.uniform(-0.5, 0.5, 100))
loc, dist = _ucrdtw.ucrdtw(data, query, 0.05, True)
query = np.concatenate((np.linspace(0.0, 0.0, loc), query)) + (data[loc] - query[0])
plt.figure()
plt.plot(data)
plt.plot(query)
plt.show()
@author: Alessandro
"""
import sys
sys.path.append('/media/sf_windows-share/Portfolio/ucrdtw-master')
import _ucrdtw
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as pyp
import matplotlib.pylab as plt
import plotly
import plotly.plotly as py
from plotly.graph_objs import *
#%% Importing data
df = pd.read_csv("/media/sf_windows-share/Portfolio/DATA/df_views_30d_norm.csv")
#%% Calculating the cumulative sum
df.iloc[:,0]=0
df_30d_cum=df.iloc[:,:].cumsum(axis=1)
#%%
def UCR_DTW_Distance(data, query,w=0.05, logic=True):
loc, dist = _ucrdtw.ucrdtw(data, query, w, logic)
return loc, dist
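# Note: the third argument (w) is the allowed DTW warping window; in the ucrdtw wrapper it is
# interpreted as a fraction of the query length when < 1 (this reading of the interface is an
# assumption based on the example in the module docstring), and the final boolean is passed
# straight through to _ucrdtw.ucrdtw.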
#%%
def k_means_clust(data,num_clust,num_iter,w=0.05):
centroids = data[np.random.choice(data.shape[0], num_clust, replace=False)]
counter=0
for n in range(num_iter):
counter+=1
print (counter)
assignments={}
#assign data points to clusters
for ind,i in enumerate(data):
#for ind,i in data.iterrows():
min_dist=float('inf')
closest_clust=None
for c_ind,j in enumerate(centroids):
cur_dist=UCR_DTW_Distance(i,j,w,True)[1]
if cur_dist<min_dist:
min_dist=cur_dist
closest_clust=c_ind
if closest_clust in assignments:
assignments[closest_clust].append(ind)
else:
assignments[closest_clust]=[ind]  # start the cluster member list with this index
#recalculate centroids of clusters
for key in assignments:
clust_sum=np.array([0])
for k in assignments[key]:
clust_sum=clust_sum+data[k]
#centroids[key]=[m/len(assignments[key]) for m in clust_sum]
centroids[key] = clust_sum/len(assignments[key])
return centroids, assignments
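#%% Quick sanity check for k_means_clust (illustrative only; uses synthetic random walks)
# toy = np.cumsum(np.random.uniform(-0.5, 0.5, (50, 100)), axis=1)
# cents, assigns = k_means_clust(toy, num_clust=3, num_iter=5, w=0.05)
# cents.shape == (3, 100) and sum(len(v) for v in assigns.values()) == 50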
#%%
centroids, assignments=k_means_clust(
df_30d_cum.iloc[:,:].values, num_clust = 6, num_iter = 10, w = 0.05
)
#%% Plotting Centroids
patterns = pd.DataFrame(centroids)
X=pd.Series(range(0,patterns.shape[1]))
trace=list()
for i in range(patterns.shape[0]):
trace.append( Scatter(
x=X,
y=patterns.iloc[i,:]
))
trace.append(Scatter(x=X, y=df_30d_cum.iloc[20,:]))
data = Data(trace)
plotly.offline.plot(data, filename = 'basic-line')
#%%
pickle.dump(centroids, open("/media/sf_windows-share/Portfolio/DATA/centroids_30d_cum.p",'wb'))
pickle.dump(assignments, open("/media/sf_windows-share/Portfolio/DATA/assignments_30d_cum.p",'wb')) | mit |
nguyentu1602/statsmodels | statsmodels/graphics/tests/test_mosaicplot.py | 17 | 18878 | from __future__ import division
from statsmodels.compat.python import iterkeys, zip, lrange, iteritems, range
from numpy.testing import assert_, assert_raises, dec
from numpy.testing import run_module_suite
# utilities for the tests
from statsmodels.compat.collections import OrderedDict
from statsmodels.api import datasets
import numpy as np
from itertools import product
try:
import matplotlib.pyplot as pylab
have_matplotlib = True
except Exception:
have_matplotlib = False
import pandas
pandas_old = int(pandas.__version__.split('.')[1]) < 9
# the main drawing function
from statsmodels.graphics.mosaicplot import mosaic
# other functions to be tested for accuracy
from statsmodels.graphics.mosaicplot import _hierarchical_split
from statsmodels.graphics.mosaicplot import _reduce_dict
from statsmodels.graphics.mosaicplot import _key_splitting
from statsmodels.graphics.mosaicplot import _normalize_split
from statsmodels.graphics.mosaicplot import _split_rect
@dec.skipif(not have_matplotlib or pandas_old)
def test_data_conversion():
# It will not reorder the elements
# so the dictionary will look odd
# as it key order has the c and b
# keys swapped
import pandas
fig, ax = pylab.subplots(4, 4)
data = {'ax': 1, 'bx': 2, 'cx': 3}
mosaic(data, ax=ax[0, 0], title='basic dict', axes_label=False)
data = pandas.Series(data)
mosaic(data, ax=ax[0, 1], title='basic series', axes_label=False)
data = [1, 2, 3]
mosaic(data, ax=ax[0, 2], title='basic list', axes_label=False)
data = np.asarray(data)
mosaic(data, ax=ax[0, 3], title='basic array', axes_label=False)
data = {('ax', 'cx'): 1, ('bx', 'cx'): 2, ('ax', 'dx'): 3, ('bx', 'dx'): 4}
mosaic(data, ax=ax[1, 0], title='compound dict', axes_label=False)
mosaic(data, ax=ax[2, 0], title='inverted keys dict', index=[1, 0], axes_label=False)
data = pandas.Series(data)
mosaic(data, ax=ax[1, 1], title='compound series', axes_label=False)
mosaic(data, ax=ax[2, 1], title='inverted keys series', index=[1, 0])
data = [[1, 2], [3, 4]]
mosaic(data, ax=ax[1, 2], title='compound list', axes_label=False)
mosaic(data, ax=ax[2, 2], title='inverted keys list', index=[1, 0])
data = np.array([[1, 2], [3, 4]])
mosaic(data, ax=ax[1, 3], title='compound array', axes_label=False)
mosaic(data, ax=ax[2, 3], title='inverted keys array', index=[1, 0], axes_label=False)
gender = ['male', 'male', 'male', 'female', 'female', 'female']
pet = ['cat', 'dog', 'dog', 'cat', 'dog', 'cat']
data = pandas.DataFrame({'gender': gender, 'pet': pet})
mosaic(data, ['gender'], ax=ax[3, 0], title='dataframe by key 1', axes_label=False)
mosaic(data, ['pet'], ax=ax[3, 1], title='dataframe by key 2', axes_label=False)
mosaic(data, ['gender', 'pet'], ax=ax[3, 2], title='both keys', axes_label=False)
mosaic(data, ['pet', 'gender'], ax=ax[3, 3], title='keys inverted', axes_label=False)
pylab.suptitle('testing data conversion (plot 1 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_mosaic_simple():
# display a simple plot of 4 categories of data, split into four
# levels with increasing size for each group
# creation of the levels
key_set = (['male', 'female'], ['old', 'adult', 'young'],
['worker', 'unemployed'], ['healty', 'ill'])
# the cartesian product of all the categories is
# the complete set of categories
keys = list(product(*key_set))
data = OrderedDict(zip(keys, range(1, 1 + len(keys))))
# which colours should I use for the various categories?
# put it into a dict
props = {}
#males and females in blue and red
props[('male',)] = {'color': 'b'}
props[('female',)] = {'color': 'r'}
# all the groups corresponding to ill groups have a different color
for key in keys:
if 'ill' in key:
if 'male' in key:
props[key] = {'color': 'BlueViolet' , 'hatch': '+'}
else:
props[key] = {'color': 'Crimson' , 'hatch': '+'}
# mosaic of the data, with given gaps and colors
mosaic(data, gap=0.05, properties=props, axes_label=False)
pylab.suptitle('synthetic data, 4 categories (plot 2 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib or pandas_old)
def test_mosaic():
# make the same analysis on a known dataset
# load the data and clean it a bit
affairs = datasets.fair.load_pandas()
datas = affairs.exog
# any time greater than 0 is cheating
datas['cheated'] = affairs.endog > 0
# sort by the marriage quality and give meaningful names
# [rate_marriage, age, yrs_married, children,
# religious, educ, occupation, occupation_husb]
datas = datas.sort(['rate_marriage', 'religious'])
num_to_desc = {1: 'awful', 2: 'bad', 3: 'intermediate',
4: 'good', 5: 'wonderful'}
datas['rate_marriage'] = datas['rate_marriage'].map(num_to_desc)
num_to_faith = {1: 'non religious', 2: 'poorly religious', 3: 'religious',
4: 'very religious'}
datas['religious'] = datas['religious'].map(num_to_faith)
num_to_cheat = {False: 'faithful', True: 'cheated'}
datas['cheated'] = datas['cheated'].map(num_to_cheat)
# finished cleaning
fig, ax = pylab.subplots(2, 2)
mosaic(datas, ['rate_marriage', 'cheated'], ax=ax[0, 0],
title='by marriage happiness')
mosaic(datas, ['religious', 'cheated'], ax=ax[0, 1],
title='by religiosity')
mosaic(datas, ['rate_marriage', 'religious', 'cheated'], ax=ax[1, 0],
title='by both', labelizer=lambda k:'')
ax[1, 0].set_xlabel('marriage rating')
ax[1, 0].set_ylabel('religion status')
mosaic(datas, ['religious', 'rate_marriage'], ax=ax[1, 1],
title='inter-dependence', axes_label=False)
pylab.suptitle("extramarital affairs (plot 3 of 4)")
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_mosaic_very_complex():
# make a scattermatrix of mosaic plots to show the correlations between
# each pair of variables in a dataset. Could easily be converted into a
# new function that does this automatically based on the type of data
key_name = ['gender', 'age', 'health', 'work']
key_base = (['male', 'female'], ['old', 'young'],
['healty', 'ill'], ['work', 'unemployed'])
keys = list(product(*key_base))
data = OrderedDict(zip(keys, range(1, 1 + len(keys))))
props = {}
props[('male', 'old')] = {'color': 'r'}
props[('female',)] = {'color': 'pink'}
L = len(key_base)
fig, axes = pylab.subplots(L, L)
for i in range(L):
for j in range(L):
m = set(range(L)).difference(set((i, j)))
if i == j:
axes[i, i].text(0.5, 0.5, key_name[i],
ha='center', va='center')
axes[i, i].set_xticks([])
axes[i, i].set_xticklabels([])
axes[i, i].set_yticks([])
axes[i, i].set_yticklabels([])
else:
ji = max(i, j)
ij = min(i, j)
temp_data = OrderedDict([((k[ij], k[ji]) + tuple(k[r] for r in m), v)
for k, v in iteritems(data)])
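# marginalize the remaining variables away: every key is collapsed down to
# its (ij, ji) pair, so each off-diagonal cell shows only two variables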
keys = list(iterkeys(temp_data))
for k in keys:
value = _reduce_dict(temp_data, k[:2])
temp_data[k[:2]] = value
del temp_data[k]
mosaic(temp_data, ax=axes[i, j], axes_label=False,
properties=props, gap=0.05, horizontal=i > j)
pylab.suptitle('old males should look bright red, (plot 4 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_axes_labeling():
from numpy.random import rand
key_set = (['male', 'female'], ['old', 'adult', 'young'],
['worker', 'unemployed'], ['yes', 'no'])
# the cartesian product of all the categories is
# the complete set of categories
keys = list(product(*key_set))
data = OrderedDict(zip(keys, rand(len(keys))))
lab = lambda k: ''.join(s[0] for s in k)
fig, (ax1, ax2) = pylab.subplots(1, 2, figsize=(16, 8))
mosaic(data, ax=ax1, labelizer=lab, horizontal=True, label_rotation=45)
mosaic(data, ax=ax2, labelizer=lab, horizontal=False,
label_rotation=[0, 45, 90, 0])
#fig.tight_layout()
fig.suptitle("correct alignment of the axes labels")
#pylab.show()
@dec.skipif(not have_matplotlib or pandas_old)
def test_mosaic_empty_cells():
# SMOKE test see #2286
import pandas as pd
mydata = pd.DataFrame({'id2': {64: 'Angelica',
65: 'DXW_UID', 66: 'casuid01',
67: 'casuid01', 68: 'EC93_uid',
69: 'EC93_uid', 70: 'EC93_uid',
60: 'DXW_UID', 61: 'AtmosFox',
62: 'DXW_UID', 63: 'DXW_UID'},
'id1': {64: 'TGP',
65: 'Retention01', 66: 'default',
67: 'default', 68: 'Musa_EC_9_3',
69: 'Musa_EC_9_3', 70: 'Musa_EC_9_3',
60: 'default', 61: 'default',
62: 'default', 63: 'default'}})
ct = pd.crosstab(mydata.id1, mydata.id2)
fig, vals = mosaic(ct.T.unstack())
fig, vals = mosaic(mydata, ['id1','id2'])
eq = lambda x, y: assert_(np.allclose(x, y))
def test_recursive_split():
keys = list(product('mf'))
data = OrderedDict(zip(keys, [1] * len(keys)))
res = _hierarchical_split(data, gap=0)
assert_(list(iterkeys(res)) == keys)
res[('m',)] = (0.0, 0.0, 0.5, 1.0)
res[('f',)] = (0.5, 0.0, 0.5, 1.0)
keys = list(product('mf', 'yao'))
data = OrderedDict(zip(keys, [1] * len(keys)))
res = _hierarchical_split(data, gap=0)
assert_(list(iterkeys(res)) == keys)
res[('m', 'y')] = (0.0, 0.0, 0.5, 1 / 3)
res[('m', 'a')] = (0.0, 1 / 3, 0.5, 1 / 3)
res[('m', 'o')] = (0.0, 2 / 3, 0.5, 1 / 3)
res[('f', 'y')] = (0.5, 0.0, 0.5, 1 / 3)
res[('f', 'a')] = (0.5, 1 / 3, 0.5, 1 / 3)
res[('f', 'o')] = (0.5, 2 / 3, 0.5, 1 / 3)
def test__reduce_dict():
data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), [1] * 8))
eq(_reduce_dict(data, ('m',)), 4)
eq(_reduce_dict(data, ('m', 'o')), 2)
eq(_reduce_dict(data, ('m', 'o', 'w')), 1)
data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), lrange(8)))
eq(_reduce_dict(data, ('m',)), 6)
eq(_reduce_dict(data, ('m', 'o')), 1)
eq(_reduce_dict(data, ('m', 'o', 'w')), 0)
def test__key_splitting():
# subdivide starting with an empty tuple
base_rect = {tuple(): (0, 0, 1, 1)}
res = _key_splitting(base_rect, ['a', 'b'], [1, 1], tuple(), True, 0)
assert_(list(iterkeys(res)) == [('a',), ('b',)])
eq(res[('a',)], (0, 0, 0.5, 1))
eq(res[('b',)], (0.5, 0, 0.5, 1))
# subdivide 'a' into two sublevels
res_bis = _key_splitting(res, ['c', 'd'], [1, 1], ('a',), False, 0)
assert_(list(iterkeys(res_bis)) == [('a', 'c'), ('a', 'd'), ('b',)])
eq(res_bis[('a', 'c')], (0.0, 0.0, 0.5, 0.5))
eq(res_bis[('a', 'd')], (0.0, 0.5, 0.5, 0.5))
eq(res_bis[('b',)], (0.5, 0, 0.5, 1))
# starting with a non-empty tuple and an uneven distribution
base_rect = {('total',): (0, 0, 1, 1)}
res = _key_splitting(base_rect, ['a', 'b'], [1, 2], ('total',), True, 0)
assert_(list(iterkeys(res)) == [('total',) + (e,) for e in ['a', 'b']])
eq(res[('total', 'a')], (0, 0, 1 / 3, 1))
eq(res[('total', 'b')], (1 / 3, 0, 2 / 3, 1))
def test_proportion_normalization():
# extreme values should give back the whole set,
# and so should inserting a 0
eq(_normalize_split(0.), [0.0, 0.0, 1.0])
eq(_normalize_split(1.), [0.0, 1.0, 1.0])
eq(_normalize_split(2.), [0.0, 1.0, 1.0])
# negative values should raise ValueError
assert_raises(ValueError, _normalize_split, -1)
assert_raises(ValueError, _normalize_split, [1., -1])
assert_raises(ValueError, _normalize_split, [1., -1, 0.])
# if everything is zero it will complain
assert_raises(ValueError, _normalize_split, [0.])
assert_raises(ValueError, _normalize_split, [0., 0.])
# one-element array should return the whole interval
eq(_normalize_split([0.5]), [0.0, 1.0])
eq(_normalize_split([1.]), [0.0, 1.0])
eq(_normalize_split([2.]), [0.0, 1.0])
# simple division should give two pieces
for x in [0.3, 0.5, 0.9]:
eq(_normalize_split(x), [0., x, 1.0])
# multiple division should split as the sum of the components
for x, y in [(0.25, 0.5), (0.1, 0.8), (10., 30.)]:
eq(_normalize_split([x, y]), [0., x / (x + y), 1.0])
for x, y, z in [(1., 1., 1.), (0.1, 0.5, 0.7), (10., 30., 40)]:
eq(_normalize_split(
[x, y, z]), [0., x / (x + y + z), (x + y) / (x + y + z), 1.0])
def test_false_split():
# if you ask it to be divided into only one piece, just return the
# original one
pure_square = [0., 0., 1., 1.]
conf_h = dict(proportion=[1], gap=0.0, horizontal=True)
conf_v = dict(proportion=[1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_h), pure_square)
eq(_split_rect(*pure_square, **conf_v), pure_square)
conf_h = dict(proportion=[1], gap=0.5, horizontal=True)
conf_v = dict(proportion=[1], gap=0.5, horizontal=False)
eq(_split_rect(*pure_square, **conf_h), pure_square)
eq(_split_rect(*pure_square, **conf_v), pure_square)
# identity on a void rectangle should not give anything strange
null_square = [0., 0., 0., 0.]
conf = dict(proportion=[1], gap=0.0, horizontal=True)
eq(_split_rect(*null_square, **conf), null_square)
conf = dict(proportion=[1], gap=1.0, horizontal=True)
eq(_split_rect(*null_square, **conf), null_square)
# splitting a negative rectangle should raise error
neg_square = [0., 0., -1., 0.]
conf = dict(proportion=[1], gap=0.0, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1, 1], gap=0.0, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1], gap=0.5, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1, 1], gap=0.5, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
def test_rect_pure_split():
pure_square = [0., 0., 1., 1.]
# division in two equal pieces from the perfect square
h_2split = [(0.0, 0.0, 0.5, 1.0), (0.5, 0.0, 0.5, 1.0)]
conf_h = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 0.5), (0.0, 0.5, 1.0, 0.5)]
conf_v = dict(proportion=[1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in two non-equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 3, 1.0), (1 / 3, 0.0, 2 / 3, 1.0)]
conf_h = dict(proportion=[1, 2], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 3), (0.0, 1 / 3, 1.0, 2 / 3)]
conf_v = dict(proportion=[1, 2], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in three equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 3, 1.0), (1 / 3, 0.0, 1 / 3, 1.0), (2 / 3, 0.0,
1 / 3, 1.0)]
conf_h = dict(proportion=[1, 1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 3), (0.0, 1 / 3, 1.0, 1 / 3), (0.0, 2 / 3,
1.0, 1 / 3)]
conf_v = dict(proportion=[1, 1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in three non-equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 4, 1.0), (1 / 4, 0.0, 1 / 2, 1.0), (3 / 4, 0.0,
1 / 4, 1.0)]
conf_h = dict(proportion=[1, 2, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 4), (0.0, 1 / 4, 1.0, 1 / 2), (0.0, 3 / 4,
1.0, 1 / 4)]
conf_v = dict(proportion=[1, 2, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# splitting on a void rectangle should give multiple void rectangles
null_square = [0., 0., 0., 0.]
conf = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*null_square, **conf), [null_square, null_square])
conf = dict(proportion=[1, 2], gap=1.0, horizontal=True)
eq(_split_rect(*null_square, **conf), [null_square, null_square])
def test_rect_deformed_split():
non_pure_square = [1., -1., 1., 0.5]
# division in two equal pieces from the deformed rectangle
h_2split = [(1.0, -1.0, 0.5, 0.5), (1.5, -1.0, 0.5, 0.5)]
conf_h = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*non_pure_square, **conf_h), h_2split)
v_2split = [(1.0, -1.0, 1.0, 0.25), (1.0, -0.75, 1.0, 0.25)]
conf_v = dict(proportion=[1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*non_pure_square, **conf_v), v_2split)
# division in two non-equal pieces from the deformed rectangle
h_2split = [(1.0, -1.0, 1 / 3, 0.5), (1 + 1 / 3, -1.0, 2 / 3, 0.5)]
conf_h = dict(proportion=[1, 2], gap=0.0, horizontal=True)
eq(_split_rect(*non_pure_square, **conf_h), h_2split)
v_2split = [(1.0, -1.0, 1.0, 1 / 6), (1.0, 1 / 6 - 1, 1.0, 2 / 6)]
conf_v = dict(proportion=[1, 2], gap=0.0, horizontal=False)
eq(_split_rect(*non_pure_square, **conf_v), v_2split)
def test_gap_split():
pure_square = [0., 0., 1., 1.]
# null split
conf_h = dict(proportion=[1], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), pure_square)
# equal split
h_2split = [(0.0, 0.0, 0.25, 1.0), (0.75, 0.0, 0.25, 1.0)]
conf_h = dict(proportion=[1, 1], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
# unequal split
h_2split = [(0.0, 0.0, 1 / 6, 1.0), (0.5 + 1 / 6, 0.0, 1 / 3, 1.0)]
conf_h = dict(proportion=[1, 2], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
def test_default_arg_index():
# see issue #2116
import pandas as pd
df = pd.DataFrame({'size' : ['small', 'large', 'large', 'small', 'large',
'small'],
'length' : ['long', 'short', 'short', 'long', 'long',
'short']})
assert_raises(ValueError, mosaic, data=df, title='foobar')
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
azjps/bokeh | bokeh/sampledata/gapminder.py | 7 | 2828 | ''' Provide a pandas DataFrame instance of four of the datasets from gapminder.org.
These are read in from csv files that have been downloaded from Bokeh's
sample data on S3. But the original code that generated the csvs from the
raw gapminder data is available at the bottom of this file.
'''
from __future__ import absolute_import
from bokeh.util.dependencies import import_required
pd = import_required('pandas',
'gapminder sample data requires Pandas (http://pandas.pydata.org) to be installed')
from os.path import join
import sys
from . import _data_dir
data_dir = _data_dir()
datasets = [
'fertility',
'life_expectancy',
'population',
'regions',
]
for dataset in datasets:
filename = join(data_dir, 'gapminder_%s.csv' % dataset)
try:
setattr(
sys.modules[__name__],
dataset,
pd.read_csv(filename, index_col='Country', encoding='utf-8')
)
except (IOError, OSError):
raise RuntimeError('Could not load gapminder data file "%s". Please execute bokeh.sampledata.download()' % filename)
__all__ = datasets
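# A minimal, hypothetical usage sketch (not part of the original module):
# running ``python -m bokeh.sampledata.gapminder`` prints a short preview of
# each frame, assuming the sample data was fetched beforehand with
# bokeh.sampledata.download().
if __name__ == '__main__':
    for _name in datasets:
        _frame = getattr(sys.modules[__name__], _name)
        print('%s: %d rows x %d columns' % (_name, _frame.shape[0], _frame.shape[1]))
        print(_frame.head())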
# ====================================================
# Original data is from Gapminder - www.gapminder.org.
# The google docs links are maintained by gapminder
# The following script was used to get the data from gapminder
# and process it into the csvs stored in bokeh's sampledata.
"""
population_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0XOoBL_n5tAQ&output=xls"
fertility_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0TAlJeCEzcGQ&output=xls"
life_expectancy_url = "http://spreadsheets.google.com/pub?key=tiAiXcrneZrUnnJ9dBU-PAw&output=xls"
regions_url = "https://docs.google.com/spreadsheets/d/1OxmGUNWeADbPJkQxVPupSOK5MbAECdqThnvyPrwG5Os/pub?gid=1&output=xls"
def _get_data(url):
# Get the data from the url and return only 1964 - 2013
df = pd.read_excel(url, index_col=0)
df = df.unstack().unstack()
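# the double unstack effectively transposes the frame so that the years
# end up on the index, which makes the year-range filter below simple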
df = df[(df.index >= 1964) & (df.index <= 2013)]
df = df.unstack().unstack()
return df
fertility_df = _get_data(fertility_url)
life_expectancy_df = _get_data(life_expectancy_url)
population_df = _get_data(population_url)
regions_df = pd.read_excel(regions_url, index_col=0)
# have common countries across all data
fertility_df = fertility_df.drop(fertility_df.index.difference(life_expectancy_df.index))
population_df = population_df.drop(population_df.index.difference(life_expectancy_df.index))
regions_df = regions_df.drop(regions_df.index.difference(life_expectancy_df.index))
fertility_df.to_csv('gapminder_fertility.csv')
population_df.to_csv('gapminder_population.csv')
life_expectancy_df.to_csv('gapminder_life_expectancy.csv')
regions_df.to_csv('gapminder_regions.csv')
"""
# ======================================================
| bsd-3-clause |
hsiaoching/streethunt-matcher | descriptor.py | 2 | 1808 | #!/usr/local/bin/python
#!/usr/bin/python
import os, sys
import matplotlib.pyplot as plt
import numpy as np
import cv2
import time
def test_feature_detector(imfname, num_slice=1, save_output=False):
descript = 'SIFT'
image = cv2.imread(imfname)
t1 = time.time()
im = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# forb = cv2.FeatureDetector_create(detector)
# kpts = forb.detect(im)
sift = cv2.SIFT(0, 3, 0.04, 10, 1.6)
# surf = cv2.SURF()
h,w = im.shape
print 'h,w = ', h,w
mask = np.zeros((num_slice,h,w), np.uint8)
kp = []
des = []
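# each mask selects the top half (rows 0:h/2) of the i-th vertical strip
# (columns i*w/num_slice to (i+1)*w/num_slice); keypoints are detected and
# described separately for every strip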
for i in range(num_slice):
mask[i, 0:h/2, i*w/num_slice:(i+1)*w/num_slice] = 255
kp.append(sift.detect(im, mask[i]))
kp[i], descript = sift.compute(im, kp[i])
des.append(descript)
# kp_surf = surf.detect(im, None)
img = cv2.drawKeypoints(im, kp[i], flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
omfname = "%s_sift_keypoints%d.jpg" % (os.path.splitext(imfname)[0], i)
if save_output:
cv2.imwrite(omfname, img)
t2 = time.time()
print 'number of KeyPoint objects', len(kp), '(time', t2-t1, ')'
print 'length of descriptor', len(des)
for i in range(num_slice):
print 'descriptor[%d]' % (i), des[i].shape
return kp, des
def main():
imfname = sys.argv[1]
form = ""
detector = "FAST"
num_slice = 4
if (len(sys.argv) >= 3):
num_slice = int(sys.argv[2])
save_output = True
if (len(sys.argv) >= 4):
save_output = (sys.argv[3] == '1')
kpts,des = test_feature_detector(imfname, num_slice, save_output)
if __name__ == '__main__':
if len(sys.argv) < 2:
print "ERROR: No input file. Usage: python descriptor.py INPUT_FILE_NAME [NUM_SLICE]"
else:
main()
| mit |
SU-ECE-17-7/ibeis | ibeis/viz/interact/interact_chip.py | 1 | 18764 | # -*- coding: utf-8 -*-
"""
Interaction for a single annotation.
Also defines annotation context menu.
CommandLine:
python -m ibeis.viz.interact.interact_chip --test-ishow_chip --show --aid 2
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from ibeis import viz
import utool as ut
import vtool as vt
import plottool as pt # NOQA
from functools import partial
import six
from ibeis import constants as const
from plottool import draw_func2 as df2
from plottool.viz_featrow import draw_feat_row
from ibeis.viz import viz_helpers as vh
from plottool import interact_helpers as ih
(print, rrr, profile) = ut.inject2(__name__, '[interact_chip]')
def interact_multichips(ibs, aid_list, config2_=None, **kwargs):
r"""
Args:
ibs (IBEISController): ibeis controller object
aid_list (list): list of annotation rowids
Returns:
MultiImageInteraction: iteract_obj
CommandLine:
python -m ibeis.viz.interact.interact_chip --exec-interact_multichips --show
Example:
>>> # SLOW_DOCTEST
>>> from ibeis.viz.interact.interact_chip import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> aid_list = ibs.get_valid_aids()
>>> iteract_obj = interact_multichips(ibs, aid_list)
>>> iteract_obj.start()
>>> result = ('iteract_obj = %s' % (str(iteract_obj),))
>>> print(result)
>>> ut.show_if_requested()
"""
# FIXME: needs to be flushed out a little
import plottool as pt
show_chip_list = [
partial(viz.show_chip, ibs, aid, config2_=config2_)
for aid in aid_list
]
vizkw = dict(ell=0, pts=1)
context_option_funcs = [
partial(build_annot_context_options, ibs, aid, config2_=config2_)
for aid in aid_list
]
iteract_obj = pt.interact_multi_image.MultiImageInteraction(
show_chip_list, context_option_funcs=context_option_funcs,
vizkw=vizkw, **kwargs)
return iteract_obj
def show_annot_context_menu(ibs, aid, qwin, qpoint, refresh_func=None,
with_interact_name=True, with_interact_chip=True,
with_interact_image=True, config2_=None):
"""
Defines logic for poping up a context menu when viewing an annotation.
Used in other interactions like name_interaction and interact_query_decision
CommandLine:
python -m ibeis.viz.interact.interact_chip --test-ishow_chip --show
"""
import guitool as gt
callback_list = build_annot_context_options(
ibs, aid, refresh_func=refresh_func,
with_interact_name=with_interact_name,
with_interact_chip=with_interact_chip,
with_interact_image=with_interact_image, config2_=config2_)
gt.popup_menu(qwin, qpoint, callback_list)
def build_annot_context_options(ibs, aid, refresh_func=None,
with_interact_name=True,
with_interact_chip=True,
with_interact_image=True, config2_=None):
r"""
Args:
ibs (IBEISController): ibeis controller object
aid (int): annotation id
refresh_func (None): (default = None)
with_interact_name (bool): (default = True)
with_interact_chip (bool): (default = True)
with_interact_image (bool): (default = True)
config2_ (dict): (default = None)
Returns:
list: callback_list
CommandLine:
python -m ibeis.viz.interact.interact_chip --exec-build_annot_context_options
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.viz.interact.interact_chip import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> aid = ibs.get_valid_aids()[0]
>>> refresh_func = None
>>> with_interact_name = True
>>> with_interact_chip = True
>>> with_interact_image = True
>>> config2_ = None
>>> callback_list = build_annot_context_options(ibs, aid, refresh_func,
>>> with_interact_name,
>>> with_interact_chip,
>>> with_interact_image,
>>> config2_)
>>> result = ('callback_list = %s' % (ut.list_str(callback_list, nl=4),))
>>> print(result)
"""
import guitool as gt
is_exemplar = ibs.get_annot_exemplar_flags(aid)
def refresh_wrp(func):
def _wrp():
ret = func()
if refresh_func is None:
print('no refresh func')
else:
print('calling refresh_func=%r' % (refresh_func,))
refresh_func()
return ret
return _wrp
def newplot_wrp(func):
def _wrp():
import plottool as pt
ret = func()
pt.draw()
return ret
return _wrp
@refresh_wrp
def toggle_exemplar_func():
new_flag = not is_exemplar
print('set_annot_exemplar(%r, %r)' % (aid, new_flag))
ibs.set_annot_exemplar_flags(aid, new_flag)
def set_yaw_func(yawtext):
#@refresh_wrp()
def _wrap_yaw():
ibs.set_annot_yaw_texts([aid], [yawtext])
print('set_annot_yaw(%r, %r)' % (aid, yawtext))
return _wrap_yaw
def set_quality_func(qualtext):
#@refresh_wrp()
def _wrp_qual():
ibs.set_annot_quality_texts([aid], [qualtext])
print('set_annot_quality(%r, %r)' % (aid, qualtext))
return _wrp_qual
def set_multiple_func(flag):
#@refresh_wrp()
def _wrp():
ibs.set_annot_multiple([aid], [flag])
print('set_annot_multiple(%r, %r)' % (aid, flag))
return _wrp
# Define popup menu
callback_list = []
nid = ibs.get_annot_name_rowids(aid)
if with_interact_chip:
callback_list += [
('Interact chip',
partial(
ishow_chip, ibs, aid, fnum=None, config2_=config2_))
]
if with_interact_name and not ibs.is_nid_unknown(nid):
#from ibeis.viz.interact import interact_name
#callback_list.append(
# ('Interact name', partial(interact_name.ishow_name, ibs,
# nid, fnum=None))
#)
from ibeis.viz import viz_graph2
nid = ibs.get_annot_nids(aid)
callback_list.append(
('New Split Interact (Annots)',
partial(viz_graph2.make_qt_graph_interface, ibs, nids=[nid])),
)
if with_interact_image:
gid = ibs.get_annot_gids(aid)
from ibeis.viz.interact import interact_annotations2
callback_list.append(
('Interact image',
partial(
interact_annotations2.ishow_image2, ibs, gid, fnum=None))
)
if True:
from ibeis import viz
callback_list.append(
('Show foreground mask',
newplot_wrp(lambda: viz.show_probability_chip(
ibs, aid, config2_=config2_))),
)
callback_list.append(
('Show foreground mask (blended)',
newplot_wrp(lambda: viz.show_probability_chip(
ibs, aid, config2_=config2_, blend=True))),
)
if True:
# Edit mask
callback_list.append(
('Edit mask',
partial(ibs.depc_annot.get_property, 'annotmask', aid, recompute=True))
)
current_qualtext = ibs.get_annot_quality_texts([aid])[0]
current_yawtext = ibs.get_annot_yaw_texts([aid])[0]
current_multiple = ibs.get_annot_multiple([aid])[0]
# Nested viewpoints
callback_list += [
#('Set Viewpoint: ' + key, set_yaw_func(key))
('Set &Viewpoint (%s): ' % (current_yawtext,), [
('&' + str(count) + ' ' +
('*' if current_yawtext == key else '') + key,
set_yaw_func(key))
for count, key in
enumerate(six.iterkeys(const.VIEWTEXT_TO_YAW_RADIANS), start=1)
]),
]
# Nested qualities
callback_list += [
#('Set Quality: ' + key, set_quality_func(key))
('Set &Quality (%s): ' % (current_qualtext,), [
('&' + str(count) + ' ' + ('*' if current_qualtext == key else '') +
'&' + key,
set_quality_func(key))
for count, key in
enumerate(six.iterkeys(const.QUALITY_TEXT_TO_INT), start=1)
]),
]
# TODO: add set species
callback_list += [
('Set &multiple: %r' % (not current_multiple), set_multiple_func(not current_multiple)),
]
with_tags = True
if with_tags:
from ibeis import tag_funcs
case_list = tag_funcs.get_available_annot_tags()
tags = ibs.get_annot_case_tags([aid])[0]
tags = [_.lower() for _ in tags]
case_hotlink_list = gt.make_word_hotlinks(case_list,
after_colon=True)
def _wrap_set_annot_prop(prop, toggle_val):
if ut.VERBOSE:
print('[SETTING] Clicked set prop=%r to val=%r' %
(prop, toggle_val,))
ibs.set_annot_prop(prop, [aid], [toggle_val])
if ut.VERBOSE:
print('[SETTING] done')
annot_tag_options = []
for case, case_hotlink in zip(case_list, case_hotlink_list):
toggle_val = case.lower() not in tags
fmtstr = 'Mark %s case' if toggle_val else 'Untag %s'
annot_tag_options += [
#(fmtstr % (case_hotlink,), lambda:
#ibs.set_annotmatch_prop(case, _get_annotmatch_rowid(),
# [toggle_val])),
#(fmtstr % (case_hotlink,), partial(ibs.set_annotmatch_prop,
#case, [annotmatch_rowid], [toggle_val])),
(fmtstr % (case_hotlink,), partial(_wrap_set_annot_prop, case,
toggle_val)),
]
callback_list += [
('Set Annot Ta&gs', annot_tag_options),
]
callback_list += [
('Remove name', lambda: ibs.set_annot_name_rowids([aid], [-aid]))
]
callback_list += [
('Unset as e&xemplar' if is_exemplar else 'Set as e&xemplar',
toggle_exemplar_func),
]
annot_info = ibs.get_annot_info(
aid, default=True, gname=False, name=False, notes=False,
exemplar=False)
def print_annot_info():
print('[interact_chip] Annotation Info = ' + ut.obj_str(annot_info, nl=4))
print('config2_ = %r' % (config2_,))
if config2_ is not None:
print('config2_.__dict__ = %s' % (ut.repr3(config2_.__dict__),))
dev_callback_list = []
def dev_edit_annot_tags():
print('ibs = %r' % (ibs,))
text = ibs.get_annot_tag_text([aid])[0]
resp = gt.user_input(title='edit tags', msg=text, text=text)
if resp is not None:
try:
print('resp = %r' % (resp,))
print('[ctx] set_annot_tag_text aid=%r resp=%r' % (aid, resp))
ibs.set_annot_tag_text(aid, resp)
new_text = ibs.get_annot_tag_text([aid])[0]
print('new_text = %r' % (new_text,))
assert new_text == resp, 'should have had text change'
except Exception as ex:
ut.printex(ex, 'error in dev edit tags')
raise
dev_callback_list += [
('dev Edit Annot Ta&gs', dev_edit_annot_tags),
('dev print annot info', print_annot_info),
('dev refresh', pt.update),
]
if ut.is_developer():
def dev_debug():
print('aid = %r' % (aid,))
print('config2_ = %r' % (config2_,))
def dev_embed(ibs=ibs, aid=aid, config2_=config2_):
#import plottool as pt
#pt.plt.ioff()
# TODO need to disable matplotlib callbacks?
# Causes can't re-enter readline error
ut.embed()
#pt.plt.ion()
pass
dev_callback_list += [
('dev chip context embed', dev_embed),
('dev chip context debug', dev_debug),
]
if len(dev_callback_list) > 0:
callback_list += [('Dev', dev_callback_list)]
return callback_list
#def custom_chip_click(event):
# ax = event.inaxes
# if ih.clicked_outside_axis(event):
# pass
# else:
# viztype = vh.get_ibsdat(ax, 'viztype')
# print('[ic] viztype=%r' % viztype)
# if viztype == 'chip':
# if event.button == 3: # right-click
# from ibeis.viz.interact import interact_chip
# height = fig.canvas.geometry().height()
# qpoint = gt.newQPoint(event.x, height - event.y)
# refresh_func = partial(_chip_view, **kwargs)
# interact_chip.show_annot_context_menu(
# ibs, aid, fig.canvas, qpoint, refresh_func=refresh_func,
# with_interact_chip=False, config2_=config2_)
# CHIP INTERACTION 2
def ishow_chip(ibs, aid, fnum=2, fx=None, dodraw=True, config2_=None,
ischild=False, **kwargs):
r"""
# TODO:
split into two interactions
interact chip and interact chip features
Args:
ibs (IBEISController): ibeis controller object
aid (int): annotation id
fnum (int): figure number
fx (None):
CommandLine:
python -m ibeis.viz.interact.interact_chip --test-ishow_chip --show
python -m ibeis.viz.interact.interact_chip --test-ishow_chip --show --aid 2
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.viz.interact.interact_chip import * # NOQA
>>> import ibeis
>>> # build test data
>>> ibs = ibeis.opendb('testdb1')
>>> aid = ut.get_argval('--aid', type_=int, default=1)
>>> fnum = 2
>>> fx = None
>>> # execute function
>>> dodraw = ut.show_was_requested()
>>> result = ishow_chip(ibs, aid, fnum, fx, dodraw)
>>> # verify results
>>> pt.show_if_requested()
>>> print(result)
"""
fnum = pt.ensure_fnum(fnum)
vh.ibsfuncs.assert_valid_aids(ibs, (aid,))
# TODO: Reconcile this with interact keypoints.
# Preferably this will call that but it will set some fancy callbacks
if not ischild:
fig = ih.begin_interaction('chip', fnum)
else:
fig = pt.gcf()
#fig = pt.figure(fnum=fnum, pnum=pnum)
# Get chip info (make sure get_chips is called first)
#mode_ptr = [1]
mode_ptr = [0]
def _select_fxth_kpt(fx):
# Get the fx-th keypiont
chip = ibs.get_annot_chips(aid, config2_=config2_)
kp = ibs.get_annot_kpts(aid, config2_=config2_)[fx]
sift = ibs.get_annot_vecs(aid, config2_=config2_)[fx]
# Draw chip + keypoints + highlighted plots
_chip_view(pnum=(2, 1, 1), sel_fx=fx)
#ishow_chip(ibs, aid, fnum=None, fx=fx, config2_=config2_, **kwargs)
# Draw the selected feature plots
nRows, nCols, px = (2, 3, 3)
draw_feat_row(chip, fx, kp, sift, fnum, nRows, nCols, px, None)
def _chip_view(mode=0, pnum=(1, 1, 1), **kwargs):
print('... _chip_view mode=%r' % mode_ptr[0])
kwargs['ell'] = mode_ptr[0] == 1
kwargs['pts'] = mode_ptr[0] == 2
if not ischild:
df2.figure(fnum=fnum, pnum=pnum, docla=True, doclf=True)
# Toggle no keypoints view
viz.show_chip(ibs, aid, fnum=fnum, pnum=pnum, config2_=config2_,
**kwargs)
df2.set_figtitle('Chip View')
def _on_chip_click(event):
print('[inter] clicked chip')
ax, x, y = event.inaxes, event.xdata, event.ydata
if ih.clicked_outside_axis(event):
if not ischild:
print('... out of axis')
mode_ptr[0] = (mode_ptr[0] + 1) % 3
_chip_view(**kwargs)
else:
if event.button == 3: # right-click
import guitool as gt
#from ibeis.viz.interact import interact_chip
height = fig.canvas.geometry().height()
qpoint = gt.newQPoint(event.x, height - event.y)
refresh_func = partial(_chip_view, **kwargs)
callback_list = build_annot_context_options(
ibs, aid, refresh_func=refresh_func,
with_interact_chip=False,
config2_=config2_)
qwin = fig.canvas
gt.popup_menu(qwin, qpoint, callback_list)
#interact_chip.show_annot_context_menu(
# ibs, aid, fig.canvas, qpoint, refresh_func=refresh_func,
# with_interact_chip=False, config2_=config2_)
else:
viztype = vh.get_ibsdat(ax, 'viztype')
print('[ic] viztype=%r' % viztype)
if viztype == 'chip' and event.key == 'shift':
_chip_view(**kwargs)
ih.disconnect_callback(fig, 'button_press_event')
elif viztype == 'chip':
kpts = ibs.get_annot_kpts(aid, config2_=config2_)
if len(kpts) > 0:
fx = vt.nearest_point(
x, y, kpts, conflict_mode='next')[0]
print('... clicked fx=%r' % fx)
_select_fxth_kpt(fx)
else:
print('... len(kpts) == 0')
elif viztype in ['warped', 'unwarped']:
fx = vh.get_ibsdat(ax, 'fx')
if fx is not None and viztype == 'warped':
viz.show_keypoint_gradient_orientations(
ibs, aid, fx, fnum=df2.next_fnum())
else:
print('...Unknown viztype: %r' % viztype)
viz.draw()
# Draw without keypoints the first time
if fx is not None:
_select_fxth_kpt(fx)
else:
_chip_view(**kwargs)
if dodraw:
viz.draw()
if not ischild:
ih.connect_callback(fig, 'button_press_event', _on_chip_click)
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis.viz.interact.interact_chip
python -m ibeis.viz.interact.interact_chip --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| apache-2.0 |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/histogram2d/_colorbar.py | 1 | 73359 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "histogram2d"
_path_str = "histogram2d.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"len",
"lenmode",
"minexponent",
"nticks",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"titlefont",
"titleside",
"x",
"xanchor",
"xpad",
"y",
"yanchor",
"ypad",
}
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# minexponent
# -----------
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2d.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.histogram2d.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for dates
see: https://github.com/d3/d3-time-format#locale_format. We add
two items to d3's date formatter: "%h" for half of the year as
a decimal number as well as "%{n}f" for fractional seconds with
n digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.histogram2d.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.histogram2d.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.histogram2d.co
lorbar.tickformatstopdefaults), sets the default property
values to use for elements of
histogram2d.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2d.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.histogram2d.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklabeloverflow
# -----------------
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
# ticklabelposition
# -----------------
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn.
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
        Sets the source reference on Chart Studio Cloud for ticktext.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
        Sets the source reference on Chart Studio Cloud for tickvals.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2d.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
plotly.graph_objs.histogram2d.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use histogram2d.colorbar.title.font instead.
Sets this color bar's title font. Note that the title's font
used to be set by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2d.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
"""
Deprecated: Please use histogram2d.colorbar.title.side instead.
Determines the location of color bar's title with respect to
the color bar. Note that the title's location used to be set by
the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
"""
return self["titleside"]
@titleside.setter
def titleside(self, val):
self["titleside"] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
        Sets this color bar's vertical position anchor. This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
            Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
            Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
            A tuple of
            :class:`plotly.graph_objects.histogram2d.colorbar.Tickformatstop`
            instances or dicts with compatible properties
tickformatstopdefaults
            When used in a template (as
            layout.template.data.histogram2d.colorbar.tickformatstopdefaults),
            sets the default property values to use for elements of
            histogram2d.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
            this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
            ticktext.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
            tickvals.
tickwidth
Sets the tick width (in px).
title
            :class:`plotly.graph_objects.histogram2d.colorbar.Title`
            instance or dict with compatible properties
titlefont
Deprecated: Please use histogram2d.colorbar.title.font
instead. Sets this color bar's title font. Note that
the title's font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use histogram2d.colorbar.title.side
instead. Determines the location of color bar's title
with respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
            Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
"""
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram2d.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
            Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
            Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
            A tuple of
            :class:`plotly.graph_objects.histogram2d.colorbar.Tickformatstop`
            instances or dicts with compatible properties
tickformatstopdefaults
            When used in a template (as
            layout.template.data.histogram2d.colorbar.tickformatstopdefaults),
            sets the default property values to use for elements of
            histogram2d.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
            this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
            ticktext.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
            tickvals.
tickwidth
Sets the tick width (in px).
title
            :class:`plotly.graph_objects.histogram2d.colorbar.Title`
            instance or dict with compatible properties
titlefont
Deprecated: Please use histogram2d.colorbar.title.font
instead. Sets this color bar's title font. Note that
the title's font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use histogram2d.colorbar.title.side
instead. Determines the location of color bar's title
with respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
            Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.histogram2d.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2d.ColorBar`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("len", None)
_v = len if len is not None else _v
if _v is not None:
self["len"] = _v
_v = arg.pop("lenmode", None)
_v = lenmode if lenmode is not None else _v
if _v is not None:
self["lenmode"] = _v
_v = arg.pop("minexponent", None)
_v = minexponent if minexponent is not None else _v
if _v is not None:
self["minexponent"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("outlinecolor", None)
_v = outlinecolor if outlinecolor is not None else _v
if _v is not None:
self["outlinecolor"] = _v
_v = arg.pop("outlinewidth", None)
_v = outlinewidth if outlinewidth is not None else _v
if _v is not None:
self["outlinewidth"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("thicknessmode", None)
_v = thicknessmode if thicknessmode is not None else _v
if _v is not None:
self["thicknessmode"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklabeloverflow", None)
_v = ticklabeloverflow if ticklabeloverflow is not None else _v
if _v is not None:
self["ticklabeloverflow"] = _v
_v = arg.pop("ticklabelposition", None)
_v = ticklabelposition if ticklabelposition is not None else _v
if _v is not None:
self["ticklabelposition"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xpad", None)
_v = xpad if xpad is not None else _v
if _v is not None:
self["xpad"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("ypad", None)
_v = ypad if ypad is not None else _v
if _v is not None:
self["ypad"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
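if __name__ == "__main__":  # pragma: no cover
    # Hedged usage sketch, not part of the generated module: the colorbar
    # tick settings documented above are normally supplied to the trace
    # constructor as a dict rather than by instantiating ColorBar directly.
    # The data values below are made up for illustration only.
    import plotly.graph_objects as go

    fig = go.Figure(
        go.Histogram2d(
            x=[1, 1, 2, 2, 2, 3],
            y=[1, 2, 2, 3, 3, 3],
            colorbar=dict(
                ticks="outside",        # draw ticks outside the color bar
                ticklen=6,
                tickwidth=2,
                tickmode="array",       # explicit tick positions and labels
                tickvals=[0, 1, 2],
                ticktext=["none", "one", "two"],
                title=dict(text="count", side="right"),
            ),
        )
    )
    fig.show()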
| mit |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/matplotlib/tests/test_artist.py | 1 | 4940 |
import copy
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.path as mpath
import matplotlib.transforms as mtrans
import matplotlib.collections as mcollections
from matplotlib.testing.decorators import image_comparison, cleanup
@cleanup
def test_patch_transform_of_none():
# tests the behaviour of patches added to an Axes with various transform
# specifications
ax = plt.axes()
ax.set_xlim([1, 3])
ax.set_ylim([1, 3])
# Draw an ellipse over data coord (2,2) by specifying device coords.
xy_data = (2, 2)
xy_pix = ax.transData.transform_point(xy_data)
    # Not providing a transform of None puts the ellipse in data coordinates.
e = mpatches.Ellipse(xy_data, width=1, height=1, fc='yellow', alpha=0.5)
ax.add_patch(e)
assert e._transform == ax.transData
# Providing a transform of None puts the ellipse in device coordinates.
e = mpatches.Ellipse(xy_pix, width=120, height=120, fc='coral',
transform=None, alpha=0.5)
assert e.is_transform_set() is True
ax.add_patch(e)
assert isinstance(e._transform, mtrans.IdentityTransform)
# Providing an IdentityTransform puts the ellipse in device coordinates.
e = mpatches.Ellipse(xy_pix, width=100, height=100,
transform=mtrans.IdentityTransform(), alpha=0.5)
ax.add_patch(e)
assert isinstance(e._transform, mtrans.IdentityTransform)
# Not providing a transform, and then subsequently "get_transform" should
# not mean that "is_transform_set".
e = mpatches.Ellipse(xy_pix, width=120, height=120, fc='coral',
alpha=0.5)
intermediate_transform = e.get_transform()
assert e.is_transform_set() is False
ax.add_patch(e)
assert e.get_transform() != intermediate_transform
assert e.is_transform_set() is True
assert e._transform == ax.transData
@cleanup
def test_collection_transform_of_none():
# tests the behaviour of collections added to an Axes with various
# transform specifications
ax = plt.axes()
ax.set_xlim([1, 3])
ax.set_ylim([1, 3])
#draw an ellipse over data coord (2,2) by specifying device coords
xy_data = (2, 2)
xy_pix = ax.transData.transform_point(xy_data)
# not providing a transform of None puts the ellipse in data coordinates
e = mpatches.Ellipse(xy_data, width=1, height=1)
c = mcollections.PatchCollection([e], facecolor='yellow', alpha=0.5)
ax.add_collection(c)
# the collection should be in data coordinates
assert c.get_offset_transform() + c.get_transform() == ax.transData
# providing a transform of None puts the ellipse in device coordinates
e = mpatches.Ellipse(xy_pix, width=120, height=120)
c = mcollections.PatchCollection([e], facecolor='coral',
alpha=0.5)
c.set_transform(None)
ax.add_collection(c)
assert isinstance(c.get_transform(), mtrans.IdentityTransform)
# providing an IdentityTransform puts the ellipse in device coordinates
e = mpatches.Ellipse(xy_pix, width=100, height=100)
c = mcollections.PatchCollection([e], transform=mtrans.IdentityTransform(),
alpha=0.5)
ax.add_collection(c)
assert isinstance(c._transOffset, mtrans.IdentityTransform)
def test_point_in_path():
# Test #1787
verts2 = [(0,0), (0,1), (1,1), (1,0), (0,0)]
path = mpath.Path(verts2, closed=True)
points = [(0.5,0.5), (1.5,0.5)]
assert np.all(path.contains_points(points) == [True, False])
@image_comparison(baseline_images=["clip_path_clipping"], remove_text=True)
def test_clipping():
exterior = mpath.Path.unit_rectangle().deepcopy()
exterior.vertices *= 4
exterior.vertices -= 2
interior = mpath.Path.unit_circle().deepcopy()
interior.vertices = interior.vertices[::-1]
clip_path = mpath.Path(vertices=np.concatenate([exterior.vertices,
interior.vertices]),
codes=np.concatenate([exterior.codes,
interior.codes]))
star = mpath.Path.unit_regular_star(6).deepcopy()
star.vertices *= 2.6
ax1 = plt.subplot(121)
col = mcollections.PathCollection([star], lw=5, edgecolor='blue',
facecolor='red', alpha=0.7, hatch='*')
col.set_clip_path(clip_path, ax1.transData)
ax1.add_collection(col)
ax2 = plt.subplot(122, sharex=ax1, sharey=ax1)
patch = mpatches.PathPatch(star, lw=5, edgecolor='blue', facecolor='red',
alpha=0.7, hatch='*')
patch.set_clip_path(clip_path, ax2.transData)
ax2.add_patch(patch)
ax1.set_xlim([-3, 3])
ax1.set_ylim([-3, 3])
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s','--with-doctest'], exit=False)
| gpl-3.0 |
SpaceKatt/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/backends/qt4_editor/figureoptions.py | 3 | 4724 | # -*- coding: utf-8 -*-
#
# Copyright © 2009 Pierre Raybaut
# Licensed under the terms of the MIT License
# see the mpl licenses directory for a copy of the license
"""Module that provides a GUI-based editor for matplotlib's figure options"""
import os.path as osp
import matplotlib.backends.qt4_editor.formlayout as formlayout
from matplotlib.backends.qt4_compat import QtGui
from matplotlib import markers
def get_icon(name):
import matplotlib
basedir = osp.join(matplotlib.rcParams['datapath'], 'images')
return QtGui.QIcon(osp.join(basedir, name))
LINESTYLES = {
'-': 'Solid',
'--': 'Dashed',
'-.': 'DashDot',
':': 'Dotted',
'steps': 'Steps',
'none': 'None',
}
MARKERS = markers.MarkerStyle.markers
COLORS = {'b': '#0000ff', 'g': '#00ff00', 'r': '#ff0000', 'c': '#ff00ff',
'm': '#ff00ff', 'y': '#ffff00', 'k': '#000000', 'w': '#ffffff'}
def col2hex(color):
"""Convert matplotlib color to hex"""
return COLORS.get(color, color)
def figure_edit(axes, parent=None):
"""Edit matplotlib figure options"""
sep = (None, None) # separator
has_curve = len(axes.get_lines()) > 0
# Get / General
xmin, xmax = axes.get_xlim()
ymin, ymax = axes.get_ylim()
general = [('Title', axes.get_title()),
sep,
(None, "<b>X-Axis</b>"),
('Min', xmin), ('Max', xmax),
('Label', axes.get_xlabel()),
('Scale', [axes.get_xscale(), 'linear', 'log']),
sep,
(None, "<b>Y-Axis</b>"),
('Min', ymin), ('Max', ymax),
('Label', axes.get_ylabel()),
('Scale', [axes.get_yscale(), 'linear', 'log'])
]
if has_curve:
# Get / Curves
linedict = {}
for line in axes.get_lines():
label = line.get_label()
if label == '_nolegend_':
continue
linedict[label] = line
curves = []
linestyles = LINESTYLES.items()
markers = MARKERS.items()
curvelabels = sorted(linedict.keys())
for label in curvelabels:
line = linedict[label]
curvedata = [
('Label', label),
sep,
(None, '<b>Line</b>'),
('Style', [line.get_linestyle()] + linestyles),
('Width', line.get_linewidth()),
('Color', col2hex(line.get_color())),
sep,
(None, '<b>Marker</b>'),
('Style', [line.get_marker()] + markers),
('Size', line.get_markersize()),
('Facecolor', col2hex(line.get_markerfacecolor())),
('Edgecolor', col2hex(line.get_markeredgecolor())),
]
curves.append([curvedata, label, ""])
datalist = [(general, "Axes", "")]
if has_curve:
datalist.append((curves, "Curves", ""))
def apply_callback(data):
"""This function will be called to apply changes"""
if has_curve:
general, curves = data
else:
general, = data
# Set / General
title, xmin, xmax, xlabel, xscale, ymin, ymax, ylabel, yscale = general
axes.set_xscale(xscale)
axes.set_yscale(yscale)
axes.set_title(title)
axes.set_xlim(xmin, xmax)
axes.set_xlabel(xlabel)
axes.set_ylim(ymin, ymax)
axes.set_ylabel(ylabel)
if has_curve:
# Set / Curves
for index, curve in enumerate(curves):
line = linedict[curvelabels[index]]
label, linestyle, linewidth, color, \
marker, markersize, markerfacecolor, markeredgecolor = curve
line.set_label(label)
line.set_linestyle(linestyle)
line.set_linewidth(linewidth)
line.set_color(color)
                if marker != 'none':
line.set_marker(marker)
line.set_markersize(markersize)
line.set_markerfacecolor(markerfacecolor)
line.set_markeredgecolor(markeredgecolor)
# Redraw
figure = axes.get_figure()
figure.canvas.draw()
data = formlayout.fedit(datalist, title="Figure options", parent=parent,
icon=get_icon('qt4_editor_options.svg'), apply=apply_callback)
if data is not None:
apply_callback(data)
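def _demo_figure_edit(canvas):
    """Hedged, illustrative sketch only (not part of matplotlib): open the
    options dialog for the first axes of a Qt4 backend canvas, using the
    canvas widget as the dialog parent. Assumes a running QApplication and
    at least one axes on the figure."""
    axes = canvas.figure.get_axes()[0]
    figure_edit(axes, parent=canvas)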
| gpl-3.0 |
kazemakase/scikit-learn | sklearn/utils/multiclass.py | 92 | 13986 | # Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
import warnings
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_sequence_of_sequence(y):
if hasattr(y, '__array__'):
y = np.asarray(y)
return set(chain.from_iterable(y))
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-sequences': _unique_sequence_of_sequence,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1] for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %r" % ys)
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_label_indicator_matrix(y):
""" Check if ``y`` is in the label indicator matrix format (multilabel).
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a label indicator matrix format,
else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_label_indicator_matrix
>>> is_label_indicator_matrix([0, 1, 0, 1])
False
>>> is_label_indicator_matrix([[1], [0, 2], []])
False
>>> is_label_indicator_matrix(np.array([[1, 0], [0, 0]]))
True
>>> is_label_indicator_matrix(np.array([[1], [0], [0]]))
False
>>> is_label_indicator_matrix(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def is_sequence_of_sequences(y):
""" Check if ``y`` is in the sequence of sequences format (multilabel).
This format is DEPRECATED.
Parameters
----------
y : sequence or array.
Returns
-------
out : bool,
Return ``True``, if ``y`` is a sequence of sequences else ``False``.
"""
# the explicit check for ndarray is for forward compatibility; future
# versions of Numpy might want to register ndarray as a Sequence
try:
if hasattr(y, '__array__'):
y = np.asarray(y)
out = (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types))
except (IndexError, TypeError):
return False
if out:
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
return out
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
return is_label_indicator_matrix(y) or is_sequence_of_sequences(y)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-sequences': `y` is a sequence of sequences, a 1d
array-like of objects that are sequences of labels.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_sequence_of_sequences(y):
return 'multilabel-sequences'
elif is_label_indicator_matrix(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# known to fail in numpy 1.3 for array of arrays
return 'unknown'
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown'
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown'
elif y.ndim == 2 and y.shape[1] > 1:
suffix = '-multioutput'
else:
# column vector or 1d
suffix = ''
# check float and contains non-integer float values:
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
return 'continuous' + suffix
if len(np.unique(y)) <= 2:
assert not suffix, "2d binary array-like should be multilabel"
return 'binary'
else:
return 'multiclass' + suffix
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
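if __name__ == "__main__":  # pragma: no cover
    # Hedged, illustrative usage only (not part of scikit-learn); the target
    # values below are made up for this sketch.
    # class_distribution on a small dense multioutput target:
    _y_demo = np.array([[0, 1], [1, 1], [1, 2]])
    print(class_distribution(_y_demo))

    # _check_partial_fit_first_call expects an estimator-like object; the
    # bare class below is a stand-in defined only for this sketch.
    class _DemoClf(object):
        pass

    _clf = _DemoClf()
    print(_check_partial_fit_first_call(_clf, classes=[0, 1, 2]))  # True
    print(_check_partial_fit_first_call(_clf))                     # False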
| bsd-3-clause |
dacoex/pvlib-python | pvlib/pvsystem.py | 2 | 43007 | """
The ``pvsystem`` module contains functions for modeling the output and
performance of PV modules and inverters.
"""
from __future__ import division
import logging
pvl_logger = logging.getLogger('pvlib')
import io
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
import numpy as np
import pandas as pd
from pvlib import tools
def systemdef(meta, surface_tilt, surface_azimuth, albedo, series_modules,
parallel_modules):
'''
Generates a dict of system parameters used throughout a simulation.
Parameters
----------
meta : dict
meta dict either generated from a TMY file using readtmy2 or readtmy3,
or a dict containing at least the following fields:
=============== ====== ====================
meta field format description
=============== ====== ====================
meta.altitude Float site elevation
meta.latitude Float site latitude
meta.longitude Float site longitude
meta.Name String site name
meta.State String state
meta.TZ Float timezone
=============== ====== ====================
surface_tilt : float or Series
Surface tilt angles in decimal degrees.
The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90)
surface_azimuth : float or Series
Surface azimuth angles in decimal degrees.
The azimuth convention is defined
as degrees east of north
(North=0, South=180, East=90, West=270).
albedo : float or Series
Ground reflectance, typically 0.1-0.4 for
surfaces on Earth (land), may increase over snow, ice, etc. May also
be known as the reflection coefficient. Must be >=0 and <=1.
series_modules : int
Number of modules connected in series in a string.
parallel_modules : int
Number of strings connected in parallel.
Returns
-------
Result : dict
A dict with the following fields.
* 'surface_tilt'
* 'surface_azimuth'
* 'albedo'
* 'series_modules'
* 'parallel_modules'
* 'latitude'
* 'longitude'
* 'tz'
* 'name'
* 'altitude'
See also
--------
pvlib.tmy.readtmy3
pvlib.tmy.readtmy2
'''
try:
name = meta['Name']
except KeyError:
name = meta['City']
system = {'surface_tilt': surface_tilt,
'surface_azimuth': surface_azimuth,
'albedo': albedo,
'series_modules': series_modules,
'parallel_modules': parallel_modules,
'latitude': meta['latitude'],
'longitude': meta['longitude'],
'tz': meta['TZ'],
'name': name,
'altitude': meta['altitude']}
return system
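def _systemdef_example():
    """Hedged, illustrative sketch only (not part of pvlib): build a system
    dict from hand-written metadata instead of a TMY header. The metadata
    values are made up for the example."""
    meta = {'latitude': 35.1, 'longitude': -106.6, 'altitude': 1619,
            'Name': 'Albuquerque', 'State': 'NM', 'TZ': -7}
    return systemdef(meta, surface_tilt=30, surface_azimuth=180, albedo=0.2,
                     series_modules=10, parallel_modules=2)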
def ashraeiam(b, aoi):
'''
Determine the incidence angle modifier using the ASHRAE transmission model.
ashraeiam calculates the incidence angle modifier as developed in
[1], and adopted by ASHRAE (American Society of Heating, Refrigeration,
and Air Conditioning Engineers) [2]. The model has been used by model
programs such as PVSyst [3].
Note: For incident angles near 90 degrees, this model has a
discontinuity which has been addressed in this function.
Parameters
----------
b : float
A parameter to adjust the modifier as a function of angle of
incidence. Typical values are on the order of 0.05 [3].
aoi : Series
The angle of incidence between the module normal vector and the
sun-beam vector in degrees.
Returns
-------
IAM : Series
The incident angle modifier calculated as 1-b*(sec(aoi)-1) as
described in [2,3].
Returns nan for all abs(aoi) >= 90 and for all IAM values
that would be less than 0.
References
----------
    [1] Souka A.F., Safwat H.H., "Determination of the optimum orientations
    for the double exposure flat-plate collector and its reflections".
    Solar Energy vol. 10, pp 170-174. 1966.
[2] ASHRAE standard 93-77
[3] PVsyst Contextual Help.
http://files.pvsyst.com/help/index.html?iam_loss.htm retrieved on
September 10, 2012
See Also
--------
irradiance.aoi
physicaliam
'''
IAM = 1 - b*((1/np.cos(np.radians(aoi)) - 1))
IAM[abs(aoi) >= 90] = np.nan
IAM[IAM < 0] = np.nan
return IAM
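def _ashraeiam_example():
    """Hedged, illustrative sketch only (not part of pvlib): evaluate the
    ASHRAE modifier 1 - b*(sec(aoi) - 1) for a few angles of incidence,
    using the commonly cited b = 0.05. Angles at or beyond 90 degrees
    return NaN, as described above."""
    aoi = pd.Series([5., 30., 60., 85., 95.])
    return ashraeiam(b=0.05, aoi=aoi)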
def physicaliam(K, L, n, aoi):
'''
Determine the incidence angle modifier using refractive
index, glazing thickness, and extinction coefficient
physicaliam calculates the incidence angle modifier as described in
De Soto et al. "Improvement and validation of a model for photovoltaic
array performance", section 3. The calculation is based upon a physical
    model of absorption and transmission through a cover. Required
    information includes incident angle, cover extinction coefficient,
    and cover thickness.
Note: The authors of this function believe that eqn. 14 in [1] is
incorrect. This function uses the following equation in its place:
theta_r = arcsin(1/n * sin(theta))
Parameters
----------
K : float
The glazing extinction coefficient in units of 1/meters. Reference
[1] indicates that a value of 4 is reasonable for "water white"
glass. K must be a numeric scalar or vector with all values >=0. If K
is a vector, it must be the same size as all other input vectors.
L : float
The glazing thickness in units of meters. Reference [1] indicates
that 0.002 meters (2 mm) is reasonable for most glass-covered
PV panels. L must be a numeric scalar or vector with all values >=0.
If L is a vector, it must be the same size as all other input vectors.
n : float
The effective index of refraction (unitless). Reference [1]
indicates that a value of 1.526 is acceptable for glass. n must be a
numeric scalar or vector with all values >=0. If n is a vector, it
must be the same size as all other input vectors.
aoi : Series
The angle of incidence between the module normal vector and the
sun-beam vector in degrees.
Returns
-------
IAM : float or Series
The incident angle modifier as specified in eqns. 14-16 of [1].
IAM is a column vector with the same number of elements as the
largest input vector.
Theta must be a numeric scalar or vector.
For any values of theta where abs(aoi)>90, IAM is set to 0. For any
values of aoi where -90 < aoi < 0, theta is set to abs(aoi) and
evaluated.
References
----------
[1] W. De Soto et al., "Improvement and validation of a model for
photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
2006.
[2] Duffie, John A. & Beckman, William A.. (2006). Solar Engineering
of Thermal Processes, third edition. [Books24x7 version] Available
from http://common.books24x7.com/toc.aspx?bookid=17160.
See Also
--------
getaoi
ephemeris
spa
ashraeiam
'''
thetar_deg = tools.asind(1.0 / n*(tools.sind(aoi)))
tau = ( np.exp(- 1.0 * (K*L / tools.cosd(thetar_deg))) *
((1 - 0.5*((((tools.sind(thetar_deg - aoi)) ** 2) /
((tools.sind(thetar_deg + aoi)) ** 2) +
((tools.tand(thetar_deg - aoi)) ** 2) /
((tools.tand(thetar_deg + aoi)) ** 2))))) )
zeroang = 1e-06
thetar_deg0 = tools.asind(1.0 / n*(tools.sind(zeroang)))
tau0 = ( np.exp(- 1.0 * (K*L / tools.cosd(thetar_deg0))) *
((1 - 0.5*((((tools.sind(thetar_deg0 - zeroang)) ** 2) /
((tools.sind(thetar_deg0 + zeroang)) ** 2) +
((tools.tand(thetar_deg0 - zeroang)) ** 2) /
((tools.tand(thetar_deg0 + zeroang)) ** 2))))) )
IAM = tau / tau0
IAM[abs(aoi) >= 90] = np.nan
IAM[IAM < 0] = np.nan
return IAM
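def _physicaliam_example():
    """Hedged, illustrative sketch only (not part of pvlib): evaluate the
    physical IAM model with the glass parameters suggested in the docstring
    above (K=4 1/m, L=0.002 m, n=1.526)."""
    aoi = pd.Series([5., 30., 60., 85.])
    return physicaliam(K=4, L=0.002, n=1.526, aoi=aoi)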
def calcparams_desoto(poa_global, temp_cell, alpha_isc, module_parameters,
EgRef, dEgdT, M=1, irrad_ref=1000, temp_ref=25):
'''
Applies the temperature and irradiance corrections to
inputs for singlediode.
Applies the temperature and irradiance corrections to the IL, I0,
Rs, Rsh, and a parameters at reference conditions (IL_ref, I0_ref,
etc.) according to the De Soto et. al description given in [1]. The
results of this correction procedure may be used in a single diode
model to determine IV curves at irradiance = S, cell temperature =
Tcell.
Parameters
----------
poa_global : float or Series
The irradiance (in W/m^2) absorbed by the module.
temp_cell : float or Series
The average cell temperature of cells within a module in C.
alpha_isc : float
The short-circuit current temperature coefficient of the
module in units of 1/C.
module_parameters : dict
Parameters describing PV module performance at reference
conditions according to DeSoto's paper. Parameters may be
generated or found by lookup. For ease of use,
retrieve_sam can automatically generate a dict based on the
most recent SAM CEC module
database. The module_parameters dict must contain the
following 5 fields:
* a_ref - modified diode ideality factor parameter at
reference conditions (units of eV), a_ref can be calculated
from the usual diode ideality factor (n),
number of cells in series (Ns),
and cell temperature (Tcell) per equation (2) in [1].
* I_L_ref - Light-generated current (or photocurrent)
in amperes at reference conditions. This value is referred to
as Iph in some literature.
* I_o_ref - diode reverse saturation current in amperes,
under reference conditions.
* R_sh_ref - shunt resistance under reference conditions (ohms).
* R_s - series resistance under reference conditions (ohms).
EgRef : float
The energy bandgap at reference temperature (in eV).
1.121 eV for silicon. EgRef must be >0.
dEgdT : float
The temperature dependence of the energy bandgap at SRC (in 1/C).
May be either a scalar value (e.g. -0.0002677 as in [1]) or a
DataFrame of dEgdT values corresponding to each input condition (this
may be useful if dEgdT is a function of temperature).
M : float or Series (optional, default=1)
An optional airmass modifier, if omitted, M is given a value of 1,
which assumes absolute (pressure corrected) airmass = 1.5. In this
code, M is equal to M/Mref as described in [1] (i.e. Mref is assumed
to be 1). Source [1] suggests that an appropriate value for M
as a function absolute airmass (AMa) may be:
>>> M = np.polyval([-0.000126, 0.002816, -0.024459, 0.086257, 0.918093],
... AMa) # doctest: +SKIP
M may be a Series.
irrad_ref : float (optional, default=1000)
Reference irradiance in W/m^2.
temp_ref : float (optional, default=25)
Reference cell temperature in C.
Returns
-------
Tuple of the following results:
photocurrent : float or Series
Light-generated current in amperes at irradiance=S and
cell temperature=Tcell.
saturation_current : float or Series
Diode saturation current in amperes at irradiance
S and cell temperature Tcell.
resistance_series : float
Series resistance in ohms at irradiance S and cell temperature Tcell.
resistance_shunt : float or Series
Shunt resistance in ohms at irradiance S and cell temperature Tcell.
nNsVth : float or Series
Modified diode ideality factor at irradiance S and cell temperature
Tcell. Note that in source [1] nNsVth = a (equation 2). nNsVth is the
product of the usual diode ideality factor (n), the number of
series-connected cells in the module (Ns), and the thermal voltage
of a cell in the module (Vth) at a cell temperature of Tcell.
References
----------
[1] W. De Soto et al., "Improvement and validation of a model for
photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
2006.
[2] System Advisor Model web page. https://sam.nrel.gov.
[3] A. Dobos, "An Improved Coefficient Calculator for the California
Energy Commission 6 Parameter Photovoltaic Module Model", Journal of
Solar Energy Engineering, vol 134, 2012.
[4] O. Madelung, "Semiconductors: Data Handbook, 3rd ed." ISBN
3-540-40488-0
See Also
--------
sapm
sapm_celltemp
singlediode
retrieve_sam
Notes
-----
If the reference parameters in the ModuleParameters struct are read
from a database or library of parameters (e.g. System Advisor Model),
it is important to use the same EgRef and dEgdT values that
were used to generate the reference parameters, regardless of the
actual bandgap characteristics of the semiconductor. For example, in
the case of the System Advisor Model library, created as described in
[3], EgRef and dEgdT for all modules were 1.121 and -0.0002677,
respectively.
This table of reference bandgap energies (EgRef), bandgap energy
temperature dependence (dEgdT), and "typical" airmass response (M) is
provided purely as reference to those who may generate their own
reference module parameters (a_ref, IL_ref, I0_ref, etc.) based upon the
various PV semiconductors. Again, we stress the importance of
using identical EgRef and dEgdT when generating the reference
parameters and when modifying the reference parameters (for irradiance,
temperature, and airmass) per DeSoto's equations.
Silicon (Si):
* EgRef = 1.121
* dEgdT = -0.0002677
>>> M = np.polyval([-1.26E-4, 2.816E-3, -0.024459, 0.086257, 0.918093],
... AMa) # doctest: +SKIP
Source: [1]
Cadmium Telluride (CdTe):
* EgRef = 1.475
* dEgdT = -0.0003
>>> M = np.polyval([-2.46E-5, 9.607E-4, -0.0134, 0.0716, 0.9196],
... AMa) # doctest: +SKIP
Source: [4]
Copper Indium diSelenide (CIS):
* EgRef = 1.010
* dEgdT = -0.00011
>>> M = np.polyval([-3.74E-5, 0.00125, -0.01462, 0.0718, 0.9210],
... AMa) # doctest: +SKIP
Source: [4]
Copper Indium Gallium diSelenide (CIGS):
* EgRef = 1.15
* dEgdT = ????
>>> M = np.polyval([-9.07E-5, 0.0022, -0.0202, 0.0652, 0.9417],
... AMa) # doctest: +SKIP
Source: Wikipedia
Gallium Arsenide (GaAs):
* EgRef = 1.424
* dEgdT = -0.000433
* M = unknown
Source: [4]
'''
# np.maximum gives the element-wise clip at zero intended here;
# np.max(M, 0) would instead reduce a Series along axis 0.
M = np.maximum(M, 0)
a_ref = module_parameters['a_ref']
IL_ref = module_parameters['I_L_ref']
I0_ref = module_parameters['I_o_ref']
Rsh_ref = module_parameters['R_sh_ref']
Rs_ref = module_parameters['R_s']
k = 8.617332478e-05
Tref_K = temp_ref + 273.15
Tcell_K = temp_cell + 273.15
E_g = EgRef * (1 + dEgdT*(Tcell_K - Tref_K))
nNsVth = a_ref * (Tcell_K / Tref_K)
IL = (poa_global/irrad_ref) * M * (IL_ref + alpha_isc * (Tcell_K - Tref_K))
I0 = ( I0_ref * ((Tcell_K / Tref_K) ** 3) *
(np.exp(EgRef / (k*(Tref_K)) - (E_g / (k*(Tcell_K))))) )
Rsh = Rsh_ref * (irrad_ref / poa_global)
Rs = Rs_ref
return IL, I0, Rs, Rsh, nNsVth
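# A hedged usage sketch for calcparams_desoto. The module parameters below
# are placeholders in a plausible range for a crystalline-Si module, not
# values taken from any real database entry:
#
#   >>> params = {'a_ref': 1.6, 'I_L_ref': 5.7, 'I_o_ref': 1e-9,
#   ...           'R_sh_ref': 300., 'R_s': 0.5}
#   >>> IL, I0, Rs, Rsh, nNsVth = calcparams_desoto(
#   ...     poa_global=800., temp_cell=45., alpha_isc=0.003,
#   ...     module_parameters=params, EgRef=1.121, dEgdT=-0.0002677)
#
# The five outputs can be fed to singlediode() further below (together with
# a module record that supplies 'V_oc_ref').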
def retrieve_sam(name=None, samfile=None):
'''
Retrieve latest module and inverter info from SAM website.
This function will retrieve either:
* CEC module database
* Sandia Module database
* CEC Inverter database
and return it as a pandas dataframe.
Parameters
----------
name : String
Name can be one of:
* 'CECMod' - returns the CEC module database
* 'CECInverter' - returns the CEC Inverter database
* 'SandiaInverter' - returns the CEC Inverter database (CEC is only current
inverter db available; tag kept for backwards compatibility)
* 'SandiaMod' - returns the Sandia Module database
samfile : String
Absolute path to the location of local versions of the SAM file.
If file is specified, the latest versions of the SAM database will
not be downloaded. The selected file must be in .csv format.
If set to 'select', a dialogue will open allowing the user to navigate
to the appropriate page.
Returns
-------
A DataFrame containing all the elements of the desired database.
Each column represents a module or inverter, and a specific dataset
can be retrieved with attribute access, as shown in the Examples below.
Examples
--------
>>> from pvlib import pvsystem
>>> invdb = pvsystem.retrieve_sam(name='CECInverter')
>>> inverter = invdb.AE_Solar_Energy__AE6_0__277V__277V__CEC_2012_
>>> inverter
Vac 277.000000
Paco 6000.000000
Pdco 6165.670000
Vdco 361.123000
Pso 36.792300
C0 -0.000002
C1 -0.000047
C2 -0.001861
C3 0.000721
Pnt 0.070000
Vdcmax 600.000000
Idcmax 32.000000
Mppt_low 200.000000
Mppt_high 500.000000
Name: AE_Solar_Energy__AE6_0__277V__277V__CEC_2012_, dtype: float64
'''
if name is not None:
name = name.lower()
if name == 'cecmod':
url = 'https://sam.nrel.gov/sites/sam.nrel.gov/files/sam-library-cec-modules-2015-6-30.csv'
elif name == 'sandiamod':
url = 'https://sam.nrel.gov/sites/sam.nrel.gov/files/sam-library-sandia-modules-2015-6-30.csv'
elif name in ['cecinverter', 'sandiainverter']: # Allowing either, to provide for old code, while aligning with current expectations
url = 'https://sam.nrel.gov/sites/sam.nrel.gov/files/sam-library-cec-inverters-2015-6-30.csv'
elif samfile is None:
raise ValueError('invalid name {}'.format(name))
if name is None and samfile is None:
raise ValueError('must supply name or samfile')
if samfile is None:
pvl_logger.info('retrieving %s from %s', name, url)
response = urlopen(url)
csvdata = io.StringIO(response.read().decode(errors='ignore'))
elif samfile == 'select':
import Tkinter
from tkFileDialog import askopenfilename
Tkinter.Tk().withdraw()
csvdata = askopenfilename()
else:
csvdata = samfile
return _parse_raw_sam_df(csvdata)
def _parse_raw_sam_df(csvdata):
df = pd.read_csv(csvdata, index_col=0, skiprows=[1,2])
colnames = df.columns.values.tolist()
parsedcolnames = []
for cn in colnames:
parsedcolnames.append(cn.replace(' ', '_'))
df.columns = parsedcolnames
parsedindex = []
for index in df.index:
parsedindex.append(index.replace(' ', '_').replace('-', '_')
.replace('.', '_').replace('(', '_')
.replace(')', '_').replace('[', '_')
.replace(']', '_').replace(':', '_')
.replace('+', '_').replace('/', '_')
.replace('"', '_').replace(',', '_'))
df.index = parsedindex
df = df.transpose()
return df
def sapm(module, poa_direct, poa_diffuse, temp_cell, airmass_absolute, aoi):
'''
The Sandia PV Array Performance Model (SAPM) generates 5 points on a PV
module's I-V curve (Voc, Isc, Ix, Ixx, Vmp/Imp) according to
SAND2004-3535. Assumes a reference cell temperature of 25 C.
Parameters
----------
module : Series or dict
A DataFrame defining the SAPM performance parameters.
poa_direct : Series
The direct irradiance incident upon the module (W/m^2).
poa_diffuse : Series
The diffuse irradiance incident on module.
temp_cell : Series
The cell temperature (degrees C).
airmass_absolute : Series
Absolute airmass.
aoi : Series
Angle of incidence (degrees).
Returns
-------
A DataFrame with the columns:
* i_sc : Short-circuit current (A)
* I_mp : Current at the maximum-power point (A)
* v_oc : Open-circuit voltage (V)
* v_mp : Voltage at maximum-power point (V)
* p_mp : Power at maximum-power point (W)
* i_x : Current at module V = 0.5Voc, defines 4th point on I-V
curve for modeling curve shape
* i_xx : Current at module V = 0.5(Voc+Vmp), defines 5th point on
I-V curve for modeling curve shape
* effective_irradiance : Effective irradiance
Notes
-----
The coefficients from SAPM which are required in ``module`` are:
======== ===============================================================
Key Description
======== ===============================================================
A0-A4 The airmass coefficients used in calculating
effective irradiance
B0-B5 The angle of incidence coefficients used in calculating
effective irradiance
C0-C7 The empirically determined coefficients relating
Imp, Vmp, Ix, and Ixx to effective irradiance
Isco Short circuit current at reference condition (amps)
Impo Maximum power current at reference condition (amps)
Aisc Short circuit current temperature coefficient at
reference condition (1/C)
Aimp Maximum power current temperature coefficient at
reference condition (1/C)
Bvoco Open circuit voltage temperature coefficient at
reference condition (V/C)
Mbvoc Coefficient providing the irradiance dependence for the BetaVoc
temperature coefficient at reference irradiance (V/C)
Bvmpo Maximum power voltage temperature coefficient at
reference condition
Mbvmp Coefficient providing the irradiance dependence for the
BetaVmp temperature coefficient at reference irradiance (V/C)
N Empirically determined "diode factor" (dimensionless)
Cells_in_Series Number of cells in series in a module's cell string(s)
IXO Ix at reference conditions
IXXO Ixx at reference conditions
FD Fraction of diffuse irradiance used by module
======== ===============================================================
References
----------
[1] King, D. et al, 2004, "Sandia Photovoltaic Array Performance Model",
SAND Report 3535, Sandia National Laboratories, Albuquerque, NM.
See Also
--------
retrieve_sam
sapm_celltemp
'''
T0 = 25
q = 1.60218e-19 # Elementary charge in units of coulombs
kb = 1.38066e-23 # Boltzmann's constant in units of J/K
E0 = 1000
am_coeff = [module['A4'], module['A3'], module['A2'], module['A1'],
module['A0']]
aoi_coeff = [module['B5'], module['B4'], module['B3'], module['B2'],
module['B1'], module['B0']]
F1 = np.polyval(am_coeff, airmass_absolute)
F2 = np.polyval(aoi_coeff, aoi)
# Ee is the "effective irradiance"
Ee = F1 * ( (poa_direct*F2 + module['FD']*poa_diffuse) / E0 )
Ee.fillna(0, inplace=True)
Ee = Ee.clip_lower(0)
Bvmpo = module['Bvmpo'] + module['Mbvmp']*(1 - Ee)
Bvoco = module['Bvoco'] + module['Mbvoc']*(1 - Ee)
delta = module['N'] * kb * (temp_cell + 273.15) / q
dfout = pd.DataFrame(index=Ee.index)
dfout['i_sc'] = (
module['Isco'] * Ee * (1 + module['Aisc']*(temp_cell - T0)) )
dfout['i_mp'] = ( module['Impo'] *
(module['C0']*Ee + module['C1']*(Ee**2)) *
(1 + module['Aimp']*(temp_cell - T0)) )
dfout['v_oc'] = (( module['Voco'] +
module['Cells_in_Series']*delta*np.log(Ee) + Bvoco*(temp_cell - T0) )
.clip_lower(0))
dfout['v_mp'] = ( module['Vmpo'] +
module['C2']*module['Cells_in_Series']*delta*np.log(Ee) +
module['C3']*module['Cells_in_Series']*((delta*np.log(Ee)) ** 2) +
Bvmpo*(temp_cell - T0) ).clip_lower(0)
dfout['p_mp'] = dfout['i_mp'] * dfout['v_mp']
dfout['i_x'] = ( module['IXO'] *
(module['C4']*Ee + module['C5']*(Ee**2)) *
(1 + module['Aisc']*(temp_cell - T0)) )
# the Ixx calculation in King 2004 has a typo (mixes up Aisc and Aimp)
dfout['i_xx'] = ( module['IXXO'] *
(module['C6']*Ee + module['C7']*(Ee**2)) *
(1 + module['Aisc']*(temp_cell - T0)) )
dfout['effective_irradiance'] = Ee
return dfout
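# Illustrative call pattern for sapm(). The module key shown is only an
# example of the naming convention in the downloaded Sandia database, and
# the irradiance/temperature series are assumed to exist already:
#
#   >>> sandia_modules = retrieve_sam('SandiaMod')
#   >>> module = sandia_modules.Canadian_Solar_CS5P_220M___2009_
#   >>> out = sapm(module, poa_direct, poa_diffuse,
#   ...            temp_cell, airmass_absolute, aoi)
#   >>> out[['i_mp', 'v_mp', 'p_mp']]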
def sapm_celltemp(irrad, wind, temp, model='open_rack_cell_glassback'):
'''
Estimate cell and module temperatures per the Sandia PV Array
Performance Model (SAPM, SAND2004-3535), from the incident
irradiance, wind speed, ambient temperature, and SAPM module
parameters.
Parameters
----------
irrad : float or Series
Total incident irradiance in W/m^2.
wind : float or Series
Wind speed in m/s at a height of 10 meters.
temp : float or Series
Ambient dry bulb temperature in degrees C.
model : string or list
Model to be used.
If string, can be:
* 'open_rack_cell_glassback' (default)
* 'roof_mount_cell_glassback'
* 'open_rack_cell_polymerback'
* 'insulated_back_polymerback'
* 'open_rack_polymer_thinfilm_steel'
* '22x_concentrator_tracker'
If list, supply the following parameters in the following order:
* a : float
SAPM module parameter for establishing the upper
limit for module temperature at low wind speeds and
high solar irradiance.
* b : float
SAPM module parameter for establishing the rate at
which the module temperature drops as wind speed increases
(see SAPM eqn. 11).
* deltaT : float
SAPM module parameter giving the temperature difference
between the cell and module back surface at the
reference irradiance, E0.
Returns
--------
DataFrame with columns 'temp_cell' and 'temp_module'.
Values in degrees C.
References
----------
[1] King, D. et al, 2004, "Sandia Photovoltaic Array Performance Model",
SAND Report 3535, Sandia National Laboratories, Albuquerque, NM.
See Also
--------
sapm
'''
temp_models = {'open_rack_cell_glassback': [-3.47, -.0594, 3],
'roof_mount_cell_glassback': [-2.98, -.0471, 1],
'open_rack_cell_polymerback': [-3.56, -.0750, 3],
'insulated_back_polymerback': [-2.81, -.0455, 0],
'open_rack_polymer_thinfilm_steel': [-3.58, -.113, 3],
'22x_concentrator_tracker': [-3.23, -.130, 13]
}
if isinstance(model, str):
model = temp_models[model.lower()]
elif isinstance(model, list):
model = model
a = model[0]
b = model[1]
deltaT = model[2]
E0 = 1000. # Reference irradiance
temp_module = pd.Series(irrad*np.exp(a + b*wind) + temp)
temp_cell = temp_module + (irrad / E0)*(deltaT)
return pd.DataFrame({'temp_cell': temp_cell, 'temp_module': temp_module})
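# Quick sketch of sapm_celltemp; the inputs are arbitrary single-element
# Series, not measured data:
#
#   >>> temps = sapm_celltemp(irrad=pd.Series([900.]),
#   ...                       wind=pd.Series([5.]),
#   ...                       temp=pd.Series([20.]))
#   >>> temps[['temp_cell', 'temp_module']]
#
# With the default 'open_rack_cell_glassback' coefficients this gives a cell
# temperature roughly 20 C above ambient.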
def singlediode(module, photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth):
'''
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]
.. math::
I = IL - I0*[exp((V+I*Rs)/(nNsVth))-1] - (V + I*Rs)/Rsh
for ``I`` and ``V`` when given
``IL, I0, Rs, Rsh,`` and ``nNsVth (nNsVth = n*Ns*Vth)`` which
are described later. Returns a DataFrame which contains
the 5 points on the I-V curve specified in SAND2004-3535 [3].
If all IL, I0, Rs, Rsh, and nNsVth are scalar, a single curve
will be returned, if any are Series (of the same length), multiple IV
curves will be calculated.
The input parameters can be calculated using calcparams_desoto from
meteorological data.
Parameters
----------
module : DataFrame
A DataFrame defining the SAPM performance parameters.
photocurrent : float or Series
Light-generated current (photocurrent) in amperes under desired IV
curve conditions. Often abbreviated ``I_L``.
saturation_current : float or Series
Diode saturation current in amperes under desired IV curve
conditions. Often abbreviated ``I_0``.
resistance_series : float or Series
Series resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rs``.
resistance_shunt : float or Series
Shunt resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rsh``.
nNsVth : float or Series
The product of three components. 1) The usual diode ideal
factor (n), 2) the number of cells in series (Ns), and 3) the cell
thermal voltage under the desired IV curve conditions (Vth).
The thermal voltage of the cell (in volts) may be calculated as
``k*temp_cell/q``, where k is Boltzmann's constant (J/K),
temp_cell is the temperature of the p-n junction in Kelvin,
and q is the charge of an electron (coulombs).
Returns
-------
If ``photocurrent`` is a Series, a DataFrame with the following columns.
All columns have the same number of rows as the largest input DataFrame.
If ``photocurrent`` is a scalar, a dict with the following keys.
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
Notes
-----
The solution employed to solve the implicit diode equation utilizes
the Lambert W function to obtain an explicit function of V=f(i) and
I=f(V) as shown in [2].
References
-----------
[1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics"
ISBN 0 86758 909 4
[2] A. Jain, A. Kapoor, "Exact analytical solutions of the parameters of
real solar cells using Lambert W-function", Solar Energy Materials
and Solar Cells, 81 (2004) 269-277.
[3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
See also
--------
sapm
calcparams_desoto
'''
pvl_logger.debug('pvsystem.singlediode')
# Find short circuit current using Lambert W
i_sc = i_from_v(resistance_shunt, resistance_series, nNsVth, 0.01,
saturation_current, photocurrent)
params = {'r_sh': resistance_shunt,
'r_s': resistance_series,
'nNsVth': nNsVth,
'i_0': saturation_current,
'i_l': photocurrent}
__, v_oc = _golden_sect_DataFrame(params, 0, module['V_oc_ref']*1.6,
_v_oc_optfcn)
p_mp, v_mp = _golden_sect_DataFrame(params, 0, module['V_oc_ref']*1.14,
_pwr_optfcn)
# Invert the Power-Current curve. Find the current where the inverted power
# is minimized. This is i_mp. Start the optimization at v_oc/2
i_mp = i_from_v(resistance_shunt, resistance_series, nNsVth, v_mp,
saturation_current, photocurrent)
# Find Ix and Ixx using Lambert W
i_x = i_from_v(resistance_shunt, resistance_series, nNsVth,
0.5*v_oc, saturation_current, photocurrent)
i_xx = i_from_v(resistance_shunt, resistance_series, nNsVth,
0.5*(v_oc+v_mp), saturation_current, photocurrent)
# @wholmgren: need to move this stuff to a different function
# If the user says they want a curve with a number of points equal to
# NumPoints (must be >=2), then create a voltage array where voltage is
# zero in the first column, and Voc in the last column. Number of columns
# must equal NumPoints. Each row represents the voltage for one IV curve.
# Then create a current array where current is Isc in the first column, and
# zero in the last column, and each row represents the current in one IV
# curve. Thus the nth (V,I) point of curve m would be found as follows:
# (Result.V(m,n),Result.I(m,n)).
# if NumPoints >= 2
# s = ones(1,NumPoints); # shaping DataFrame to shape the column DataFrame parameters into 2-D matrices
# Result.V = (Voc)*(0:1/(NumPoints-1):1);
# Result.I = I_from_V(Rsh*s, Rs*s, nNsVth*s, Result.V, I0*s, IL*s);
# end
dfout = {}
dfout['i_sc'] = i_sc
dfout['i_mp'] = i_mp
dfout['v_oc'] = v_oc
dfout['v_mp'] = v_mp
dfout['p_mp'] = p_mp
dfout['i_x'] = i_x
dfout['i_xx'] = i_xx
try:
dfout = pd.DataFrame(dfout, index=photocurrent.index)
except AttributeError:
pass
return dfout
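# Hedged end-to-end sketch: feed the De Soto corrections into the single
# diode model. ``cec_module`` is assumed to be a CEC database record (it
# must expose 'V_oc_ref' for the open-circuit-voltage bracket used above),
# and the remaining inputs come from calcparams_desoto():
#
#   >>> IL, I0, Rs, Rsh, nNsVth = calcparams_desoto(...)  # as sketched above
#   >>> iv_points = singlediode(cec_module, IL, I0, Rs, Rsh, nNsVth)
#   >>> iv_points['p_mp']   # maximum power in watts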
# Created April,2014
# Author: Rob Andrews, Calama Consulting
def _golden_sect_DataFrame(params, VL, VH, func):
'''
Vectorized golden section search for finding MPPT
from a dataframe timeseries.
Parameters
----------
params : dict
Dictionary containing scalars or arrays
of inputs to the function to be optimized.
Each row should represent an independent optimization.
VL: float
Lower bound of the optimization
VH: float
Upper bound of the optimization
func: function
Function to be optimized must be in the form f(array-like, x)
Returns
-------
func(df,'V1') : DataFrame
function evaluated at the optimal point
df['V1']: Dataframe
Dataframe of optimal points
Notes
-----
This function will find the MAXIMUM of a function.
'''
df = params
df['VH'] = VH
df['VL'] = VL
err = df['VH'] - df['VL']
errflag = True
iterations = 0
while errflag:
phi = (np.sqrt(5)-1)/2*(df['VH']-df['VL'])
df['V1'] = df['VL'] + phi
df['V2'] = df['VH'] - phi
df['f1'] = func(df, 'V1')
df['f2'] = func(df, 'V2')
df['SW_Flag'] = df['f1'] > df['f2']
df['VL'] = df['V2']*df['SW_Flag'] + df['VL']*(~df['SW_Flag'])
df['VH'] = df['V1']*~df['SW_Flag'] + df['VH']*(df['SW_Flag'])
err = df['V1'] - df['V2']
try:
errflag = (abs(err)>.01).all()
except ValueError:
errflag = (abs(err)>.01)
iterations += 1
if iterations > 50:
raise Exception("EXCEPTION:iterations exeeded maximum (50)")
return func(df, 'V1'), df['V1']
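# Toy illustration of the golden-section helper: maximize -(V - target)**2
# on [0, 5] for two independent targets at once. The dict contents are
# arbitrary and the function is only a stand-in for _pwr_optfcn:
#
#   >>> targets = pd.Series([2.0, 3.0])
#   >>> f = lambda df, loc: -(df[loc] - df['target'])**2
#   >>> fbest, vbest = _golden_sect_DataFrame({'target': targets},
#   ...                                       0.0, 5.0, f)
#
# vbest converges to approximately [2.0, 3.0] and fbest to roughly zero.
# Note that the params dict is modified in place.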
def _pwr_optfcn(df, loc):
'''
Function to find power from ``i_from_v``.
'''
I = i_from_v(df['r_sh'], df['r_s'], df['nNsVth'],
df[loc], df['i_0'], df['i_l'])
return I*df[loc]
def _v_oc_optfcn(df, loc):
'''
Function to find the open circuit voltage from ``i_from_v``.
'''
I = -abs(i_from_v(df['r_sh'], df['r_s'], df['nNsVth'],
df[loc], df['i_0'], df['i_l']))
return I
def i_from_v(resistance_shunt, resistance_series, nNsVth, voltage,
saturation_current, photocurrent):
'''
Calculates current from voltage per Eq 2 Jain and Kapoor 2004 [1].
Parameters
----------
resistance_series : float or Series
Series resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rs``.
resistance_shunt : float or Series
Shunt resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rsh``.
saturation_current : float or Series
Diode saturation current in amperes under desired IV curve
conditions. Often abbreviated ``I_0``.
nNsVth : float or Series
The product of three components. 1) The usual diode ideal
factor (n), 2) the number of cells in series (Ns), and 3) the cell
thermal voltage under the desired IV curve conditions (Vth).
The thermal voltage of the cell (in volts) may be calculated as
``k*temp_cell/q``, where k is Boltzmann's constant (J/K),
temp_cell is the temperature of the p-n junction in Kelvin,
and q is the charge of an electron (coulombs).
photocurrent : float or Series
Light-generated current (photocurrent) in amperes under desired IV
curve conditions. Often abbreviated ``I_L``.
Returns
-------
current : np.array
References
----------
[1] A. Jain, A. Kapoor, "Exact analytical solutions of the parameters of
real solar cells using Lambert W-function", Solar Energy Materials
and Solar Cells, 81 (2004) 269-277.
'''
try:
from scipy.special import lambertw
except ImportError:
raise ImportError('This function requires scipy')
Rsh = resistance_shunt
Rs = resistance_series
I0 = saturation_current
IL = photocurrent
V = voltage
argW = (Rs*I0*Rsh *
np.exp( Rsh*(Rs*(IL+I0)+V) / (nNsVth*(Rs+Rsh)) ) /
(nNsVth*(Rs + Rsh)) )
lambertwterm = lambertw(argW)
# Eqn. 4 in Jain and Kapoor, 2004
I = -V/(Rs + Rsh) - (nNsVth/Rs)*lambertwterm + Rsh*(IL + I0)/(Rs + Rsh)
return I.real
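# Sanity-check sketch for i_from_v: at V = 0 the returned current should be
# close to the photocurrent when Rs is small and Rsh is large. The numbers
# are illustrative, not taken from a datasheet:
#
#   >>> i_from_v(resistance_shunt=300., resistance_series=0.1, nNsVth=1.7,
#   ...          voltage=0., saturation_current=1e-9, photocurrent=5.0)
#
# This evaluates to approximately 5.0 A, i.e. the short-circuit current.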
def snlinverter(inverter, v_dc, p_dc):
'''
Converts DC power and voltage to AC power using
Sandia's Grid-Connected PV Inverter model.
Determines the AC power output of an inverter given the DC voltage, DC
power, and appropriate Sandia Grid-Connected Photovoltaic Inverter
Model parameters. The output, ac_power, is clipped at the maximum power
output, and gives a negative power during low-input power conditions,
but does NOT account for maximum power point tracking voltage windows
nor maximum current or voltage limits on the inverter.
Parameters
----------
inverter : DataFrame
A DataFrame defining the inverter to be used, giving the
inverter performance parameters according to the Sandia
Grid-Connected Photovoltaic Inverter Model (SAND 2007-5036) [1].
A set of inverter performance parameters are provided with pvlib,
or may be generated from a System Advisor Model (SAM) [2]
library using retrievesam.
Required DataFrame columns are:
====== ============================================================
Column Description
====== ============================================================
Pac0 AC-power output from inverter based on input power
and voltage (W)
Pdc0 DC-power input to inverter, typically assumed to be equal
to the PV array maximum power (W)
Vdc0 DC-voltage level at which the AC-power rating is achieved
at the reference operating condition (V)
Ps0 DC-power required to start the inversion process, or
self-consumption by inverter, strongly influences inverter
efficiency at low power levels (W)
C0 Parameter defining the curvature (parabolic) of the
relationship between ac-power and dc-power at the reference
operating condition, default value of zero gives a
linear relationship (1/W)
C1 Empirical coefficient allowing Pdco to vary linearly
with dc-voltage input, default value is zero (1/V)
C2 Empirical coefficient allowing Pso to vary linearly with
dc-voltage input, default value is zero (1/V)
C3 Empirical coefficient allowing Co to vary linearly with
dc-voltage input, default value is zero (1/V)
Pnt AC-power consumed by inverter at night (night tare) to
maintain circuitry required to sense PV array voltage (W)
====== ============================================================
v_dc : float or Series
DC voltages, in volts, which are provided as input to the inverter.
Vdc must be >= 0.
p_dc : float or Series
A scalar or DataFrame of DC powers, in watts, which are provided
as input to the inverter. Pdc must be >= 0.
Returns
-------
ac_power : float or Series
Modeled AC power output given the input
DC voltage, Vdc, and input DC power, Pdc. When ac_power would be
greater than Pac0, it is set to Pac0 to represent inverter
"clipping". When ac_power would be less than Ps0 (startup power
required), then ac_power is set to -1*abs(Pnt) to represent nightly
power losses. ac_power is not adjusted for maximum power point
tracking (MPPT) voltage windows or maximum current limits of the
inverter.
References
----------
[1] SAND2007-5036, "Performance Model for Grid-Connected Photovoltaic
Inverters by D. King, S. Gonzalez, G. Galbraith, W. Boyson
[2] System Advisor Model web page. https://sam.nrel.gov.
See also
--------
sapm
singlediode
'''
Paco = inverter['Paco']
Pdco = inverter['Pdco']
Vdco = inverter['Vdco']
Pso = inverter['Pso']
C0 = inverter['C0']
C1 = inverter['C1']
C2 = inverter['C2']
C3 = inverter['C3']
Pnt = inverter['Pnt']
A = Pdco * (1 + C1*(v_dc - Vdco))
B = Pso * (1 + C2*(v_dc - Vdco))
C = C0 * (1 + C3*(v_dc - Vdco))
# ensures that function works with scalar or Series input
p_dc = pd.Series(p_dc)
ac_power = ( Paco/(A-B) - C*(A-B) ) * (p_dc-B) + C*((p_dc-B)**2)
ac_power[ac_power > Paco] = Paco
ac_power[ac_power < Pso] = - 1.0 * abs(Pnt)
if len(ac_power) == 1:
ac_power = ac_power.ix[0]
return ac_power
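# Usage sketch for snlinverter, continuing the retrieve_sam example shown in
# that function's docstring; the DC operating point below is made up:
#
#   >>> inverters = retrieve_sam('CECInverter')
#   >>> inverter = inverters.AE_Solar_Energy__AE6_0__277V__277V__CEC_2012_
#   >>> ac = snlinverter(inverter, v_dc=360., p_dc=5000.)
#
# The result is clipped at Paco on the high side and set to -abs(Pnt) below
# the startup power, as described in the docstring above.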
| bsd-3-clause |
lol/BCI-BO-old | BCI_Framework/GDA_Learner.py | 1 | 6416 | import numpy as np
import pickle
import sys
from Learner import Learner, NONRF_Learner
from sklearn.preprocessing import StandardScaler
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import pairwise, zero_one_loss, mean_squared_error
from sklearn.cross_validation import StratifiedKFold, cross_val_score
from sklearn.utils import shuffle
import logging
from sklearn.lda import LDA
from sklearn.qda import QDA
from sklearn import cross_validation
from sklearn.metrics import classification_report
import json
class GDA_Learner(NONRF_Learner):
"""applying GDA to BCI dataset"""
def __init__(self, config, type = 'LDA', method = 'classification'):
""" """
Learner.__init__(self, config, method)
self.type = type
def generate_param_grid(self, feature_param_list, learner_name):
if feature_param_list is None:
scores = np.zeros(shape=(1, self.config.configuration["number_of_cvs_dict"][learner_name]))
param_grid = [ None ]
self.grid_dictionary = {}
else:
scores = np.zeros(shape=(len(feature_param_list), self.config.configuration["number_of_cvs_dict"][learner_name]))
param_grid = [ (None, feat_param) for feat_param in feature_param_list]
self.grid_dictionary = {'fe_params':1}
return param_grid, scores
def set_params_list( self, learner_params, i):
n_jobs = self.config.configuration["n_jobs"]
if self.type == 'LDA':
self.learner = LDA()
elif self.type == 'QDA':
self.learner = QDA()
def set_params_dict(self, learner_params):
n_jobs = self.config.configuration["n_jobs"]
if self.type == 'LDA':
self.learner = LDA()
elif self.type == 'QDA':
self.learner = QDA()
# def fit_calc_cv_scores(self, X_train, y_train, X_test, y_test):
#
# self.learner.fit(X_train, y_train)
# return self.predict_error(X_test, y_test)
#
# def predict_error(self, X_test, Y_test):
#
# preds = self.learner.predict(X_test)
# classification_error = np.sum((preds != Y_test))/float(len(Y_test))
# precision, recall, _ , _ = precision_recall_fscore_support(Y_test, preds, average='weighted')
#
# return classification_error, precision, recall
def train_learner(self, X, Y, X_test = [], Y_test = [], learner_params = [] ,optimal = False):
""" """
if optimal:
self.train_learner_opt(X, Y, X_test, Y_test, learner_params)
else:
self.train_learner_cv(X, Y)
def train_learner_cv(self, Xs, Y, optimal = False):
""" """
self.logging.info('Standardizing data!')
assert self.result_path != ''
# X = np.asarray( X, dtype=np.float32, order='F')
# Y = np.asarray( Y, dtype=np.short, order='F')
# scaler = StandardScaler()
# X = scaler.fit_transform(X)
scaled_Xs = self.scale_training_data(Xs)
self.logging.info('X size is: %s and Y size is: %s', '_'.join(map(str,scaled_Xs[0].shape)), map(str,Y.shape))
for i in range(self.config.configuration["number_of_cvs"]):
self.logging.info('iteration number %s for cross validation', str(i))
# ``X`` is undefined in this scope; shuffle the scaled feature matrix
# computed above instead (assumes the first element of scaled_Xs is the
# training design matrix, as the log statement above suggests).
X_new, Y_new = shuffle(scaled_Xs[0], Y, random_state=i)
scores = cross_validation.cross_val_score(self.learner, X_new, Y_new, cv=self.config.configuration["number_of_cv_folds"])
if self.method == 'classification':
self.scores[:,i] = 1 - np.mean(scores)
elif self.method == 'regression':
pass
# self.scores[:, i] = np.mean(self.learnerCV.mse_path_, axis = 1)
# clf = self.learner.fit(X,Y)
# aa = clf.predict_proba(X)
self.logging.info('Writing the results to file!')
with open(self.result_path, 'w') as res_file:
print>>res_file, np.mean(self.scores)
print>>res_file,{}
print>>res_file, np.std(self.scores, axis=1)
def train_learner_opt(self, X, Y, X_test, Y_test, learner_params = []):
""" """
# self.logging.info('Standardizing data!')
# Y_test = np.array(Y_test)
# scaler = StandardScaler()
# X = scaler.fit_transform(X)
# X_test = scaler.transform(X_test)
# self.logging.info('X size is: %s and Y size is: %s and X_test size is: %s and Y_test size is: %s',
# '_'.join(map(str,X.shape)), str(len(Y)), '_'.join(map(str,X_test.shape)), str(len(Y_test)))
X, Y, X_test, Y_test = self.scale_all_data(X, Y, X_test, Y_test)
if self.method == 'classification':
clf = self.learner
self.logging.info('optimal GDA classifier trained')
elif self.method == 'regression':
pass
# clf = self.learner()
# self.logging.info('optimal linear regressor trained with alpha = %s!', str(learner_params['C']))
clf.fit(X, Y)
self.fit_opt_learner(X, Y, X_test, Y_test, clf)
# clf.fit(X, Y)
#
# Y_pred_train = clf.predict(X)
#
# Y_pred = clf.predict(X_test)
# nonnan_indices = ~np.isnan(Y_test)
# error = self.my_loss(Y_test[nonnan_indices], Y_pred[nonnan_indices])
# self.logging.info('error is %s', str(error))
#
# probs_train = clf.predict_proba(X)
# probs_test = clf.predict_proba(X_test)
#
# np.savez(self.result_opt_path, error=error, Y_pred=Y_pred, Y_pred_train=Y_pred_train, probs_train=probs_train, probs_test = probs_test)
# with open(self.result_opt_path,'w') as res_file:
# res_file.write(str(error))
# res_file.write('\n')
# res_file.write(' '.join(map(str, Y_pred) + ['\n']))
# res_file.write('\n')
# res_file.write(' '.join(map(str, Y_pred_train) + ['\n']))
# res_file.write('#Train probabilities: ')
# res_file.write(' '.join(map(str, probs) + ['\n']))
#
# res_file.write('#Test probabilities: ')
# res_file.write(' '.join(map(str, probs_test) + ['\n']))
| gpl-3.0 |
plaes/numpy | numpy/doc/creation.py | 3 | 5425 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array array creation objects (e.g., arange, ones, zeros, etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or record arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most obvious
examples are lists and tuples. See the documentation for array() for details of
its use. Some objects may support the array-protocol and allow conversion to arrays
this way. A simple way to find out if the object can be converted to a numpy array
using array() is simply to try it interactively and see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
For example: ::
>>> np.zeros((2, 3))
array([[ 0.,  0.,  0.],
       [ 0.,  0.,  0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Others? xxx
Examples of formats that cannot be read directly but for which conversion is
not hard are the image formats supported by libraries like PIL (able to read
and write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!). If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques (see
xxx) though that certainly is much more work and requires significantly more
advanced knowledge to interface with C or C++.
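A minimal sketch of that round trip (the file name and dtype here are
arbitrary choices): ::
>>> a = np.arange(4, dtype=np.float64)
>>> a.tofile('data.bin') # raw bytes, no header or shape information
>>> b = np.fromfile('data.bin', dtype=np.float64) # dtype must be re-supplied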
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common examples are
the many array generation functions in random that can generate arrays of
random values, and some utility functions that generate special matrices
(e.g. diagonal).
"""
| bsd-3-clause |
sugartom/tensorflow-alien | tensorflow/contrib/learn/python/learn/tests/dataframe/arithmetic_transform_test.py | 62 | 2343 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for arithmetic transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class SumTestCase(test.TestCase):
"""Test class for `Sum` transform."""
def testSum(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a+b"] = frame["a"] + frame["b"]
expected_sum = pandas_df["a"] + pandas_df["b"]
actual_sum = frame.run_one_batch()["a+b"]
np.testing.assert_array_equal(expected_sum, actual_sum)
class DifferenceTestCase(test.TestCase):
"""Test class for `Difference` transform."""
def testDifference(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a-b"] = frame["a"] - frame["b"]
expected_diff = pandas_df["a"] - pandas_df["b"]
actual_diff = frame.run_one_batch()["a-b"]
np.testing.assert_array_equal(expected_diff, actual_diff)
if __name__ == "__main__":
test.main()
| apache-2.0 |
allisony/aplpy | ah_bootstrap.py | 2 | 36165 | """
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]`` the presences of
that section, and options therein, determine the next step taken: If it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module called
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken (however,
``ah_bootstrap.use_astropy_helpers`` may be called manually from within the
setup.py script).
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
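As a purely illustrative example (the option names are the ones recognized by
this module; the values shown are not required defaults), such a section might
look like::
    [ah_bootstrap]
    auto_use = True
    path = astropy_helpers
    use_git = True
    auto_upgrade = False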
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import imp
import io
import locale
import os
import re
import subprocess as sp
import sys
try:
from ConfigParser import ConfigParser, RawConfigParser
except ImportError:
from configparser import ConfigParser, RawConfigParser
if sys.version_info[0] < 3:
_str_types = (str, unicode)
_text_type = unicode
PY3 = False
else:
_str_types = (str, bytes)
_text_type = str
PY3 = True
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):
# Some pre-setuptools checks to ensure that either distribute or setuptools >=
# 0.7 is used (over pre-distribute setuptools) if it is available on the path;
# otherwise the latest setuptools will be downloaded and bootstrapped with
# ``ez_setup.py``. This used to be included in a separate file called
# setuptools_bootstrap.py; but it was combined into ah_bootstrap.py
try:
import pkg_resources
_setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7')
# This may raise a DistributionNotFound in which case no version of
# setuptools or distribute is properly installed
_setuptools = pkg_resources.get_distribution('setuptools')
if _setuptools not in _setuptools_req:
# Older version of setuptools; check if we have distribute; again if
# this results in DistributionNotFound we want to give up
_distribute = pkg_resources.get_distribution('distribute')
if _setuptools != _distribute:
# It's possible on some pathological systems to have an old version
# of setuptools and distribute on sys.path simultaneously; make
# sure distribute is the one that's used
sys.path.insert(1, _distribute.location)
_distribute.activate()
imp.reload(pkg_resources)
except:
# There are several types of exceptions that can occur here; if all else
# fails bootstrap and use the bootstrapped version
from ez_setup import use_setuptools
use_setuptools()
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
import setuptools.py31compat
except ImportError:
pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
except:
    # Ignore if this fails for *any* reason
pass
# End compatibility imports...
# In case it didn't successfully import before the ez_setup checks
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
from setuptools.sandbox import run_setup
from distutils import log
from distutils.debug import DEBUG
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
('auto_use', bool), ('path', str), ('download_if_needed', bool),
('index_url', str), ('use_git', bool), ('offline', bool),
('auto_upgrade', bool)
]
class _Bootstrapper(object):
"""
Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
documentation.
"""
def __init__(self, path=None, index_url=None, use_git=None, offline=None,
download_if_needed=None, auto_upgrade=None):
if path is None:
path = PACKAGE_NAME
if not (isinstance(path, _str_types) or path is False):
raise TypeError('path must be a string or False')
if PY3 and not isinstance(path, _text_type):
fs_encoding = sys.getfilesystemencoding()
path = path.decode(fs_encoding) # path to unicode
self.path = path
# Set other option attributes, using defaults where necessary
self.index_url = index_url if index_url is not None else INDEX_URL
self.offline = offline if offline is not None else OFFLINE
# If offline=True, override download and auto-upgrade
if self.offline:
download_if_needed = False
auto_upgrade = False
self.download = (download_if_needed
if download_if_needed is not None
else DOWNLOAD_IF_NEEDED)
self.auto_upgrade = (auto_upgrade
if auto_upgrade is not None else AUTO_UPGRADE)
# If this is a release then the .git directory will not exist so we
# should not use git.
git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
if use_git is None and not git_dir_exists:
use_git = False
self.use_git = use_git if use_git is not None else USE_GIT
# Declared as False by default--later we check if astropy-helpers can be
# upgraded from PyPI, but only if not using a source distribution (as in
# the case of import from a git submodule)
self.is_submodule = False
@classmethod
def main(cls, argv=None):
if argv is None:
argv = sys.argv
config = cls.parse_config()
config.update(cls.parse_command_line(argv))
auto_use = config.pop('auto_use', False)
bootstrapper = cls(**config)
if auto_use:
# Run the bootstrapper, otherwise the setup.py is using the old
# use_astropy_helpers() interface, in which case it will run the
# bootstrapper manually after reconfiguring it.
bootstrapper.run()
return bootstrapper
@classmethod
def parse_config(cls):
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
try:
cfg.read('setup.cfg')
except Exception as e:
if DEBUG:
raise
log.error(
"Error reading setup.cfg: {0!r}\n{1} will not be "
"automatically bootstrapped and package installation may fail."
"\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
return {}
if not cfg.has_section('ah_bootstrap'):
return {}
config = {}
for option, type_ in CFG_OPTIONS:
if not cfg.has_option('ah_bootstrap', option):
continue
if type_ is bool:
value = cfg.getboolean('ah_bootstrap', option)
else:
value = cfg.get('ah_bootstrap', option)
config[option] = value
return config
@classmethod
def parse_command_line(cls, argv=None):
if argv is None:
argv = sys.argv
config = {}
# For now we just pop recognized ah_bootstrap options out of the
# arg list. This is imperfect; in the unlikely case that a setup.py
# custom command or even custom Distribution class defines an argument
# of the same name then we will break that. However there's a catch22
# here that we can't just do full argument parsing right here, because
# we don't yet know *how* to parse all possible command-line arguments.
if '--no-git' in argv:
config['use_git'] = False
argv.remove('--no-git')
if '--offline' in argv:
config['offline'] = True
argv.remove('--offline')
return config
def run(self):
strategies = ['local_directory', 'local_file', 'index']
dist = None
# Check to see if the path is a submodule
self.is_submodule = self._check_submodule()
for strategy in strategies:
method = getattr(self, 'get_{0}_dist'.format(strategy))
dist = method()
if dist is not None:
break
else:
raise _AHBootstrapSystemExit(
"No source found for the {0!r} package; {0} must be "
"available and importable as a prerequisite to building "
"or installing this package.".format(PACKAGE_NAME))
# This is a bit hacky, but if astropy_helpers was loaded from a
# directory/submodule its Distribution object gets a "precedence" of
# "DEVELOP_DIST". However, in other cases it gets a precedence of
# "EGG_DIST". However, when activing the distribution it will only be
# placed early on sys.path if it is treated as an EGG_DIST, so always
# do that
dist = dist.clone(precedence=pkg_resources.EGG_DIST)
# Otherwise we found a version of astropy-helpers, so we're done
# Just active the found distribution on sys.path--if we did a
# download this usually happens automatically but it doesn't hurt to
# do it again
# Note: Adding the dist to the global working set also activates it
# (makes it importable on sys.path) by default.
# But first, remove any previously imported versions of
# astropy_helpers; this is necessary for nested installs where one
# package's installer is installing another package via
# setuptools.sandbox.run_set, as in the case of setup_requires
for key in list(sys.modules):
try:
if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
del sys.modules[key]
except AttributeError:
# Sometimes mysterious non-string things can turn up in
# sys.modules
continue
try:
pkg_resources.working_set.add(dist, replace=True)
except TypeError:
# Some (much) older versions of setuptools do not have the
# replace=True option here. These versions are old enough that all
# bets may be off anyways, but it's easy enough to work around just
# in case...
if dist.key in pkg_resources.working_set.by_key:
del pkg_resources.working_set.by_key[dist.key]
pkg_resources.working_set.add(dist)
@property
def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname))
def get_local_directory_dist(self):
"""
Handle importing a vendored package from a subdirectory of the source
distribution.
"""
if not os.path.isdir(self.path):
return
log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
'submodule' if self.is_submodule else 'directory',
self.path))
dist = self._directory_import()
if dist is None:
log.warn(
'The requested path {0!r} for importing {1} does not '
'exist, or does not contain a copy of the {1} '
'package.'.format(self.path, PACKAGE_NAME))
elif self.auto_upgrade and not self.is_submodule:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_local_file_dist(self):
"""
Handle importing from a source archive; this also uses setup_requires
but points easy_install directly to the source archive.
"""
if not os.path.isfile(self.path):
return
log.info('Attempting to unpack and import astropy_helpers from '
'{0!r}'.format(self.path))
try:
dist = self._do_download(find_links=[self.path])
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to import {0} from the specified archive {1!r}: '
'{2}'.format(PACKAGE_NAME, self.path, str(e)))
dist = None
if dist is not None and self.auto_upgrade:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_index_dist(self):
if not self.download:
log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
return False
log.warn(
"Downloading {0!r}; run setup.py with the --offline option to "
"force offline installation.".format(DIST_NAME))
try:
dist = self._do_download()
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to download and/or install {0!r} from {1!r}:\n'
'{2}'.format(DIST_NAME, self.index_url, str(e)))
dist = None
# No need to run auto-upgrade here since we've already presumably
# gotten the most up-to-date version from the package index
return dist
def _directory_import(self):
"""
Import astropy_helpers from the given path, which will be added to
sys.path.
Must return True if the import succeeded, and False otherwise.
"""
# Return True on success, False on failure but download is allowed, and
# otherwise raise SystemExit
path = os.path.abspath(self.path)
# Use an empty WorkingSet rather than the main
# pkg_resources.working_set, since on older versions of setuptools this
# will invoke a VersionConflict when trying to install an upgrade
ws = pkg_resources.WorkingSet([])
ws.add_entry(path)
dist = ws.by_key.get(DIST_NAME)
if dist is None:
# We didn't find an egg-info/dist-info in the given path, but if a
# setup.py exists we can generate it
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
with _silence():
run_setup(os.path.join(path, 'setup.py'),
['egg_info'])
for dist in pkg_resources.find_distributions(path, True):
# There should be only one...
return dist
return dist
def _do_download(self, version='', find_links=None):
if find_links:
allow_hosts = ''
index_url = None
else:
allow_hosts = None
index_url = self.index_url
# Annoyingly, setuptools will not handle other arguments to
# Distribution (such as options) before handling setup_requires, so it
# is not straightforward to programmatically augment the arguments which
# are passed to easy_install
class _Distribution(Distribution):
def get_option_dict(self, command_name):
opts = Distribution.get_option_dict(self, command_name)
if command_name == 'easy_install':
if find_links is not None:
opts['find_links'] = ('setup script', find_links)
if index_url is not None:
opts['index_url'] = ('setup script', index_url)
if allow_hosts is not None:
opts['allow_hosts'] = ('setup script', allow_hosts)
return opts
if version:
req = '{0}=={1}'.format(DIST_NAME, version)
else:
req = DIST_NAME
attrs = {'setup_requires': [req]}
try:
if DEBUG:
_Distribution(attrs=attrs)
else:
with _silence():
_Distribution(attrs=attrs)
# If the setup_requires succeeded it will have added the new dist to
# the main working_set
return pkg_resources.working_set.by_key.get(DIST_NAME)
except Exception as e:
if DEBUG:
raise
msg = 'Error retrieving {0} from {1}:\n{2}'
if find_links:
source = find_links[0]
elif index_url != INDEX_URL:
source = index_url
else:
source = 'PyPI'
raise Exception(msg.format(DIST_NAME, source, repr(e)))
def _do_upgrade(self, dist):
# Build up a requirement for a higher bugfix release but a lower minor
# release (so API compatibility is guaranteed)
next_version = _next_version(dist.parsed_version)
req = pkg_resources.Requirement.parse(
'{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
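        # Illustrative example (version numbers assumed, and assuming DIST_NAME
        # is 'astropy-helpers'): for an installed 1.2.3, _next_version() gives
        # '1.3.0', so the requirement becomes 'astropy-helpers>1.2.3,<1.3.0'.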
package_index = PackageIndex(index_url=self.index_url)
upgrade = package_index.obtain(req)
if upgrade is not None:
return self._do_download(version=upgrade.version)
def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git()
def _check_submodule_using_git(self):
"""
Check if the given path is a git submodule. If so, attempt to initialize
and/or update the submodule if needed.
This function makes calls to the ``git`` command in subprocesses. The
``_check_submodule_no_git`` option uses pure Python to check if the given
path looks like a git submodule, but it cannot perform updates.
"""
cmd = ['git', 'submodule', 'status', '--', self.path]
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except _CommandNotFound:
# The git command simply wasn't found; this is most likely the
# case on user systems that don't have git and are simply
# trying to install the package from PyPI or a source
# distribution. Silently ignore this case and simply don't try
# to use submodules
return False
stderr = stderr.strip()
if returncode != 0 and stderr:
# Unfortunately the return code alone cannot be relied on, as
# earlier versions of git returned 0 even if the requested submodule
# does not exist
            # This is a warning emitted by perl (invoked by `git submodule`)
            # that only occurs with a misconfigured locale setting, which can
            # happen sometimes on OSX. See again
# https://github.com/astropy/astropy/issues/2749
perl_warning = ('perl: warning: Falling back to the standard locale '
'("C").')
if not stderr.strip().endswith(perl_warning):
# Some other unknown error condition occurred
log.warn('git submodule command failed '
'unexpectedly:\n{0}'.format(stderr))
return False
# Output of `git submodule status` is as follows:
#
        # 1. Status indicator: '-' if the submodule is uninitialized, '+' if the
# submodule is initialized but is not at the commit currently indicated
# in .gitmodules (and thus needs to be updated), or 'U' if the
# submodule is in an unstable state (i.e. has merge conflicts)
#
# 2. SHA-1 hash of the current commit of the submodule (we don't really
# need this information but it's useful for checking that the output is
# correct)
#
# 3. The output of `git describe` for the submodule's current commit
# hash (this includes for example what branches the commit is on) but
# only if the submodule is initialized. We ignore this information for
# now
_git_submodule_status_re = re.compile(
            r'^(?P<status>[+\-U ])(?P<commit>[0-9a-f]{40}) '
            r'(?P<submodule>\S+)( .*)?$')
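        # Illustrative example (not output from a real run): a status line such
        # as
        #
        #     -<40-hex-digit SHA-1> astropy_helpers
        #
        # would yield status='-' and submodule='astropy_helpers', i.e. an
        # uninitialized submodule.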
# The stdout should only contain one line--the status of the
# requested submodule
m = _git_submodule_status_re.match(stdout)
if m:
# Yes, the path *is* a git submodule
self._update_submodule(m.group('submodule'), m.group('status'))
return True
else:
log.warn(
'Unexpected output from `git submodule status`:\n{0}\n'
'Will attempt import from {1!r} regardless.'.format(
stdout, self.path))
return False
def _check_submodule_no_git(self):
"""
Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
to determine if the supplied path is a git submodule, and does not exec any
subprocesses.
This can only determine if a path is a submodule--it does not perform
updates, etc. This function may need to be updated if the format of the
.gitmodules file is changed between git versions.
"""
gitmodules_path = os.path.abspath('.gitmodules')
if not os.path.isfile(gitmodules_path):
return False
# This is a minimal reader for gitconfig-style files. It handles a few of
# the quirks that make gitconfig files incompatible with ConfigParser-style
# files, but does not support the full gitconfig syntax (just enough
# needed to read a .gitmodules file).
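        # For reference, a minimal .gitmodules entry this reader is meant to
        # handle looks like the following (path and URL are only illustrative):
        #
        #     [submodule "astropy_helpers"]
        #         path = astropy_helpers
        #         url = https://github.com/astropy/astropy-helpers.git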
gitmodules_fileobj = io.StringIO()
# Must use io.open for cross-Python-compatible behavior wrt unicode
with io.open(gitmodules_path) as f:
for line in f:
# gitconfig files are more flexible with leading whitespace; just
# go ahead and remove it
line = line.lstrip()
# comments can start with either # or ;
                if line and line[0] in ('#', ';'):
continue
gitmodules_fileobj.write(line)
gitmodules_fileobj.seek(0)
cfg = RawConfigParser()
try:
cfg.readfp(gitmodules_fileobj)
except Exception as exc:
log.warn('Malformatted .gitmodules file: {0}\n'
'{1} cannot be assumed to be a git submodule.'.format(
exc, self.path))
return False
for section in cfg.sections():
if not cfg.has_option(section, 'path'):
continue
submodule_path = cfg.get(section, 'path').rstrip(os.sep)
if submodule_path == self.path.rstrip(os.sep):
return True
return False
def _update_submodule(self, submodule, status):
if status == ' ':
# The submodule is up to date; no action necessary
return
elif status == '-':
if self.offline:
raise _AHBootstrapSystemExit(
"Cannot initialize the {0} submodule in --offline mode; "
"this requires being able to clone the submodule from an "
"online repository.".format(submodule))
cmd = ['update', '--init']
action = 'Initializing'
elif status == '+':
cmd = ['update']
action = 'Updating'
if self.offline:
cmd.append('--no-fetch')
elif status == 'U':
            raise _AHBootstrapSystemExit(
'Error: Submodule {0} contains unresolved merge conflicts. '
'Please complete or abandon any changes in the submodule so that '
'it is in a usable state, then try again.'.format(submodule))
else:
log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
'attempt to use the submodule as-is, but try to ensure '
'that the submodule is in a clean state and contains no '
'conflicts or errors.\n{2}'.format(status, submodule,
_err_help_msg))
return
err_msg = None
cmd = ['git', 'submodule'] + cmd + ['--', submodule]
log.warn('{0} {1} submodule with: `{2}`'.format(
action, submodule, ' '.join(cmd)))
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except OSError as e:
err_msg = str(e)
else:
if returncode != 0:
err_msg = stderr
if err_msg is not None:
log.warn('An unexpected error occurred updating the git submodule '
'{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
_err_help_msg))
class _CommandNotFound(OSError):
"""
An exception raised when a command run with run_cmd is not found on the
system.
"""
def run_cmd(cmd):
"""
Run a command in a subprocess, given as a list of command-line
arguments.
Returns a ``(returncode, stdout, stderr)`` tuple.
"""
try:
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
# XXX: May block if either stdout or stderr fill their buffers;
# however for the commands this is currently used for that is
# unlikely (they should have very brief output)
stdout, stderr = p.communicate()
except OSError as e:
if DEBUG:
raise
if e.errno == errno.ENOENT:
msg = 'Command not found: `{0}`'.format(' '.join(cmd))
raise _CommandNotFound(msg, cmd)
else:
            raise _AHBootstrapSystemExit(
'An unexpected error occurred when running the '
'`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
    # Can fail if the default locale is not configured properly. See
# https://github.com/astropy/astropy/issues/2749. For the purposes under
# consideration 'latin1' is an acceptable fallback.
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
except ValueError:
# Due to an OSX oddity locale.getdefaultlocale() can also crash
# depending on the user's locale/language settings. See:
# http://bugs.python.org/issue18378
stdio_encoding = 'latin1'
# Unlikely to fail at this point but even then let's be flexible
if not isinstance(stdout, _text_type):
stdout = stdout.decode(stdio_encoding, 'replace')
if not isinstance(stderr, _text_type):
stderr = stderr.decode(stdio_encoding, 'replace')
return (p.returncode, stdout, stderr)
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
Examples
========
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
exception_occurred = False
try:
yield
except:
exception_occurred = True
# Go ahead and clean up so that exception handling can work normally
sys.stdout = old_stdout
sys.stderr = old_stderr
raise
if not exception_occurred:
sys.stdout = old_stdout
sys.stderr = old_stderr
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
class _AHBootstrapSystemExit(SystemExit):
def __init__(self, *args):
if not args:
msg = 'An unknown problem occurred bootstrapping astropy_helpers.'
else:
msg = args[0]
msg += '\n' + _err_help_msg
super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
if sys.version_info[:2] < (2, 7):
# In Python 2.6 the distutils log does not log warnings, errors, etc. to
# stderr so we have to wrap it to ensure consistency at least in this
# module
import distutils
class log(object):
def __getattr__(self, attr):
return getattr(distutils.log, attr)
def warn(self, msg, *args):
self._log_to_stderr(distutils.log.WARN, msg, *args)
        def error(self, msg, *args):
            self._log_to_stderr(distutils.log.ERROR, msg, *args)
        def fatal(self, msg, *args):
            self._log_to_stderr(distutils.log.FATAL, msg, *args)
def log(self, level, msg, *args):
if level in (distutils.log.WARN, distutils.log.ERROR,
distutils.log.FATAL):
self._log_to_stderr(level, msg, *args)
else:
distutils.log.log(level, msg, *args)
def _log_to_stderr(self, level, msg, *args):
# This is the only truly 'public' way to get the current threshold
# of the log
current_threshold = distutils.log.set_threshold(distutils.log.WARN)
distutils.log.set_threshold(current_threshold)
if level >= current_threshold:
if args:
msg = msg % args
sys.stderr.write('%s\n' % msg)
sys.stderr.flush()
log = log()
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
"""
Ensure that the `astropy_helpers` module is available and is importable.
This supports automatic submodule initialization if astropy_helpers is
included in a project as a git submodule, or will download it from PyPI if
necessary.
Parameters
----------
path : str or None, optional
A filesystem path relative to the root of the project's source code
that should be added to `sys.path` so that `astropy_helpers` can be
imported from that path.
If the path is a git submodule it will automatically be initialized
and/or updated.
The path may also be to a ``.tar.gz`` archive of the astropy_helpers
source distribution. In this case the archive is automatically
unpacked and made temporarily available on `sys.path` as a ``.egg``
archive.
If `None` skip straight to downloading.
download_if_needed : bool, optional
If the provided filesystem path is not found an attempt will be made to
download astropy_helpers from PyPI. It will then be made temporarily
available on `sys.path` as a ``.egg`` archive (using the
        ``setup_requires`` feature of setuptools). If the ``--offline`` option
is given at the command line the value of this argument is overridden
to `False`.
index_url : str, optional
If provided, use a different URL for the Python package index than the
main PyPI server.
use_git : bool, optional
If `False` no git commands will be used--this effectively disables
support for git submodules. If the ``--no-git`` option is given at the
command line the value of this argument is overridden to `False`.
auto_upgrade : bool, optional
By default, when installing a package from a non-development source
        distribution ah_bootstrap will try to automatically check for patch
releases to astropy-helpers on PyPI and use the patched version over
any bundled versions. Setting this to `False` will disable that
functionality. If the ``--offline`` option is given at the command line
the value of this argument is overridden to `False`.
offline : bool, optional
        If `True`, disable all actions that require an internet connection,
        including downloading packages from the package index and fetching
        updates to any git submodule. Defaults to `False`.
"""
global BOOTSTRAPPER
config = BOOTSTRAPPER.config
config.update(**kwargs)
# Create a new bootstrapper with the updated configuration and run it
BOOTSTRAPPER = _Bootstrapper(**config)
BOOTSTRAPPER.run()
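# Illustrative usage (not executed here, and assuming this file is saved under
# its usual name ``ah_bootstrap.py``): a project's setup.py typically just
# imports this module, which runs ``_Bootstrapper.main()`` at import time; the
# bootstrap can also be re-run with custom options, e.g.:
#
#     from ah_bootstrap import use_astropy_helpers
#     use_astropy_helpers(path='astropy_helpers', auto_upgrade=False)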
| mit |
DonghoChoi/ISB_Project | local/parse_video_coding_boris.py | 2 | 2506 | #!/usr/bin/python
# Author: Dongho Choi
import os.path
import datetime
import math
import time
import itertools
import pandas as pd
from sshtunnel import SSHTunnelForwarder # for SSH connection
import pymysql.cursors # MySQL handling API
import sys
import csv
#sys.path.append("./configs/")
sys.path.append("/Users/donghochoi/Documents/Work/Exploration_Study/Dissertation/Code/local/configs/")
import server_config # (1) info2_server (2) exploration_db
if __name__ == "__main__":
# Server connection
server = SSHTunnelForwarder(
(server_config.info2_server['host'], 22),
ssh_username=server_config.info2_server['user'],
ssh_password=server_config.info2_server['password'],
remote_bind_address=('127.0.0.1', 3306))
server.start()
connection = pymysql.connect(host='127.0.0.1',
port=server.local_bind_port,
user=server_config.exploration_db['user'],
password=server_config.exploration_db['password'],
db=server_config.exploration_db['database'])
connection.autocommit(True)
cursor = connection.cursor()
print("MySQL connection established.")
# Read a video code data file
data_directory = "/Users/donghochoi/Documents/Work/Exploration_Study/Dissertation/Video_Analysis/TH/boris_export/"
print("PARSING BORIS CODING DATA")
userID_list = {2,3,5,6,9,10,11,12,14,16,20,21,22,23,25,26,27,28,32,33,35,37,38,40,42,45,46}
for userID in userID_list:
input_file_name = data_directory+ "user" + str(userID) + ".txt.tsv"
with open(input_file_name) as f:
tsvreader = csv.reader(f, delimiter="\t")
for _ in range(16):
next(tsvreader)
for line in tsvreader:
print(line)
#currentline = line.split("\t")
#print("time:{0}, behavior: {1}, label: {2}, status: {3}".format(currentline[0],currentline[5],currentline[6],currentline[8]))
print("time:{0}, behavior: {1}, label: {2}, status: {3}".format(line[0],line[5],line[6],line[8]))
sql = "INSERT INTO user_TH_Boris_results (userID,observed_time,behavior,label,status) VALUES (" + \
str(userID) + "," + str(line[0]) + ",'" + str(line[5]) + "','" + str(line[6]) + "','" + str(line[8]) +"');"
print(sql)
cursor.execute(sql)
f.close()
server.stop() | gpl-3.0 |
abonil91/ncanda-data-integration | scripts/reporting/baseline_cases.py | 1 | 1317 | #!/usr/bin/env python
##
## Copyright 2016 SRI International
## See COPYING file distributed along with the package for the copyright and license terms.
##
"""
Baseline cases
==============
This script generates a list of all subjects that have a valid baseline visit.
"""
import os
import pandas
import redcap
# First REDCap connection for the Summary project (this is where we put data)
summary_key_file = open(os.path.join( os.path.expanduser("~"), '.server_config/redcap-dataentry-token' ), 'r')
summary_api_key = summary_key_file.read().strip()
rc_summary = redcap.Project('https://ncanda.sri.com/redcap/api/', summary_api_key, verify_ssl=False)
# Get MR session reports and visit dates for the baseline visit
visit = rc_summary.export_records(fields=['study_id', 'exclude', 'visit_ignore___yes'],
forms=['mr_session_report','visit_date'],
events=['baseline_visit_arm_1'],
format='df')
# Create filters for cases that are included
visit_included = visit.exclude != 1
np_collected = visit.visit_ignore___yes != 1
# Apply filters for results
results = visit[visit_included & np_collected]
results.to_csv('subjects_with_valid_baseline.csv', columns = ['exclude','visit_ignore___yes', 'mri_xnat_sid','mri_xnat_eids'])
| bsd-3-clause |
talonchandler/dipsim | paper/figures/sweep-na.py | 1 | 1509 | from dipsim import util
from dipsim import multiframe
import numpy as np
import matplotlib.pyplot as plt
import os; import time; start = time.time(); print('Running...')
# Main input parameters
n_pts = [1000]
nas = np.linspace(0.4, 1.33, num=25)
inch_fig = 5
dpi = 400
# Compute and plot on axes
fig, ax = plt.subplots(1, 1, figsize=(5,5))
for n_pt in n_pts:
d = []
for na in nas:
exp = multiframe.MultiFrameMicroscope(ill_thetas=[0], det_thetas=[0],
ill_nas=[na], det_nas=[na],
ill_types=['wide'], det_types=['lens'],
colors=['(1,0,0)'], n_frames=4,
n_pts=n_pt, max_photons=1000, n_samp=1.33)
print('Computing microscope: ' + str(n_pt) + ', ' + str(na))
exp.calc_estimation_stats()
d.append(util.coeff_of_variation(exp.root_det_sin))
print('Optimal NA is: ' + str(nas[np.argmin(d)]))
ax.plot(nas, d, '-', label='N = '+str(n_pt))
if n_pt == n_pts[-1]:
x = nas[np.argmin(d)]
y = np.min(d)
ax.arrow(x, y+0.05, 0, -0.04)
ax.annotate('NA* = {:.3}'.format(x), xy=(x, y+0.05),
xytext=(x, y+0.08), horizontalalignment='center')
ax.set_xlabel('NA')
ax.set_ylabel('$c_v$')
ax.set_xlim([0.4, 1.33])
ax.legend(loc='upper right')
fig.savefig('sweep-na.png', dpi=dpi)
print('Total time: '+str(np.round(time.time() - start, 2)))
os.system('say "done"')
| mit |
jkarnows/scikit-learn | examples/cluster/plot_color_quantization.py | 297 | 3443 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0, 1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
ljishen/ga4gh-testing | client/jupyter_notebook_config.py | 1 | 21179 | # Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp configuration
#------------------------------------------------------------------------------
# Base class for Jupyter applications
# Answer yes to any prompts.
# c.JupyterApp.answer_yes = False
# Full path of a config file.
# c.JupyterApp.config_file = ''
# Specify a config file to load.
# c.JupyterApp.config_file_name = ''
# Generate default config file.
# c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# c.NotebookApp.allow_origin = ''
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_url = '/'
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = ''
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = ''
# The full path to a certificate authority certifificate for SSL/TLS client
# authentication.
# c.NotebookApp.client_ca = ''
# The config manager class to use
# c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
# The notebook manager class to use.
# c.NotebookApp.contents_manager_class = 'notebook.services.contents.filemanager.FileContentsManager'
# Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
# c.NotebookApp.cookie_options = {}
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = b''
# The file where the cookie secret is stored.
# c.NotebookApp.cookie_secret_file = ''
# The default URL to redirect to from `/`
# c.NotebookApp.default_url = '/tree'
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# extra paths to look for Javascript notebook extensions
# c.NotebookApp.extra_nbextensions_path = []
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = []
# Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
# c.NotebookApp.extra_template_paths = []
#
# c.NotebookApp.file_to_run = ''
# Use minified JS file or not, mainly used during dev to avoid JS recompilation
# c.NotebookApp.ignore_minified_js = False
# (bytes/sec) Maximum rate at which messages can be sent on iopub before they
# are limited.
# c.NotebookApp.iopub_data_rate_limit = 0
# (msg/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
# c.NotebookApp.iopub_msg_rate_limit = 0
# The IP address the notebook server will listen on.
c.NotebookApp.ip = '*'
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = {}
# Extra variables to supply to jinja templates when rendering.
# c.NotebookApp.jinja_template_vars = {}
# The kernel manager class to use.
# c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
# The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
# c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = ''
# The login handler class to use.
# c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
# The logout handler class to use.
# c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# Dict of Python modules to load as notebook server extensions. Entry values
# can be used to enable and disable the loading of the extensions.
# c.NotebookApp.nbserver_extensions = {}
# The directory to use for notebooks and kernels.
# c.NotebookApp.notebook_dir = ''
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = False
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = ''
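# For illustration only, a configured value has this shape (placeholders, not
# a real salt or hash):
#
# c.NotebookApp.password = 'sha1:<salt>:<hashed-password>'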
# The port the notebook server will listen on.
c.NotebookApp.port = 8888
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
# c.NotebookApp.pylab = 'disabled'
# (sec) Time window used to check the message and data rate limits.
# c.NotebookApp.rate_limit_window = 1.0
# Reraise exceptions encountered loading server extensions?
# c.NotebookApp.reraise_server_extension_failures = False
# DEPRECATED use the nbserver_extensions dict instead
# c.NotebookApp.server_extensions = []
# The session manager class to use.
# c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
# Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
# c.NotebookApp.ssl_options = {}
# Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
# c.NotebookApp.tornado_settings = {}
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# DEPRECATED, use tornado_settings
# c.NotebookApp.webapp_settings = {}
# The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin configuration
#------------------------------------------------------------------------------
# Mixin for configurable classes that work with connection files
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ConnectionFileMixin.connection_file = ''
# set the control (ROUTER) port [default: random]
# c.ConnectionFileMixin.control_port = 0
# set the heartbeat port [default: random]
# c.ConnectionFileMixin.hb_port = 0
# set the iopub (PUB) port [default: random]
# c.ConnectionFileMixin.iopub_port = 0
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ConnectionFileMixin.ip = ''
# set the shell (ROUTER) port [default: random]
# c.ConnectionFileMixin.shell_port = 0
# set the stdin (ROUTER) port [default: random]
# c.ConnectionFileMixin.stdin_port = 0
#
# c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = True
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it is given on the Jupyter command
# line.
# c.KernelManager.kernel_cmd = []
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
# c.Session.check_pid = True
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# Debug output in the Session
# c.Session.debug = False
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# execution key, for signing messages.
# c.Session.key = b''
# path to file containing execution key.
# c.Session.keyfile = ''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The UUID identifying this session.
# c.Session.session = ''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'username'
#------------------------------------------------------------------------------
# MultiKernelManager configuration
#------------------------------------------------------------------------------
# A class for managing multiple kernels.
# The name of the default kernel to start
# c.MultiKernelManager.default_kernel_name = 'python3'
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
#
# c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager configuration
#------------------------------------------------------------------------------
# Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
#
# c.ContentsManager.checkpoints = None
#
# c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
#
# c.ContentsManager.checkpoints_kwargs = {}
# Glob patterns to hide in file and directory listings.
# c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# c.ContentsManager.pre_save_hook = None
# The base name used when creating untitled directories.
# c.ContentsManager.untitled_directory = 'Untitled Folder'
# The base name used when creating untitled files.
# c.ContentsManager.untitled_file = 'untitled'
# The base name used when creating untitled notebooks.
# c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin configuration
#------------------------------------------------------------------------------
# Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
# By default notebooks are saved on disk in a temporary file and then, if
# successfully written, it replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation order
# enforcement (like some networked fs). If set to False, the new notebook is
# written directly over the old one, which could fail (e.g. full filesystem or
# exceeded quota).
# c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager configuration
#------------------------------------------------------------------------------
# Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
# c.FileContentsManager.post_save_hook = None
#
# c.FileContentsManager.root_dir = ''
# DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
# c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
# The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
# c.NotebookNotary.cache_size = 65535
# The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter runtime directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
# c.NotebookNotary.db_file = ''
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = b''
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = ''
#------------------------------------------------------------------------------
# KernelSpecManager configuration
#------------------------------------------------------------------------------
# If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
# c.KernelSpecManager.ensure_native_kernel = True
# The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
# c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
# Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
# c.KernelSpecManager.whitelist = set()
| apache-2.0 |
alexcritschristoph/PaperGraph | app.py | 1 | 3805 | from flask import Flask
from flask import render_template
from flask import request
import json
from flask import jsonify
import uuid
import subprocess
from xml.dom import minidom
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
from sets import Set
import os
import signal
import sys
from math import log
from textblob import TextBlob
app = Flask(__name__)
test = TextBlob("hi")
@app.route("/")
def main_page():
return render_template('index.html')
@app.route('/search', methods=['POST'])
def search_graph():
print "GOT HERE"
query = request.form['query']
#Run the esearch command
file_name = str(uuid.uuid4()) + ".dat"
command = "esearch -query '"+ query +"' -db pubmed | efetch -format Abstract -mode xml "
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
bytes = lines = 0
result = ''
for line in process.stdout:
lines += 1
result += line
if lines > 2200 and '</PubmedArticle>' in line:
os.killpg(process.pid, signal.SIGTERM)
break
print result.count("PubmedArticleSet")
if result.count("PubmedArticleSet") % 2 == 0:
result += "</PubmedArticleSet>"
result = result.replace("&", '')
#Parse results of esearch command
print "****1***"
try:
xmldoc = minidom.parseString(result)
except Exception as e:
print e.message
print "****2***"
itemlist = xmldoc.getElementsByTagName('PubmedArticle')
print "****3***"
years = []
titles = []
abstracts = []
imp_words = []
pmids = []
journals = []
print "****4***"
for s in itemlist:
print "****5***"
print s.toprettyxml()
title = s.getElementsByTagName('ArticleTitle')
title = title[0].toprettyxml().split(">")[1].split("<")[0]
print "found title"
titles.append(title)
imp_words.append(' '.join(' '.join(list(TextBlob(title).noun_phrases)[0:4]).split(' ')[0:4]))
try:
Abstract = s.getElementsByTagName('AbstractText')
Abstract = Abstract[0].toprettyxml().split(">")[1].split("<")[0]
abstracts.append(Abstract)
print "found abstract"
except:
Abstract = ''
abstracts.append(Abstract)
pmid = s.getElementsByTagName('PMID')
pmid = pmid[0].toprettyxml().split(">")[1].split("<")[0]
pmids.append(pmid)
print "found pmid"
journal = s.getElementsByTagName('Journal')
j_title = journal[0].getElementsByTagName('Title')
j_title = j_title[0].toprettyxml().split(">")[1].split("<")[0]
journals.append(j_title)
print "found journal"
date = s.getElementsByTagName("DateCreated")
year = date[0].getElementsByTagName("Year")
year = year[0].toprettyxml().split(">")[1].split("<")[0]
years.append(year)
print "found year"
# Create Objects
papers = {}
i = 0
for paper in pmids:
papers[paper] = {}
papers[paper]['title'] = titles[i]
papers[paper]['year'] = years[i]
papers[paper]['journal'] = journals[i]
papers[paper]['abstract'] = abstracts[i]
papers[paper]['pmid'] = pmids[i]
papers[paper]['imp_words'] = imp_words[i]
i+= 1
vect = TfidfVectorizer(min_df=1)
abstract_vect = vect.fit_transform(abstracts)
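    # TfidfVectorizer L2-normalizes each row by default, so the dot product
    # below yields pairwise cosine similarities between abstracts.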
similarities = (abstract_vect * abstract_vect.T).A
cutoff = np.percentile(similarities, 25)
similarity_array = similarities.tolist()
print similarity_array
links = Set([])
i = 0
j = 0
for row in similarity_array:
for comparison in row:
if comparison < 0.99 and comparison > cutoff:
if i < j:
links.add(str(i) + "," + str(j))
else:
links.add(str(j) + "," + str(i))
j += 1
i += 1
j = 0
print len(sorted(list(links)))
# Print link file
link_data = []
for link in links:
i = pmids[int(link.split(",")[0])]
j = pmids[int(link.split(",")[1])]
link_data.append([i,j])
#Pass [links, nodes] to javascript
return jsonify({"links":link_data, "nodes":papers})
if __name__ == "__main__":
app.run(debug=True) | gpl-2.0 |
molobrakos/home-assistant | homeassistant/components/smappee/__init__.py | 6 | 12597 | """Support for Smappee energy monitor."""
import logging
from datetime import datetime, timedelta
import re
import voluptuous as vol
from requests.exceptions import RequestException
from homeassistant.const import (
CONF_USERNAME, CONF_PASSWORD, CONF_HOST
)
from homeassistant.util import Throttle
from homeassistant.helpers.discovery import load_platform
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Smappee'
DEFAULT_HOST_PASSWORD = 'admin'
CONF_CLIENT_ID = 'client_id'
CONF_CLIENT_SECRET = 'client_secret'
CONF_HOST_PASSWORD = 'host_password'
DOMAIN = 'smappee'
DATA_SMAPPEE = 'SMAPPEE'
_SENSOR_REGEX = re.compile(
r'(?P<key>([A-Za-z]+))\=' +
r'(?P<value>([0-9\.]+))')
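# For example, a fragment such as 'voltage=231.6' in the local report would be
# matched with key='voltage' and value='231.6' (values here are illustrative).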
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Inclusive(CONF_CLIENT_ID, 'Server credentials'): cv.string,
vol.Inclusive(CONF_CLIENT_SECRET, 'Server credentials'): cv.string,
vol.Inclusive(CONF_USERNAME, 'Server credentials'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'Server credentials'): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_HOST_PASSWORD, default=DEFAULT_HOST_PASSWORD):
cv.string
}),
}, extra=vol.ALLOW_EXTRA)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
def setup(hass, config):
"""Set up the Smapee component."""
client_id = config.get(DOMAIN).get(CONF_CLIENT_ID)
client_secret = config.get(DOMAIN).get(CONF_CLIENT_SECRET)
username = config.get(DOMAIN).get(CONF_USERNAME)
password = config.get(DOMAIN).get(CONF_PASSWORD)
host = config.get(DOMAIN).get(CONF_HOST)
host_password = config.get(DOMAIN).get(CONF_HOST_PASSWORD)
smappee = Smappee(client_id, client_secret, username,
password, host, host_password)
if not smappee.is_local_active and not smappee.is_remote_active:
_LOGGER.error("Neither Smappee server or local component enabled.")
return False
hass.data[DATA_SMAPPEE] = smappee
load_platform(hass, 'switch', DOMAIN, {}, config)
load_platform(hass, 'sensor', DOMAIN, {}, config)
return True
class Smappee:
"""Stores data retrieved from Smappee sensor."""
def __init__(self, client_id, client_secret, username,
password, host, host_password):
"""Initialize the data."""
import smappy
self._remote_active = False
self._local_active = False
if client_id is not None:
try:
self._smappy = smappy.Smappee(client_id, client_secret)
self._smappy.authenticate(username, password)
self._remote_active = True
except RequestException as error:
self._smappy = None
_LOGGER.exception(
"Smappee server authentication failed (%s)",
error)
else:
_LOGGER.warning("Smappee server component init skipped.")
if host is not None:
try:
self._localsmappy = smappy.LocalSmappee(host)
self._localsmappy.logon(host_password)
self._local_active = True
except RequestException as error:
self._localsmappy = None
_LOGGER.exception(
"Local Smappee device authentication failed (%s)",
error)
else:
_LOGGER.warning("Smappee local component init skipped.")
self.locations = {}
self.info = {}
self.consumption = {}
self.sensor_consumption = {}
self.instantaneous = {}
if self._remote_active or self._local_active:
self.update()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update data from Smappee API."""
if self.is_remote_active:
service_locations = self._smappy.get_service_locations() \
.get('serviceLocations')
for location in service_locations:
location_id = location.get('serviceLocationId')
if location_id is not None:
self.sensor_consumption[location_id] = {}
self.locations[location_id] = location.get('name')
self.info[location_id] = self._smappy \
.get_service_location_info(location_id)
_LOGGER.debug("Remote info %s %s",
self.locations, self.info[location_id])
for sensors in self.info[location_id].get('sensors'):
sensor_id = sensors.get('id')
self.sensor_consumption[location_id]\
.update({sensor_id: self.get_sensor_consumption(
location_id, sensor_id,
aggregation=3, delta=1440)})
_LOGGER.debug("Remote sensors %s %s",
self.locations,
self.sensor_consumption[location_id])
self.consumption[location_id] = self.get_consumption(
location_id, aggregation=3, delta=1440)
_LOGGER.debug("Remote consumption %s %s",
self.locations,
self.consumption[location_id])
if self.is_local_active:
self.local_devices = self.get_switches()
_LOGGER.debug("Local switches %s", self.local_devices)
self.instantaneous = self.load_instantaneous()
_LOGGER.debug("Local values %s", self.instantaneous)
@property
def is_remote_active(self):
"""Return true if Smappe server is configured and working."""
return self._remote_active
@property
def is_local_active(self):
"""Return true if Smappe local device is configured and working."""
return self._local_active
def get_switches(self):
"""Get switches from local Smappee."""
if not self.is_local_active:
return
try:
return self._localsmappy.load_command_control_config()
except RequestException as error:
_LOGGER.error(
"Error getting switches from local Smappee. (%s)",
error)
def get_consumption(self, location_id, aggregation, delta):
"""Update data from Smappee."""
# Start & End accept epoch (in milliseconds),
# datetime and pandas timestamps
# Aggregation:
# 1 = 5 min values (only available for the last 14 days),
# 2 = hourly values,
# 3 = daily values,
# 4 = monthly values,
# 5 = quarterly values
if not self.is_remote_active:
return
end = datetime.utcnow()
start = end - timedelta(minutes=delta)
try:
return self._smappy.get_consumption(location_id,
start,
end,
aggregation)
except RequestException as error:
_LOGGER.error(
"Error getting comsumption from Smappee cloud. (%s)",
error)
def get_sensor_consumption(self, location_id, sensor_id,
aggregation, delta):
"""Update data from Smappee."""
# Start & End accept epoch (in milliseconds),
# datetime and pandas timestamps
# Aggregation:
# 1 = 5 min values (only available for the last 14 days),
# 2 = hourly values,
# 3 = daily values,
# 4 = monthly values,
# 5 = quarterly values
if not self.is_remote_active:
return
end = datetime.utcnow()
start = end - timedelta(minutes=delta)
try:
return self._smappy.get_sensor_consumption(location_id,
sensor_id,
start,
end, aggregation)
except RequestException as error:
_LOGGER.error(
"Error getting comsumption from Smappee cloud. (%s)",
error)
def actuator_on(self, location_id, actuator_id,
is_remote_switch, duration=None):
"""Turn on actuator."""
# Duration = 300,900,1800,3600
# or any other value for an undetermined period of time.
#
# The comport plugs have a tendency to ignore the on/off signal.
# And because you can't read the status of a plug, it's more
# reliable to execute the command twice.
try:
if is_remote_switch:
self._smappy.actuator_on(location_id, actuator_id, duration)
self._smappy.actuator_on(location_id, actuator_id, duration)
else:
self._localsmappy.on_command_control(actuator_id)
self._localsmappy.on_command_control(actuator_id)
except RequestException as error:
_LOGGER.error(
"Error turning actuator on. (%s)",
error)
return False
return True
def actuator_off(self, location_id, actuator_id,
is_remote_switch, duration=None):
"""Turn off actuator."""
# Duration = 300,900,1800,3600
# or any other value for an undetermined period of time.
#
# The comport plugs have a tendency to ignore the on/off signal.
# And because you can't read the status of a plug, it's more
# reliable to execute the command twice.
try:
if is_remote_switch:
self._smappy.actuator_off(location_id, actuator_id, duration)
self._smappy.actuator_off(location_id, actuator_id, duration)
else:
self._localsmappy.off_command_control(actuator_id)
self._localsmappy.off_command_control(actuator_id)
except RequestException as error:
_LOGGER.error(
"Error turning actuator on. (%s)",
error)
return False
return True
def active_power(self):
"""Get sum of all instantaneous active power values from local hub."""
if not self.is_local_active:
return
try:
return self._localsmappy.active_power()
except RequestException as error:
_LOGGER.error(
"Error getting data from Local Smappee unit. (%s)",
error)
def active_cosfi(self):
"""Get the average of all instantaneous cosfi values."""
if not self.is_local_active:
return
try:
return self._localsmappy.active_cosfi()
except RequestException as error:
_LOGGER.error(
"Error getting data from Local Smappee unit. (%s)",
error)
def instantaneous_values(self):
"""ReportInstantaneousValues."""
if not self.is_local_active:
return
report_instantaneous_values = \
self._localsmappy.report_instantaneous_values()
report_result = \
report_instantaneous_values['report'].split('<BR>')
properties = {}
for lines in report_result:
lines_result = lines.split(',')
for prop in lines_result:
match = _SENSOR_REGEX.search(prop)
if match:
properties[match.group('key')] = \
match.group('value')
_LOGGER.debug(properties)
return properties
def active_current(self):
"""Get current active Amps."""
if not self.is_local_active:
return
properties = self.instantaneous_values()
return float(properties['current'])
def active_voltage(self):
"""Get current active Voltage."""
if not self.is_local_active:
return
properties = self.instantaneous_values()
return float(properties['voltage'])
def load_instantaneous(self):
"""LoadInstantaneous."""
if not self.is_local_active:
return
try:
return self._localsmappy.load_instantaneous()
except RequestException as error:
_LOGGER.error(
"Error getting data from Local Smappee unit. (%s)",
error)
| apache-2.0 |
kubeflow/pipelines | components/kubeflow/kfserving/src/kfservingdeployer.py | 1 | 15866 | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from distutils.util import strtobool
import json
import os
import sys
import time
import yaml
from kubernetes import client
from kfserving import constants
from kfserving import KFServingClient
from kfserving import V1beta1InferenceService
from kfserving import V1beta1InferenceServiceSpec
from kfserving import V1beta1LightGBMSpec
from kfserving import V1beta1ONNXRuntimeSpec
from kfserving import V1beta1PMMLSpec
from kfserving import V1beta1PredictorSpec
from kfserving import V1beta1SKLearnSpec
from kfserving import V1beta1TFServingSpec
from kfserving import V1beta1TorchServeSpec
from kfserving import V1beta1TritonSpec
from kfserving import V1beta1XGBoostSpec
from kfserving.api.kf_serving_watch import isvc_watch
AVAILABLE_FRAMEWORKS = {
'tensorflow': V1beta1TFServingSpec,
'pytorch': V1beta1TorchServeSpec,
'sklearn': V1beta1SKLearnSpec,
'xgboost': V1beta1XGBoostSpec,
'onnx': V1beta1ONNXRuntimeSpec,
'triton': V1beta1TritonSpec,
'pmml': V1beta1PMMLSpec,
'lightgbm': V1beta1LightGBMSpec
}
def create_predictor_spec(framework, storage_uri, canary_traffic_percent,
service_account, min_replicas, max_replicas, containers, request_timeout):
"""
Create and return V1beta1PredictorSpec to be used in a V1beta1InferenceServiceSpec
object.
"""
predictor_spec = V1beta1PredictorSpec(
service_account_name=service_account,
min_replicas=(min_replicas
if min_replicas >= 0
else None
),
max_replicas=(max_replicas
if max_replicas > 0 and max_replicas >= min_replicas
else None
),
containers=(containers or None),
canary_traffic_percent=canary_traffic_percent,
timeout=request_timeout
)
# If the containers field was set, then this is custom model serving.
if containers:
return predictor_spec
if framework not in AVAILABLE_FRAMEWORKS:
raise ValueError("Error: No matching framework: " + framework)
setattr(
predictor_spec,
framework,
AVAILABLE_FRAMEWORKS[framework](storage_uri=storage_uri)
)
return predictor_spec
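# Illustrative sketch (not from the original component): a call like the one
# below would build a predictor spec for a TensorFlow model; the storage URI
# and replica counts are hypothetical.
# predictor_spec = create_predictor_spec(
#     framework='tensorflow', storage_uri='gs://my-bucket/model',
#     canary_traffic_percent=100, service_account='', min_replicas=1,
#     max_replicas=2, containers=[], request_timeout=60)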
def create_custom_container_spec(custom_model_spec):
"""
Given a JSON container spec, return a V1Container object
representing the container. This is used for passing in
custom server images. The expected format for the input is:
{ "image": "test/containerimage",
"port":5000,
"name": "custom-container" }
"""
env = (
[
client.V1EnvVar(name=i["name"], value=i["value"])
for i in custom_model_spec["env"]
]
if custom_model_spec.get("env", "")
else None
)
ports = (
[client.V1ContainerPort(container_port=int(custom_model_spec.get("port", "")), protocol="TCP")]
if custom_model_spec.get("port", "")
else None
)
resources = (
client.V1ResourceRequirements(
requests=(custom_model_spec["resources"]["requests"]
if custom_model_spec.get('resources', {}).get('requests')
else None
),
limits=(custom_model_spec["resources"]["limits"]
if custom_model_spec.get('resources', {}).get('limits')
else None
),
)
if custom_model_spec.get("resources", {})
else None
)
return client.V1Container(
name=custom_model_spec.get("name", "custom-container"),
image=custom_model_spec["image"],
env=env,
ports=ports,
command=custom_model_spec.get("command", None),
args=custom_model_spec.get("args", None),
image_pull_policy=custom_model_spec.get("image_pull_policy", None),
working_dir=custom_model_spec.get("working_dir", None),
resources=resources
)
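# Illustrative sketch: a hypothetical spec in the format documented above and
# how it would be converted into a V1Container (the env entry is arbitrary).
# custom_spec = {"image": "test/containerimage", "port": 5000,
#                "name": "custom-container",
#                "env": [{"name": "MODEL_DIR", "value": "/mnt/models"}]}
# container = create_custom_container_spec(custom_spec)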
def create_inference_service(metadata, predictor_spec):
"""
Build and return V1beta1InferenceService object.
"""
return V1beta1InferenceService(
api_version=constants.KFSERVING_V1BETA1,
kind=constants.KFSERVING_KIND,
metadata=metadata,
spec=V1beta1InferenceServiceSpec(
predictor=predictor_spec
),
)
def submit_api_request(kfs_client, action, name, isvc, namespace=None,
watch=False, timeout_seconds=300):
"""
Creates or updates a Kubernetes custom object. This code is borrowed from the
KFServingClient.create/patch methods as using those directly doesn't allow for
sending in dicts as the InferenceService object which is needed for supporting passing
in raw InferenceService serialized YAML.
"""
custom_obj_api = kfs_client.api_instance
args = [constants.KFSERVING_GROUP,constants.KFSERVING_V1BETA1_VERSION,
namespace, constants.KFSERVING_PLURAL]
if action == 'update':
outputs = custom_obj_api.patch_namespaced_custom_object(*args, name, isvc)
else:
outputs = custom_obj_api.create_namespaced_custom_object(*args, isvc)
if watch:
# Sleep 3 to avoid status still be True within a very short time.
time.sleep(3)
isvc_watch(
name=outputs['metadata']['name'],
namespace=namespace,
timeout_seconds=timeout_seconds)
else:
return outputs
def perform_action(action, model_name, model_uri, canary_traffic_percent, namespace,
framework, custom_model_spec, service_account, inferenceservice_yaml,
request_timeout, autoscaling_target=0, enable_istio_sidecar=True,
watch_timeout=300, min_replicas=0, max_replicas=0):
"""
Perform the specified action. If the action is not 'delete' and `inferenceService_yaml`
was provided, the dict representation of the YAML will be sent directly to the
Kubernetes API. Otherwise, a V1beta1InferenceService object will be built using the
provided input and then sent for creation/update.
:return InferenceService JSON output
"""
kfs_client = KFServingClient()
if inferenceservice_yaml:
# Overwrite name and namespace if exists
if namespace:
inferenceservice_yaml['metadata']['namespace'] = namespace
if model_name:
inferenceservice_yaml['metadata']['name'] = model_name
else:
model_name = inferenceservice_yaml['metadata']['name']
kfsvc = inferenceservice_yaml
elif action != 'delete':
# Create annotations
annotations = {}
if int(autoscaling_target) != 0:
annotations["autoscaling.knative.dev/target"] = str(autoscaling_target)
if not enable_istio_sidecar:
annotations["sidecar.istio.io/inject"] = 'false'
if not annotations:
annotations = None
metadata = client.V1ObjectMeta(
name=model_name, namespace=namespace, annotations=annotations
)
# If a custom model container spec was provided, build the V1Container
# object using it.
containers = []
if custom_model_spec:
containers = [create_custom_container_spec(custom_model_spec)]
# Build the V1beta1PredictorSpec.
predictor_spec = create_predictor_spec(
framework, model_uri, canary_traffic_percent, service_account,
min_replicas, max_replicas, containers, request_timeout
)
kfsvc = create_inference_service(metadata, predictor_spec)
if action == "create":
submit_api_request(kfs_client, 'create', model_name, kfsvc, namespace,
watch=True, timeout_seconds=watch_timeout)
elif action == "update":
submit_api_request(kfs_client, 'update', model_name, kfsvc, namespace,
watch=True, timeout_seconds=watch_timeout)
elif action == "apply":
try:
submit_api_request(kfs_client, 'create', model_name, kfsvc, namespace,
watch=True, timeout_seconds=watch_timeout)
except Exception:
submit_api_request(kfs_client, 'update', model_name, kfsvc, namespace,
watch=True, timeout_seconds=watch_timeout)
elif action == "delete":
kfs_client.delete(model_name, namespace=namespace)
else:
raise ("Error: No matching action: " + action)
model_status = kfs_client.get(model_name, namespace=namespace)
return model_status
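# Illustrative sketch: main() below assembles this call from CLI arguments; a
# direct invocation for a hypothetical sklearn model could look like:
# status = perform_action(
#     action='create', model_name='my-model', model_uri='gs://my-bucket/model',
#     canary_traffic_percent=100, namespace='anonymous', framework='sklearn',
#     custom_model_spec={}, service_account='', inferenceservice_yaml={},
#     request_timeout=60)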
def main():
"""
This parses arguments passed in from the CLI and performs the corresponding action.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--action", type=str, help="Action to execute on KFServing", default="create"
)
parser.add_argument(
"--model-name", type=str, help="Name to give to the deployed model"
)
parser.add_argument(
"--model-uri",
type=str,
help="Path of the S3, GCS or PVC directory containing the model",
)
parser.add_argument(
"--canary-traffic-percent",
type=str,
help="The traffic split percentage between the candidate model and the last ready model",
default="100",
)
parser.add_argument(
"--namespace",
type=str,
help="Kubernetes namespace where the KFServing service is deployed",
default="",
)
parser.add_argument(
"--framework",
type=str,
help="Model serving framework to use. Available frameworks: " +
str(list(AVAILABLE_FRAMEWORKS.keys())),
default=""
)
parser.add_argument(
"--custom-model-spec",
type=json.loads,
help="The container spec for a custom model runtime",
default="{}",
)
parser.add_argument(
"--autoscaling-target", type=str, help="Autoscaling target number", default="0"
)
parser.add_argument(
"--service-account",
type=str,
help="Service account containing s3 credentials",
default="",
)
parser.add_argument(
"--enable-istio-sidecar",
type=strtobool,
help="Whether to inject istio sidecar",
default="True"
)
parser.add_argument(
"--inferenceservice-yaml",
type=yaml.safe_load,
help="Raw InferenceService serialized YAML for deployment",
default="{}"
)
parser.add_argument("--output-path", type=str, help="Path to store URI output")
parser.add_argument("--watch-timeout",
type=str,
help="Timeout seconds for watching until InferenceService becomes ready.",
default="300")
parser.add_argument(
"--min-replicas", type=str, help="Minimum number of replicas", default="-1"
)
parser.add_argument(
"--max-replicas", type=str, help="Maximum number of replicas", default="-1"
)
parser.add_argument("--request-timeout",
type=str,
help="Specifies the number of seconds to wait before timing out a request to the component.",
default="60")
args = parser.parse_args()
action = args.action.lower()
model_name = args.model_name
model_uri = args.model_uri
canary_traffic_percent = int(args.canary_traffic_percent)
namespace = args.namespace
framework = args.framework.lower()
output_path = args.output_path
custom_model_spec = args.custom_model_spec
autoscaling_target = int(args.autoscaling_target)
service_account = args.service_account
enable_istio_sidecar = args.enable_istio_sidecar
inferenceservice_yaml = args.inferenceservice_yaml
watch_timeout = int(args.watch_timeout)
min_replicas = int(args.min_replicas)
max_replicas = int(args.max_replicas)
request_timeout = int(args.request_timeout)
# Default the namespace.
if not namespace:
namespace = 'anonymous'
# If no namespace was provided, but one is listed in the YAML, use that.
if inferenceservice_yaml and inferenceservice_yaml.get('metadata', {}).get('namespace'):
namespace = inferenceservice_yaml['metadata']['namespace']
# Only require model name when an Isvc YAML was not provided.
if not inferenceservice_yaml and not model_name:
parser.error('{} argument is required when performing "{}" action'.format(
'model_name', action
))
# If the action isn't a delete, require 'model-uri' and 'framework' only if an Isvc YAML
# or custom model container spec are not provided.
if action != 'delete':
if not inferenceservice_yaml and not custom_model_spec and not (model_uri and framework):
parser.error('Arguments for {} and {} are required when performing "{}" action'.format(
'model_uri', 'framework', action
))
model_status = perform_action(
action=action,
model_name=model_name,
model_uri=model_uri,
canary_traffic_percent=canary_traffic_percent,
namespace=namespace,
framework=framework,
custom_model_spec=custom_model_spec,
autoscaling_target=autoscaling_target,
service_account=service_account,
enable_istio_sidecar=enable_istio_sidecar,
inferenceservice_yaml=inferenceservice_yaml,
request_timeout=request_timeout,
watch_timeout=watch_timeout,
min_replicas=min_replicas,
max_replicas=max_replicas
)
print(model_status)
if action != 'delete':
# Check whether the model is ready
for condition in model_status["status"]["conditions"]:
if condition['type'] == 'Ready':
if condition['status'] == 'True':
print('Model is ready\n')
break
                print('Model deployment timed out, please check the InferenceService events for more details.')
sys.exit(1)
try:
print( model_status["status"]["url"] + " is the Knative domain.")
print("Sample test commands: \n")
# model_status['status']['url'] is like http://flowers-sample.kubeflow.example.com/v1/models/flowers-sample
print("curl -v -X GET %s" % model_status["status"]["url"])
print("\nIf the above URL is not accessible, it's recommended to setup Knative with a configured DNS.\n"\
"https://knative.dev/docs/install/installing-istio/#configuring-dns")
except Exception:
print("Model is not ready, check the logs for the Knative URL status.")
sys.exit(1)
if output_path:
try:
# Remove some less needed fields to reduce output size.
del model_status['metadata']['managedFields']
del model_status['status']['conditions']
except KeyError:
pass
if not os.path.exists(os.path.dirname(output_path)):
os.makedirs(os.path.dirname(output_path))
with open(output_path, "w") as report:
report.write(json.dumps(model_status, indent=4))
if __name__ == "__main__":
main()
| apache-2.0 |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/utils/multiclass.py | 17 | 12959 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
    because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
"""
y_type = type_of_target(y)
if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']:
raise ValueError("Unknown label type: %r" % y)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
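# Illustrative sketch of the intended calling pattern inside an estimator's
# partial_fit (clf and classes are hypothetical names):
# if _check_partial_fit_first_call(clf, classes):
#     # First call: clf.classes_ has just been set, so allocate per-class state.
#     pass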
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
    y : array-like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
        Number of classes in each column.
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
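# Illustrative example (worked out by hand, not part of the library):
# class_distribution(np.array([[1, 0], [2, 0], [1, 3]])) returns classes
# [array([1, 2]), array([0, 3])], n_classes [2, 2] and class priors
# [array([2/3, 1/3]), array([2/3, 1/3])].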
| gpl-2.0 |
dsm054/pandas | pandas/tests/indexes/timedeltas/test_scalar_compat.py | 1 | 2423 | # -*- coding: utf-8 -*-
"""
Tests for TimedeltaIndex methods behaving like their Timedelta counterparts
"""
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import Index, Series, Timedelta, TimedeltaIndex, timedelta_range
class TestVectorizedTimedelta(object):
def test_tdi_total_seconds(self):
# GH#10939
# test index
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
expt = [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9,
1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456. / 1e9]
tm.assert_almost_equal(rng.total_seconds(), Index(expt))
# test Series
ser = Series(rng)
s_expt = Series(expt, index=[0, 1])
tm.assert_series_equal(ser.dt.total_seconds(), s_expt)
# with nat
ser[1] = np.nan
s_expt = Series([1 * 86400 + 10 * 3600 + 11 * 60 +
12 + 100123456. / 1e9, np.nan], index=[0, 1])
tm.assert_series_equal(ser.dt.total_seconds(), s_expt)
# with both nat
ser = Series([np.nan, np.nan], dtype='timedelta64[ns]')
tm.assert_series_equal(ser.dt.total_seconds(),
Series([np.nan, np.nan], index=[0, 1]))
def test_tdi_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00')])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd._libs.tslibs.frequencies.INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
td.round(freq='foo')
with pytest.raises(ValueError, match=msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
td.round(freq='M')
with pytest.raises(ValueError, match=msg):
elt.round(freq='M')
| bsd-3-clause |
rainest/dance-partner-matching | networkx/readwrite/tests/test_gml.py | 1 | 2734 | #!/usr/bin/env python
import io
from nose.tools import *
from nose import SkipTest
import networkx
class TestGraph(object):
@classmethod
def setupClass(cls):
global pyparsing
try:
import pyparsing
except ImportError:
try:
import matplotlib.pyparsing as pyparsing
except:
raise SkipTest('gml test: pyparsing not available.')
def setUp(self):
self.simple_data="""Creator me
graph [
comment "This is a sample graph"
directed 1
IsPlanar 1
pos [ x 0 y 1 ]
node [
id 1
label "Node 1"
pos [ x 1 y 1 ]
]
node [
id 2
pos [ x 1 y 2 ]
label "Node 2"
]
node [
id 3
label "Node 3"
pos [ x 1 y 3 ]
]
edge [
source 1
target 2
label "Edge from node 1 to node 2"
color [line "blue" thickness 3]
]
edge [
source 2
target 3
label "Edge from node 2 to node 3"
]
edge [
source 3
target 1 label
"Edge from node 3 to node 1"
]
]
"""
def test_parse_gml(self):
G=networkx.parse_gml(self.simple_data,relabel=True)
assert_equals(sorted(G.nodes()),\
['Node 1', 'Node 2', 'Node 3'])
assert_equals( [e for e in sorted(G.edges())],\
[('Node 1', 'Node 2'),
('Node 2', 'Node 3'),
('Node 3', 'Node 1')])
assert_equals( [e for e in sorted(G.edges(data=True))],\
[('Node 1', 'Node 2',
{'color': {'line': 'blue', 'thickness': 3},
'label': 'Edge from node 1 to node 2'}),
('Node 2', 'Node 3',
{'label': 'Edge from node 2 to node 3'}),
('Node 3', 'Node 1',
{'label': 'Edge from node 3 to node 1'})])
def test_read_gml(self):
import os,tempfile
(fd,fname)=tempfile.mkstemp()
fh=open(fname,'w')
fh.write(self.simple_data)
fh.close()
Gin=networkx.read_gml(fname,relabel=True)
G=networkx.parse_gml(self.simple_data,relabel=True)
assert_equals( sorted(G.nodes(data=True)), sorted(Gin.nodes(data=True)))
assert_equals( sorted(G.edges(data=True)), sorted(Gin.edges(data=True)))
os.close(fd)
os.unlink(fname)
    def test_relabel_duplicate(self):
data="""
graph
[
label ""
directed 1
node
[
id 0
label "same"
]
node
[
id 1
label "same"
]
]
"""
fh = io.BytesIO(data.encode('UTF-8'))
fh.seek(0)
assert_raises(networkx.NetworkXError,networkx.read_gml,fh,relabel=True)
| bsd-2-clause |
YeoLab/gscripts | gscripts/rnaseq/splicing_map.py | 1 | 8107 | __author__ = 'gpratt'
import numpy as np
import pandas as pd
import pybedtools
import pyBigWig
from gscripts.general import dataviz
import seaborn as sns
class ReadDensity():
def __init__(self, pos, neg):
self.pos = pyBigWig.open(pos)
self.neg = pyBigWig.open(neg)
def values(self, chrom, start, end, strand):
if strand == "+":
return self.pos.values(chrom, start, end)
elif strand == "-":
return list(reversed(self.neg.values(chrom, start, end)))
else:
raise("Strand neither + or -")
def miso_to_bed(miso_list):
result = []
for exon in miso_list:
chrom, start, stop, strand = exon.split(":")
result.append(pybedtools.create_interval_from_list([chrom, start, stop, "0", "0", strand]))
return pybedtools.BedTool(result)
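# Illustrative example: each MISO exon is a "chrom:start:stop:strand" string,
# e.g. miso_to_bed(["chr1:100:200:+", "chr1:300:400:+"]) yields a two-interval
# BedTool with placeholder name/score fields.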
def five_prime_site(rbp, interval):
if interval.strand == "+":
wiggle = rbp.values(interval.chrom, interval.start - 300, interval.start + 50, interval.strand)
elif interval.strand == "-":
wiggle = rbp.values(interval.chrom, interval.end - 50, interval.end + 300, interval.strand)
return wiggle
def three_prime_site(rbp, interval):
if interval.strand == "+":
wiggle = rbp.values(interval.chrom, interval.end - 50, interval.end + 300, interval.strand)
elif interval.strand == "-":
wiggle = rbp.values(interval.chrom, interval.start - 300, interval.start + 50, interval.strand)
return wiggle
def exon_range(rbp, interval):
if interval.strand == "+":
wiggle = rbp.values(interval.chrom, interval.start - 300, interval.end + 300, interval.strand)
elif interval.strand == "-":
wiggle = rbp.values(interval.chrom, interval.start - 300, interval.end + 300, interval.strand)
else:
print "Strand not correct", interval.strand
raise()
return wiggle
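# plot_miso: for each MISO event (upstream@skipped@downstream exon triplet),
# collects absolute read-density wiggles over the 3' end of the upstream exon,
# both ends of the skipped exon and the 5' end of the downstream exon, and
# returns them as four DataFrames with NaNs filled with zero.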
def plot_miso(miso_names, rbp):
upstream_exon = miso_to_bed([item.split("@")[0] for item in miso_names]).saveas()
skipped_exon = miso_to_bed([item.split("@")[1] for item in miso_names]).saveas()
downstream_exon = miso_to_bed([item.split("@")[2] for item in miso_names]).saveas()
three_prime_upstream = []
for interval in upstream_exon:
wiggle = three_prime_site(rbp, interval)
#if not all(np.isnan(wiggle)):
three_prime_upstream.append(wiggle)
three_prime_upstream = np.abs(pd.DataFrame(three_prime_upstream).fillna(0))
five_prime_se = []
for interval in skipped_exon:
wiggle = five_prime_site(rbp, interval)
#if not all(np.isnan(wiggle)):
five_prime_se.append(wiggle)
five_prime_se = np.abs(pd.DataFrame(five_prime_se).fillna(0))
three_prime_se = []
for interval in skipped_exon:
wiggle = three_prime_site(rbp, interval)
#if not all(np.isnan(wiggle)):
three_prime_se.append(wiggle)
three_prime_se = np.abs(pd.DataFrame(three_prime_se).fillna(0))
five_prime_downstream = []
for interval in downstream_exon:
wiggle = five_prime_site(rbp, interval)
#if not all(np.isnan(wiggle)):
five_prime_downstream.append(wiggle)
five_prime_downstream = np.abs(pd.DataFrame(five_prime_downstream).fillna(0))
return three_prime_upstream, five_prime_se, three_prime_se, five_prime_downstream
def modify_plot(df):
df = df[df.sum(axis=1) > 5]
min_normalized_read_number = min([item for item in df.unstack().values if item > 0])
df = df + min_normalized_read_number
return df.div(df.sum(axis=1), axis=0).dropna().mean()
#return df.mean()
def mats_reformat_geneid(interval):
"""
Given an row in a mats formatted df returns a miso id
:param interval: SE rMATS formatted output
:return: miso formatted id
"""
keys = {"chrom": interval['chr'],
"strand": interval.strand,
"first_start": interval.upstreamES,
"first_stop": interval.upstreamEE,
"middle_start": interval.exonStart_0base,
"middle_stop": interval.exonEnd,
"last_start": interval.downstreamES,
"last_stop": interval.downstreamEE}
return "{chrom}:{first_start}:{first_stop}:{strand}@{chrom}:{middle_start}:{middle_stop}:{strand}@{chrom}:{last_start}:{last_stop}:{strand}".format(**keys)
def mats_get_direction(interval):
return "included" if interval.IncLevelDifference > 0 else "excluded"
def plot_splice_map(rbp, splicing_events, title, out_name):
"""
:param rbp: ReadDensity object to plot against
    :param splicing_events: splicing dataframe; must contain an event_name column with MISO-formatted events,
    a direction column (either "included" or "excluded") and a P-value column used for filtering by significance.
:param title: str title of the plot
:param out_name: str, what to save the plot as
:return:
"""
linewidth = 2.5
max_height = .005
min_height = .0015
exc_color = 'g'
inc_color = 'b'
significant_splicing_events = splicing_events[splicing_events['P-value'] < .05]
#background_events = splicing_events[splicing_events.bayes_factor < 1]
included_events = significant_splicing_events[significant_splicing_events.direction == "included"]
excluded_events = significant_splicing_events[significant_splicing_events.direction == "excluded"]
#background_events = significant_splicing_events[(significant_splicing_events['diff'] >= -.2) & (significant_splicing_events['diff'] <= .2)]
inc_three_prime_upstream, inc_five_prime_se, inc_three_prime_se, inc_five_prime_downstream = plot_miso(included_events.event_name, rbp)
exc_three_prime_upstream, exc_five_prime_se, exc_three_prime_se, exc_five_prime_downstream = plot_miso(excluded_events.event_name, rbp)
#bg_three_prime_upstream, bg_five_prime_se, bg_three_prime_se, bg_five_prime_downstream = plot_miso(background_events.event_name, rbp)
num_rows = 1
num_cols = 4
with dataviz.Figure(out_name, figsize=(num_cols * 2.5, num_rows * 2.5)) as fig:
ax = fig.add_subplot(1,4,1)
ax.plot(modify_plot(inc_three_prime_upstream), linewidth=linewidth, alpha=.7, color=inc_color)
ax.plot(modify_plot(exc_three_prime_upstream), linewidth=linewidth, alpha=.7, color=exc_color)
#ax.plot(modify_plot(bg_three_prime_upstream), linewidth=linewidth, alpha=.7, color='.7')
sns.despine(ax=ax)
ax.set_ylim(min_height, max_height)
ax.set_xticklabels(np.arange(-50, 301, 50))
ax.set_ylabel("Mean Read Density")
ax = fig.add_subplot(1, 4, 2)
ax.plot(modify_plot(inc_five_prime_se), linewidth=linewidth, alpha=.7, color=inc_color)
ax.plot(modify_plot(exc_five_prime_se), linewidth=linewidth, alpha=.7, color=exc_color)
#ax.plot(modify_plot(bg_five_prime_se), linewidth=linewidth, alpha=.7, color='.7')
sns.despine(ax=ax, left=True)
ax.set_ylim(min_height, max_height)
ax.set_xticklabels(np.arange(-300, 51, 50))
ax.set_yticklabels([])
ax = fig.add_subplot(1, 4, 3)
ax.plot(modify_plot(inc_three_prime_se), linewidth=linewidth, alpha=.7, color=inc_color)
ax.plot(modify_plot(exc_three_prime_se), linewidth=linewidth, alpha=.7, color=exc_color)
#ax.plot(modify_plot(bg_three_prime_se), linewidth=linewidth, alpha=.7, color='.7')
ax.set_title(title)
sns.despine(ax=ax, left=True)
ax.set_ylim(min_height, max_height)
ax.set_xticklabels(np.arange(-50, 301, 50))
ax.set_yticklabels([])
ax = fig.add_subplot(1, 4, 4)
ax.plot(modify_plot(inc_five_prime_downstream), label="Included", linewidth=linewidth, alpha=.7, color=inc_color)
ax.plot(modify_plot(exc_five_prime_downstream), label="Excluded", linewidth=linewidth, alpha=.7, color=exc_color)
#ax.plot(modify_plot(bg_five_prime_downstream), label="Background", linewidth=linewidth, alpha=.7, color='.7')
ax.legend()
sns.despine(ax=ax, left=True)
ax.set_ylim(min_height, max_height)
ax.set_yticklabels([])
ax.set_xticklabels(np.arange(-300, 51, 50))
| mit |
icdishb/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
dhruv13J/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data sets consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
apurvbhartia/gnuradio-routing | gnuradio-examples/python/pfb/interpolate.py | 7 | 8340 | #!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 100000 # number of samples to use
self._fs = 2000 # initial sampling rate
self._interp = 5 # Interpolation rate for PFB interpolator
self._ainterp = 5.5 # Resampling rate for the PFB arbitrary resampler
# Frequencies of the signals we construct
freq1 = 100
freq2 = 200
# Create a set of taps for the PFB interpolator
# This is based on the post-interpolation sample rate
self._taps = gr.firdes.low_pass_2(self._interp, self._interp*self._fs, freq2+50, 50,
attenuation_dB=120, window=gr.firdes.WIN_BLACKMAN_hARRIS)
# Create a set of taps for the PFB arbitrary resampler
# The filter size is the number of filters in the filterbank; 32 will give very low side-lobes,
# and larger numbers will reduce these even farther
# The taps in this filter are based on a sampling rate of the filter size since it acts
# internally as an interpolator.
flt_size = 32
self._taps2 = gr.firdes.low_pass_2(flt_size, flt_size*self._fs, freq2+50, 150,
attenuation_dB=120, window=gr.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._interp))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._interp
print "Taps per channel: ", tpc
# Create a couple of signals at different frequencies
self.signal1 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq1, 0.5)
self.signal2 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq2, 0.5)
self.signal = gr.add_cc()
self.head = gr.head(gr.sizeof_gr_complex, self._N)
# Construct the PFB interpolator filter
self.pfb = blks2.pfb_interpolator_ccf(self._interp, self._taps)
# Construct the PFB arbitrary resampler filter
self.pfb_ar = blks2.pfb_arb_resampler_ccf(self._ainterp, self._taps2, flt_size)
self.snk_i = gr.vector_sink_c()
#self.pfb_ar.pfb.print_taps()
#self.pfb.pfb.print_taps()
# Connect the blocks
self.connect(self.signal1, self.head, (self.signal,0))
self.connect(self.signal2, (self.signal,1))
self.connect(self.signal, self.pfb)
self.connect(self.signal, self.pfb_ar)
self.connect(self.signal, self.snk_i)
# Create the sink for the interpolated signals
self.snk1 = gr.vector_sink_c()
self.snk2 = gr.vector_sink_c()
self.connect(self.pfb, self.snk1)
self.connect(self.pfb_ar, self.snk2)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
# Plot input signal
fs = tb._fs
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-2.5, 2.5])
sp1_t.set_title("Input Signal", weight="bold")
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot output of PFB interpolator
fs_int = tb._fs*tb._interp
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk1.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_int/2.0, fs_int/2.0, fs_int/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_int = 1.0/fs_int
Tmax = len(d)*Ts_int
t_o = scipy.arange(0, Tmax, Ts_int)
x_o1 = scipy.array(d)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o1.real, "b-o")
#p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
# Plot output of PFB arbitrary resampler
fs_aint = tb._fs * tb._ainterp
sp3_f = fig3.add_subplot(2, 1, 1)
d = tb.snk2.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_aint/2.0, fs_aint/2.0, fs_aint/float(X_o.size))
p3_f = sp3_f.plot(f_o, X_o, "b")
sp3_f.set_xlim([min(f_o), max(f_o)+1])
sp3_f.set_ylim([-200.0, 50.0])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Frequency (Hz)")
sp3_f.set_ylabel("Power (dBW)")
Ts_aint = 1.0/fs_aint
Tmax = len(d)*Ts_aint
t_o = scipy.arange(0, Tmax, Ts_aint)
x_o2 = scipy.array(d)
sp3_f = fig3.add_subplot(2, 1, 2)
p3_f = sp3_f.plot(t_o, x_o2.real, "b-o")
p3_f = sp3_f.plot(t_o, x_o1.real, "m-o")
#p3_f = sp3_f.plot(t_o, x_o2.imag, "r-o")
sp3_f.set_ylim([-2.5, 2.5])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Time (s)")
sp3_f.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
wuxue/altanalyze | QC.py | 1 | 42383 |
import traceback
import sys
try:
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
import matplotlib
matplotlib.rcParams['backend'] = 'TkAgg'
import matplotlib.pyplot as pylab
matplotlib.rcParams['axes.linewidth'] = 0.5
matplotlib.rcParams['pdf.fonttype'] = 42
#matplotlib.rcParams['font.family'] = 'sans-serif'
#matplotlib.rcParams['font.sans-serif'] = 'Arial'
import numpy
except Exception:
print traceback.format_exc()
import string
import time
import random
import math
import sys, os
import statistics
import ExpressionBuilder
import export
alphabet = map(chr, range(65, 91))+map(chr, range(97, 124)) ### Python magic
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def filepath(filename):
try:
import unique ### local to AltAnalyze
fn = unique.filepath(filename)
except Exception:
### Should work fine when run as a script with this (AltAnalyze code is specific for packaging with AltAnalyze)
dir=os.path.dirname(dirfile.__file__)
try: dir_list = os.listdir(filename); fn = filename ### test to see if the path can be found (then it is the full path)
except Exception: fn=os.path.join(dir,filename)
return fn
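# Builds per-sample QC summaries from an expression/counts file: for qc_type
# 'distribution' it tallies how often each rounded (log2 for counts/RNASeq)
# expression value occurs in each sample; for 'feature'/'totals' it collects
# values per feature class (exon, intron, junction) from a counts file.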
def summarizeExpressionData(filename,qc_type):
start_time = time.time()
fn = filepath(filename)
matrix=[]
row_header=[]
import RNASeq
platform = RNASeq.checkExpressionFileFormat(fn,"3'array")
x=0
if '/' in filename:
dataset_name = string.split(filename,'/')[-1][:-4]
else:
dataset_name = string.split(filename,'\\')[-1][:-4]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if data[0] =='#': x=0
elif x==0:
group_db, column_header, qc_db = assignGroupColors(t[1:],qc_type)
x=1
else:
if ' ' not in t and '' not in t: ### Occurs for rows with missing data
if qc_type == 'distribution':
#values = map(lambda x: round(float(x), 1), t[1:]) ### report value to one decimal place
values = map(lambda x: float(x), t[1:])
i=0
for r in values:
if r!=0:
if 'counts' in dataset_name or platform == 'RNASeq':
r = round(math.log(r,2),1)
else:
r = round(r,1)
try:
qc_db[column_header[i]][r]+=1 ### count this rounded expression value once for this filename
except Exception:
qc_db[column_header[i]][r]=1
i+=1
if qc_type == 'feature' or qc_type == 'totals':
if 'counts' in dataset_name:
feature_id = string.split(t[0],'=')[0]
if '-' in feature_id: feature = 'junction'
elif ':I' in feature_id: feature = 'intron'
elif ':E' in feature_id: feature = 'exon'
values = map(lambda x: float(x), t[1:])
i=0
for r in values:
if r!=0:
if qc_type == 'feature':
r = round(math.log(r,2),1)
try:
qc_db[column_header[i]][feature].append(r) ### add all expression values
except Exception:
qc_db[column_header[i]][feature] = [r]
i+=1
x+=1
time_diff = str(round(time.time()-start_time,1))
print 'Dataset import in %s seconds' % time_diff
return qc_db,dataset_name
def reformatAltHeaders(headers):
new_headers = []
for i in headers:
try: group, i = string.split(i,':')
except Exception: pass
new_headers.append(i)
return new_headers
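# Imports exon/junction-level expression or splicing scores for the genes of
# interest, optionally averaging samples by group (using the groups. file),
# and then either plots per-gene exon expression profiles or exports and
# clusters a per-gene heatmap, depending on analysisType.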
def importTableEntries(filename,filter_db,ensembl_exon_db,gene_db,root_dir,transpose,display,showIntrons,analysisType='plot'):
import collections
average_samples = True
if showIntrons == 'yes': include_introns = True
else: include_introns = False
uid_db={} ### probeset or AltAnalyze RNA-Seq ID keyed
uid_list={} ### ordered from first to last exon region
uid_gene_db={} ### Lets us look at multiple genes
try:
import UI
biotypes = UI.getBiotypes(filename)
except Exception: biotypes={}
for gene in ensembl_exon_db:
uid_list[gene]=[]
for (index,ed,id) in ensembl_exon_db[gene]:
proceed = False
if 'exp.' in filename:
if include_introns:
proceed = True
elif 'E' in ed.ExonID():
proceed = True
else: ### Include introns for splicing index view
if include_introns == True: proceed = True
elif 'E' in ed.ExonID(): proceed = True
if proceed:
uid_db[id] = ed
uid_list[gene].append(id)
uid_gene_db[id]=gene
if '_vs_' in filename: ### If one two groups, this is what will be output to the RawSplice folder - need to have this alternate way of getting the expression file location
rootdir = string.split(filename, 'AltResults')[0]
exp_dir = getValidExpFile(rootdir+'ExpressionInput')
alt_groups_dir = string.split(exp_dir, 'ExpressionInput')[0]+'ExpressionInput/groups.'+findFilename(exp_dir)
alt_groups_dir = string.replace(alt_groups_dir,'exp.','')
start_time = time.time()
fn = filepath(filename)
matrix_gene_db={}
stdev_gene_matrix_db={}
row_header_gene={}
ids={}
x=0
if 'heatmap' in analysisType:
average_samples = False
if '/' in filename:
dataset_name = string.split(filename,'/')[-1][:-4]
else:
dataset_name = string.split(filename,'\\')[-1][:-4]
for line in open(fn,'rU').xreadlines():
data = line.strip()
t = string.split(data,'\t')
if data[0]=='#': x=0
elif x==0:
if platform == 'RNASeq':
removeExtension=True
else:
removeExtension=False
group_db, column_header, sample_name_db = assignGroupColors(t[1:],'',removeExtension=removeExtension)
x=1
altresults = False
if average_samples:
if 'AltResults' in filename:
altresults=True
groups_dir = string.split(filename, 'AltResults')[0]+'ExpressionInput/groups.'+findFilename(filename)
if verifyFile(groups_dir)==False:
groups_dir = alt_groups_dir
new_column_header = reformatAltHeaders(t[3:])
start = 3
else:
if 'exp.' in filename:
groups_dir = string.replace(filename,'exp.','groups.')
else:
groups_dir = string.replace(filename,'counts.','groups.')
new_column_header = column_header
start = 1 ### starting index with numeric values
groups_dir = string.replace(groups_dir,'stats.','groups.')
groups_dir = string.replace(groups_dir,'-steady-state.txt','.txt') ### groups is for the non-steady-state file
try: group_index_db=collections.OrderedDict()
except Exception:
import ordereddict
group_index_db = ordereddict.OrderedDict()
### use comps in the future to visualize group comparison changes
sample_list,group_sample_db,group_db,group_name_sample_db,comp_groups,comps_name_db = ExpressionBuilder.simpleGroupImport(groups_dir)
for item in sample_list:
group_name = group_db[item]
proceed=False
try: sample_index = new_column_header.index(item); proceed=True
except Exception:
try:
item = string.replace(item,'.bed','')
item = string.replace(item,'.CEL','') ### Probe-level analyses as RNA-Seq
item = string.replace(item,'.cel','')
item = string.replace(item,'.txt','')
item = string.replace(item,'.TXT','')
item = string.replace(item,'.TAB','')
item = string.replace(item,'.tab','')
sample_index = new_column_header.index(item)
proceed=True
except Exception:
pass
#print [item]
#print column_header
#print Error
if proceed:
try: group_index_db[group_name].append(sample_index)
except Exception:
try: group_index_db[group_name] = [sample_index] ### dictionary of group to input file sample indexes
except Exception: pass ### Occurs when analyzing splicing-index for two groups when more than two groups exist (error from 5 lines up)
groups = map(str, group_index_db) ### store group names
new_sample_list = map(lambda item: group_db[item], sample_list) ### lookup index of each sample in the ordered group sample list
column_header = groups
else:
if 'AltResults' in filename: start = 3
else: start = 1 ### starting index with numeric values
column_header = t[start-1:]
row_number=1
else:
if ' ' not in t and '' not in t: ### Occurs for rows with missing data
uid = t[start-1]
if ';' in uid:
uid = string.split(uid,';')[0]
ids[uid]=None
ens_geneID = string.split(uid,':')[0]
#if ens_geneID in gene_db: print uid
if uid in filter_db or ('heatmap' in analysisType and ens_geneID in gene_db):
try:
if len(biotypes)==1 and 'junction' in biotypes:
gene = ens_geneID
else:
gene = uid_gene_db[uid]
try: row_header_gene[gene].append(uid)
except Exception: row_header_gene[gene] = [uid]
if average_samples == False:
values = map(float,t[start:])
try: matrix_gene_db[gene].append(values)
except Exception: matrix_gene_db[gene]=[values]
else:
if platform == 'RNASeq' and altresults==False:
### Convert to log2 RPKM values - or counts
values = map(lambda x: math.log(float(x),2), t[start:])
else:
values = map(float,t[start:])
if 'AltResults' in filename: ### If splicing scores, normalize these to the mean values
mean = statistics.avg(values)
values = map(lambda x: x-mean, values)
avg_ls=[]; std_ls = []
for group_name in group_index_db:
group_values = map(lambda x: values[x], group_index_db[group_name]) ### simple and fast way to reorganize the samples
avg = statistics.avg(group_values)
try: st_err = statistics.stdev(group_values)/math.sqrt(len(group_values))
except Exception:
### Occurs if no replicates in the dataset
st_err = 0
avg_ls.append(avg)
std_ls.append(st_err)
try: matrix_gene_db[gene].append(avg_ls)
except Exception: matrix_gene_db[gene]=[avg_ls]
try: stdev_gene_matrix_db[gene].append(std_ls)
except Exception: stdev_gene_matrix_db[gene]=[std_ls]
except Exception:
#print traceback.format_exc()
pass
x+=1
global colors
original_column_header = list(column_header)
if len(uid_list)==0:
print 'No genes found in the exon expression database'; forceNoExonExpError
successfully_output_genes=0
display_count=0 ### Only display a certain number of genes
for last_gene in uid_list: pass
for gene in uid_list:
fig = pylab.figure() ### Create this here - resulting in a single figure for memory purposes
new_header = []
new_matrix = []
new_stdev = []
annotation_list=[]
gene_symbol = gene_db[gene]
try: matrix = matrix_gene_db[gene]
except Exception:
print gene_symbol, 'not in alternative expression database'
continue ### go the next gene - no alt.expression for this gene
row_header = row_header_gene[gene]
try: stdev_matrix = stdev_gene_matrix_db[gene]
except Exception: pass
for uid in uid_list[gene]:
#print row_header;sys.exit()
try:
i = row_header.index(uid) ### If the ID is in the filtered annotated exon list (not just core)
new_header.append(uid)
try: new_matrix.append(matrix[i])
except Exception: print uid, i,len(matrix);sys.exit()
ed = uid_db[uid]
annotation_list.append(ed)
try: new_stdev.append(stdev_matrix[i])
except Exception: pass
except Exception: pass
if len(new_matrix)>0:
matrix = new_matrix
if len(new_header)>0:
row_header = new_header
if 'heatmap' in analysisType:
export_dir = root_dir + gene_symbol + '-heatmap.txt'
export_obj = export.ExportFile(export_dir)
export_obj.write(string.join(column_header,'\t')+'\n')
ki=0
if len(annotation_list)>0:
for ed in annotation_list:
if 'AltResults' not in filename and platform == 'RNASeq':
values = map(lambda x: math.log(x,2), matrix[ki])
else: values = matrix[ki]
export_obj.write(string.join([ed.ExonID()] + map(str,values),'\t')+'\n')
ki+=1
row_metric = 'euclidean'; row_method = None
else:
### Just junctions analyzed here... no sorted junctions yet
ki=0
for uid in row_header_gene[gene]:
if 'AltResults' not in filename and platform == 'RNASeq':
values = map(lambda x: math.log(x,2), matrix[ki])
else: values = matrix[ki]
export_obj.write(string.join([uid] + map(str,values),'\t')+'\n')
ki+=1
row_metric = 'euclidean'; row_method = 'average'
export_obj.close()
import clustering
column_metric = 'euclidean'; column_method = 'hopach'
color_gradient = 'red_black_sky'; transpose = False; graphic_links=[]
if ki>100: transpose = True
if gene == last_gene: display = True
else: display = False
graphic_links = clustering.runHCexplicit(export_dir, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=display, Normalize=True, compressAxis = False, contrast = 2.5)
successfully_output_genes+=1
else:
stdev_matrix = new_stdev
time_diff = str(round(time.time()-start_time,1))
#print '%d rows and %d columns imported for %s in %s seconds...' % (len(matrix),len(column_header),dataset_name,time_diff)
if transpose == True:
            matrix = map(numpy.array, zip(*matrix)) ### transpose: zip yields tuples, map converts them to numpy arrays
column_header, row_header = row_header, original_column_header
stdev_matrix = map(numpy.array, zip(*stdev_matrix))
matrix = numpy.array(matrix)
stdev_matrix = numpy.array(stdev_matrix)
try:
if len(uid_list)>10:
#if display_count==5: display=False
display=False
if display_count==0:
                ### store a consistent color palette to use
colors=[]
"""
k=0
while k < len(row_header):
colors.append(tuple(rand(3)))
k+=1"""
#http://stackoverflow.com/questions/3016283/create-a-color-generator-from-given-colormap-in-matplotlib
cm = pylab.cm.get_cmap('gist_rainbow') #gist_ncar
for i in range(len(row_header)):
colors.append(cm(1.*i/len(row_header))) # color will now be an RGBA tuple
plotExonExpression(fig,matrix,stdev_matrix,row_header,column_header,dataset_name,annotation_list,gene_symbol,root_dir,display=display)
successfully_output_genes+=1
display_count+=1
except Exception:
print traceback.format_exc();sys.exit()
print gene_symbol, 'failed'
try: pylab.close()
except Exception: pass
if successfully_output_genes>0:
#try: print 'Gene graphs exported to ExonPlots...'
#except Exception: pass
pass
else:
print '\nWARNING!!!! No genes with associated alternative exon evidence found\n'; forceNoExonExpError
try:
import gc
fig.clf()
pylab.close()
gc.collect()
except Exception:
pass
def importDataSimple(filename,transpose):
start_time = time.time()
fn = filepath(filename)
matrix=[]
row_header=[]
x=0
if '/' in filename:
dataset_name = string.split(filename,'/')[-1][:-4]
else:
dataset_name = string.split(filename,'\\')[-1][:-4]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if data[0]=='#': x=0
elif x==0:
group_db, column_header, sample_name_db = assignGroupColors(t[1:],'')
x=1
else:
            if ' ' not in t and '' not in t: ### skip rows with blank or missing values
row_header.append(t[0])
t = map(float,t[1:])
if (abs(max(t)-min(t)))>0:
matrix.append(t)
x+=1
time_diff = str(round(time.time()-start_time,1))
print '%d rows and %d columns imported for %s in %s seconds...' % (len(matrix),len(column_header),dataset_name,time_diff)
if transpose == True:
        matrix = map(numpy.array, zip(*matrix)) ### transpose: zip yields tuples, map converts them to numpy arrays
column_header, row_header = row_header, column_header
return numpy.array(matrix), column_header, row_header, dataset_name, group_db
def assignGroupColors(t,qc_type,removeExtension=True):
""" Assign a unique color to each group """
k = 0
sample_name_db={}
column_header=[]; group_db={}; color_db={}
color_list = ['r', 'b', 'y', 'g', 'w', 'k', 'm']
for i in t:
if len(i)>4:
if i[-4] == '.' and removeExtension:
i = i[:-4] ### remove the .cel, .txt, .tab or .bed
if ':' in i:
group,i = string.split(i,':')
try: color = color_db[group]
except Exception:
try: color_db[group] = color_list[k]
except Exception:
### If not listed in the standard color set add a new random color
rgb = tuple(rand(3)) ### random color
color_list.append(rgb)
color_db[group] = color_list[k]
color = color_db[group]
k+=1
group_db[i] = group, color
column_header.append(i)
sample_name_db[i]=db={} ### Initialize a dictionary within the filename dictionary
return group_db, column_header, sample_name_db
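# Illustrative note: sample headers are expected in the form 'group:sample',
# e.g. 'WT:sample1.bed' yields group 'WT' and sample 'sample1' (the 4-character
# extension is stripped), and all samples in a group share one color.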
def plotNormalizationResults(matrix,row_headers,column_headers,dataset_name,plot_type):
### See - http://bib.oxfordjournals.org/content/early/2011/04/15/bib.bbq086.full Fig.1
fig = pylab.figure()
if plot_type == 'pm_mean':
pylab.title('Average Raw Intensity Signal - %s' % dataset_name)
pylab.ylabel('Signal Intensity')
else:
pylab.title('Deviation of Residuals from Median - %s' % dataset_name)
pylab.ylabel('Mean absolute deviation')
pylab.subplots_adjust(left=0.125, right=0.95, top=0.9, bottom=0.40)
ax = fig.add_subplot(111)
i = row_headers.index(plot_type)
y_axis = matrix[i]
pylab.plot(range(len(y_axis)),y_axis,'b*',marker='s',markersize=7)
    ### Increase the max of the y-axis to accommodate the legend
pylab.ylim(ymin=0)
new_max = increaseYaxis(max(y_axis))
pylab.ylim(ymax=new_max)
#ax.yaxis.set_ticks([min(all_totals),max(all_totals)])
ax.xaxis.set_ticks([-1]+range(len(y_axis)+1))
xtickNames = pylab.setp(pylab.gca(), xticklabels=['']+column_headers+[''])
pylab.setp(xtickNames, rotation=90, fontsize=10)
filename = 'QC-%s-%s.pdf' % (dataset_name,plot_type)
pylab.savefig(root_dir + filename)
#print 'Exporting:',filename
filename = filename[:-3]+'png'
pylab.savefig(root_dir + filename) #,dpi=200
graphic_link.append(['QC - '+plot_type,root_dir+filename])
try:
import gc
fig.clf()
pylab.close()
gc.collect()
except Exception:
pass
def plotExonExpression(fig,matrix,stdev_matrix,row_headers,column_headers,dataset_name,annotation_list,gene_symbol,root_dir,display=True):
""" Display exon-level expression for splicing-index, RPKMs or Affymetrix probeset intensities """
#print len(matrix);sys.exit()
ax = fig.add_subplot(111)
print '.',
if 'exp.' in dataset_name:
datatype = '-RawExonExp'
pylab.ylabel('Exon Expression (log2)')
pylab.title(gene_symbol+' Exon Expression - '+dataset_name)
else:
datatype = '-AltExonExp'
pylab.ylabel('Splicing Index Fold Change')
pylab.title(gene_symbol+' Exon Splicing Index - '+dataset_name)
pylab.xlabel('Exon Regions')
data_max=[]
data_min=[]
for values in matrix:
data_max.append(max(values))
data_max.append(abs(min(values)))
data_min.append(min(values))
    ### Increase the max of the y-axis to accommodate the legend
try: new_max = increaseYaxis(max(data_max))
except Exception: print len(matrix);sys.exit()
total_min = min(data_min)
if max(data_max) < 3:
new_max = 5
if total_min > -3: total_min = -4
else:
new_max = new_max/1.3
pylab.ylim(ymax=new_max,ymin=total_min)
exon_annotations = []
for ed in annotation_list:
exon_annotations.append(ed.ExonID())
if len(column_headers) != len(exon_annotations):
exon_annotations = map(lambda x: string.split(x,':')[-1],column_headers)
i=0
color_list = ['r', '#ff9900', 'y', 'g', 'b', '#ff66cc', '#9900ff', '#996633', '#336666']
font_ratio = 40.00/len(exon_annotations) ### at about 30 exons a font of 14 looks OK
if font_ratio<1: fontsize = font_ratio*14
else: fontsize = 12
for sample_name in row_headers:
#name = '(%s) %s' % (alphabet[i],sample_name)
if len(row_headers)<10: color = color_list[i]
else:
color = colors[i]
x_axis = list(range(0, len(column_headers), 1)) ### Numeric x-axis values (can be spaced differently)
y_axis = matrix[i]
err = stdev_matrix[i]
if sum(err)==0:
pylab.errorbar(x_axis,y_axis,color=color,linewidth=1.75,label=sample_name)
else:
#pylab.plot(x_axis,y_axis,color=color,linewidth=1.75)
pylab.errorbar(x_axis,y_axis,yerr=err,color=color,linewidth=1.75,label=sample_name)
#k = y_axis.index(max(y_axis))
#pylab.text(x_axis[k],y_axis[k],alphabet[i],fontsize=12) ### Plot this on the maximum value
a = [-1]+range(len(y_axis))+[len(y_axis)]
ax.xaxis.set_ticks(a)
xtickNames = pylab.setp(pylab.gca(), xticklabels=['']+exon_annotations)
pylab.setp(xtickNames, rotation=90, fontsize=fontsize)
i+=1
loc = "upper center"
size = 12
ncol = 1
if len(row_headers)>5:
ncol = 2
if len(row_headers)>20:
loc = 'upper center'
ncol = 3
size = 8
if len(row_headers)>30:
size = 5.5
ncol = 4
if len(row_headers)>40:
size = 5
ncol = 4
if loc == 'upper center':
        # Shrink the current axis height so the legend fits above the plot
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height*0.6])
try: ax.legend(loc=loc, ncol=ncol, bbox_to_anchor=(0., 1.02,1.,.8),fontsize = size) ### move the legend over to the right of the plot
except Exception:
### Older versions of Matplotlib
try:
                pylab.legend(prop={'size': 'small'})
ax.legend(loc=loc, ncol=ncol, bbox_to_anchor=(0., 1.02,1.,.8))
except Exception:
pass
else:
pylab.legend(loc=loc, ncol=ncol, prop={'size': size})
filename = gene_symbol+datatype+'.pdf'
pylab.savefig(root_dir + filename)
filename = filename[:-3]+'png'
pylab.savefig(root_dir + filename,dpi=120) #,dpi=200
if display:
pylab.show()
try:
import gc
fig.clf()
pylab.close()
gc.collect()
except Exception:
pass
def plotTotalExpression(qc_db,dataset_name,features):
fig = pylab.figure()
#pylab.xlabel('Biological Sample Names')
pylab.ylabel('Total Number of Reads')
pylab.title('Total Expression for Transcript Features - %s' % dataset_name)
pylab.subplots_adjust(left=0.125, right=0.95, top=0.9, bottom=0.40)
ax = fig.add_subplot(111)
color_db = {}
color_db['exon']='r*'
color_db['junction']='b*'
color_db['intron']='y*'
feature_summary_db={}
samples=[]
for sample_name in qc_db:
samples.append(sample_name)
samples.sort()
all_totals=[]
for sample_name in samples:
ls=[]; x_ls=[]; y_ls=[]
for feature_type in qc_db[sample_name]:
total_exp = sum(qc_db[sample_name][feature_type])
try:
feature_summary_db[feature_type].append(total_exp)
except Exception:
feature_summary_db[feature_type] = [total_exp]
all_totals.append(total_exp)
for feature_type in feature_summary_db:
y_axis = feature_summary_db[feature_type]
pylab.plot(range(len(y_axis)),y_axis,color_db[feature_type],marker='o',markersize=7,label=feature_type)
    ### Increase the max of the y-axis to accommodate the legend
new_max = increaseYaxis(max(all_totals))
pylab.ylim(ymax=new_max)
#ax.yaxis.set_ticks([min(all_totals),max(all_totals)])
ax.xaxis.set_ticks([-1]+range(len(y_axis)+1))
xtickNames = pylab.setp(pylab.gca(), xticklabels=['']+samples+[''])
pylab.setp(xtickNames, rotation=90, fontsize=10)
pylab.legend(loc="upper right", prop={'size': 11})
filename = 'QC-%s-TotalFeatureExpression.pdf' % dataset_name
pylab.savefig(root_dir + filename)
#print 'Exporting:',filename
filename = filename[:-3]+'png'
pylab.savefig(root_dir + filename)
graphic_link.append(['QC - Total Feature Expression',root_dir+filename])
try:
import gc
fig.clf()
pylab.close()
gc.collect()
except Exception:
pass
def increaseYaxis(max):
count = len(str(int(max)))-2
round_down_max = round(max,-1*count) ### Rounds major unit down
#new_max = round_down_max+(round_down_max/2)
new_max = max*2
return new_max
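# Note: increaseYaxis simply doubles the supplied maximum so the legend drawn at
# the top of each plot does not overlap the data; the rounded value computed
# above is currently unused.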
def plotFeatureBoxPlots(qc_db,dataset_name,feature_type):
pylab.figure()
pylab.xlabel('Biological Sample Names')
pylab.ylabel('Read Counts - Log2')
pylab.title('Expression BoxPlots for %ss - %s' % (feature_type,dataset_name))
#pylab.subplots_adjust(left=0.085, right=0.95, top=0.2, bottom=0.35)
pylab.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.35)
#axes = getAxes(scores) ### adds buffer space to the end of each axis and creates room for a legend
#pylab.axis(axes)
boxplots=[]
samples=[]
sample_sorted_list=[]
for sample_name in qc_db:
try: qc = qc_db[sample_name][feature_type]
except Exception:
print 'No junction data found for at least one sample:',sample_name; forceExit
sample_sorted_list.append([statistics.avg(qc),statistics.stdev(qc),sample_name])
sample_sorted_list.sort()
sample_sorted_list.reverse()
filename = 'QC-%s-BoxPlot-%s.pdf' % (dataset_name,feature_type)
export_obj = export.ExportFile(root_dir + filename[:-4]+'.txt')
export_obj.write('SampleID\tAverage Expression\n')
firstEntry=True
for (mean,stdev,sample_name) in sample_sorted_list:
ls=[]; x_ls=[]; y_ls=[]
qc = qc_db[sample_name][feature_type]
boxplots.append(qc)
samples.append(sample_name)
export_obj.write(sample_name+'\t'+str(mean)+'\n')
if firstEntry:
threshold=mean-2*stdev
firstEntry=False
else:
if mean<threshold:
print sample_name,'expression is considered very low (2 standard deviations away from the max).'
pylab.boxplot(boxplots, notch=0, whis=1.5, positions=None, widths=None, patch_artist=False)
#pylab.boxplot(boxplots, notch=0, sym='+', vert=1, whis=1.5, positions=None, widths=None, patch_artist=False)
xtickNames = pylab.setp(pylab.gca(), xticklabels=samples)
pylab.setp(xtickNames, rotation=90, fontsize=10)
export_obj.close()
#print 'Exporting:',filename
pylab.savefig(root_dir + filename)
filename = filename[:-3]+'png'
pylab.savefig(root_dir + filename) #,dpi=200
graphic_link.append(['QC - BoxPlot-'+feature_type+' Expression',root_dir+filename])
try:
import gc
        pylab.gcf().clf()
pylab.close()
gc.collect()
except Exception:
pass
def rand(i):
    ### Returns i uniform random floats in [0,1); avoids relying on pylab/scipy's rand()
return map(lambda x: random.random(), ['']*i)
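# A minimal numpy-based alternative (sketch; assumes numpy is imported at the
# top of this module, as it is used elsewhere). Existing calls to rand() are
# left unchanged.
def randNumpy(i):
    return list(numpy.random.rand(i))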
def plotExpressionDistribution(qc_db,dataset_name):
pylab.figure()
pylab.xlabel('Log2 Expression (x10-1 Precision)')
pylab.ylabel('Number of Observations')
pylab.title('Biological Sample Expression Distribution - '+dataset_name)
#axes = getAxes(scores) ### adds buffer space to the end of each axis and creates room for a legend
#pylab.axis(axes)
i=0
color_list = ['r', 'b', 'y', 'g', 'k', 'm']
sample_list=[]
for sample_name in qc_db:
sample_list.append(sample_name)
sample_list.sort()
for sample_name in sample_list:
ls=[]; x_ls=[]; y_ls=[]
qc = qc_db[sample_name]
try: code = alphabet[i]
except Exception: code = str(i)
name = '(%s) %s' % (code,sample_name)
for x in qc:
ls.append((x,qc[x]))
ls.sort() ### Get all x,y values into a sorted list and then provide those two lists to plot
for (x,y) in ls:
x_ls.append(x); y_ls.append(y)
if len(qc_db)<7: color = color_list[i]
else: color = tuple(rand(3))
pylab.plot(x_ls,y_ls,color=color,label=name,linewidth=1.75)
try:
k = y_ls.index(max(y_ls))
pylab.text(x_ls[k],y_ls[k],code,fontsize=7) ### Plot this on the maximum value
except Exception: pass
i+=1
if len(sample_list)<15:
font_size = 11
elif len(sample_list)<25:
font_size = 8
elif len(sample_list)<45:
font_size = 5.5
else:
font_size = 5
pylab.legend(loc="upper right", prop={'size': font_size})
filename = 'QC-%s-distribution.pdf' % dataset_name
pylab.savefig(root_dir + filename)
#print 'Exporting:',filename
filename = filename[:-3]+'png'
pylab.savefig(root_dir + filename) #,dpi=200
graphic_link.append(['QC - Expression Distribution',root_dir+filename])
try:
import gc
        pylab.gcf().clf()
pylab.close()
gc.collect()
except Exception:
pass
def getAxes(x_axis,y_axis):
    x_axis_min = min(x_axis)+min(x_axis)/2.5
    x_axis_max = max(x_axis)+max(x_axis)/5
    y_axis_min = min(y_axis)+min(y_axis)/5
    y_axis_max = max(y_axis)+max(y_axis)/5
return [x_axis_min, x_axis_max, y_axis_min, y_axis_max]
def findParentDir(filename):
filename = string.replace(filename,'//','/')
filename = string.replace(filename,'\\','/')
x = string.find(filename[::-1],'/')*-1
return filename[:x]
def findFilename(filename):
filename = string.replace(filename,'//','/')
filename = string.replace(filename,'\\','/')
x = string.find(filename[::-1],'/')*-1 ### get just the parent directory
return filename[x:]
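# Illustrative behaviour with a hypothetical path:
#   findParentDir('/data/ExpressionInput/exp.test.txt') -> '/data/ExpressionInput/'
#   findFilename('/data/ExpressionInput/exp.test.txt')  -> 'exp.test.txt'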
def verifyFile(filename):
    fn=filepath(filename)
    found = False ### ensure a value is returned even when the file exists but is empty
    try:
for line in open(fn,'rU').xreadlines(): found = True; break
except Exception: found = False
return found
def outputArrayQC(filename):
""" QC plots for Affymetrix array analysis. The file is all probeset expression values (exp.DatasetName) """
global root_dir
global graphic_link
graphic_link = []
root_dir = findParentDir(filename)
root_dir = string.replace(root_dir,'ExpressionInput','DataPlots')
try: os.mkdir(root_dir)
except Exception: null=[] ### dir exists
try:
### If Affymetrix RMA result summaries available, use these
if '/' in filename: delim = '/'
else: delim = '\\'
if 'ExpressionInput' in filename:
apt_dir = string.split(filename,'ExpressionInput')[0]+'ExpressionInput/APT-output/rma-sketch.report.txt'
else:
apt_dir = string.join(string.split(filename,delim)[:-1],delim)+'/ExpressionInput/APT-output/rma-sketch.report.txt'
if verifyFile(apt_dir)==False:
apt_dir = string.replace(apt_dir,'-sketch','')
transpose = True
matrix, column_header, row_header, dataset_name, group_db = importDataSimple(apt_dir,transpose)
plotNormalizationResults(matrix,row_header,column_header,dataset_name,'pm_mean')
plotNormalizationResults(matrix,row_header,column_header,dataset_name,'all_probeset_mad_residual_mean')
except Exception:
null=[]
qc_type = 'distribution'
qc_db,dataset_name = summarizeExpressionData(filename,qc_type)
plotExpressionDistribution(qc_db,dataset_name)
return graphic_link
def outputRNASeqQC(filename):
""" QC plots for RNA-Seq analysis. The file should be exon-level read counts"""
global root_dir
global graphic_link
graphic_link = []
root_dir = findParentDir(filename)
root_dir = string.replace(root_dir,'ExpressionInput','DataPlots')
try: os.mkdir(root_dir)
except Exception: null=[] ### dir exists
qc_type = 'distribution'
### Distribution of each bin of expression values (log2 binned at the single decimal level)
qc_db,dataset_name = summarizeExpressionData(filename,qc_type)
plotExpressionDistribution(qc_db,dataset_name)
qc_type = 'feature'
qc_db,dataset_name = summarizeExpressionData(filename,qc_type)
### BoxPlot of expression values for each feature
features=[]
for s in qc_db:
for feature in qc_db[s]:
features.append(feature)
break
for feature in features:
plotFeatureBoxPlots(qc_db,dataset_name,feature)
qc_type = 'totals'
### Total expression (total reads) for each feature
plotTotalExpression(qc_db,dataset_name,features)
return graphic_link
def verifyFileLength(filename):
count = 0
try:
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
count+=1
if count>9: break
except Exception: null=[]
return count
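# verifyFileLength returns the number of lines in the file, capped at 10
# (0 if the file cannot be opened).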
def getValidExpFile(altanalyze_rawexp_dir):
import unique
dir_files = unique.read_directory(altanalyze_rawexp_dir)
valid_file = ''
for file in dir_files:
if 'exp.' in file and 'state.txt' not in file and 'feature' not in file:
valid_file = altanalyze_rawexp_dir+'/'+file
break
return valid_file
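# getValidExpFile returns the first 'exp.' expression file found in the
# directory (summary/feature files are skipped); an empty string is returned
# when none exists.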
def displayExpressionGraph(species,Platform,exp_file,gene,transpose,display=True,showIntrons=False,analysisType='plot'):
### Get gene annotations (users can provide an Ensembl or symbol)
print 'Importing exon-level expression data for visualization (be patient)...'
import ExonAnalyze_module
global platform
platform = Platform
if platform != 'AltMouse': gene_annotation_file = "AltDatabase/ensembl/"+species+"/"+species+"_Ensembl-annotations.txt"
else: gene_annotation_file = "AltDatabase/"+species+"/"+platform+"/"+platform+"_gene_annotations.txt"
genes=[]
gene=string.replace(gene,'|',',')
gene=string.replace(gene,' ',',')
if ',' in gene:
genes += string.split(gene,',')
else: genes.append(gene)
gene_db={}
for gene in genes:
try:
if 'ENS' in gene:
try: annotate_db ### If variable is defined
except Exception:
annotate_db = ExonAnalyze_module.import_annotations(gene_annotation_file,platform,keyBySymbol=False) ### Make an SQLite call
gene_symbol = annotate_db[gene].Symbol()
else:
try: annotate_db ### If variable is defined
except Exception:
annotate_db = ExonAnalyze_module.import_annotations(gene_annotation_file,platform,keyBySymbol=True)
gene_symbol = gene
gene = annotate_db[gene].GeneID()
gene_db[gene]=gene_symbol
except Exception:
#if len(gene)>0: print gene, 'not in database'
pass
if len(gene_db)==0:
        force_no_gene_found_error ### undefined name: raises a NameError when no valid gene was provided
if 'AltResults' in exp_file:
root_dir = string.split(exp_file,'AltResults')[0]+'ExonPlots/'
else:
root_dir = string.split(exp_file,'ExpressionInput')[0]+'ExonPlots/'
import ExonAnalyze_module
if platform == 'RNASeq': datatype = 'exons'
else: datatype = 'probesets'
export_exon_filename = 'AltDatabase/'+species+'/'+platform+'/'+species+'_Ensembl_'+datatype+'.txt'
if verifyFileLength(export_exon_filename) == 0:
rootdir = string.replace(root_dir,'ExonPlots/','')
export_exon_filename = rootdir+'/'+export_exon_filename
import ExonArrayEnsemblRules
ensembl_exon_db = ExonArrayEnsemblRules.reimportEnsemblProbesetsForSeqExtraction(export_exon_filename,'gene-probesets',gene_db) ### Make an SQLite call
filter_db = {}
for gene in ensembl_exon_db:
ensembl_exon_db[gene].sort()
for (index,ed,id) in ensembl_exon_db[gene]:
filter_db[id] = []
try: os.mkdir(root_dir)
    except Exception: pass ### dir exists
print 'Image results being saved to the folder "ExonPlots" in the AltAnalyze results directory.'
importTableEntries(exp_file,filter_db,ensembl_exon_db,gene_db,root_dir,transpose,display,showIntrons,analysisType=analysisType) ### Make an SQLite call
if __name__ == '__main__':
file = "/Users/nsalomonis/Desktop/dataAnalysis/Sarwal/Diabetes-Blood/ExpressionInput/exp.diabetes.txt"
file = "/Users/nsalomonis/Desktop/User Diagnostics/Mm_spinal_cord_injury/AltResults/RawSpliceData/Mm/splicing-index/Mm_spinal.txt"
#file = "/Users/nsalomonis/Desktop/User Diagnostics/Mm_spinal_cord_injury/ExpressionInput/exp.Mm_spinal.txt"
file = "/Users/nsalomonis/Desktop/dataAnalysis/r4_Bruneau_TopHat/AltResults/RawSpliceData/Mm/splicing-index/test.txt"
file = '/Users/saljh8/Desktop/Archived/Desktop/dataAnalysis/CPMC/Liliana/CPMC_GB-samples/Cultured/AltResults/RawSpliceData/Hs/splicing-index/Hs_Exon_CBD_vs_Vehicle.p5_average.txt'
file = '/Volumes/SEQ-DATA/Grimes/14018_gmp-pro/Lattice/Full/AltResults/RawSpliceData/Mm/splicing-index/myeloblast.txt'
file = '/Volumes/SEQ-DATA/Grimes/14018_gmp-pro/Lattice/Full/ExpressionInput/exp.myeloblast.txt'
file = '/Volumes/salomonis1/projects/Grimes/GEC_14061/bams/ExpressionInput/counts.gec_14061.txt'
#file = '/Volumes/SEQ-DATA/SingleCell-Churko/ExpressionInput/exp.CM.txt'
outputRNASeqQC(file);sys.exit()
species='Hs'
Platform="RNASeq"
#Platform="exon"
ShowIntrons = 'yes'
displayExpressionGraph(species,Platform,file,'ENSG00000081189',True,showIntrons=ShowIntrons);sys.exit() #ENSG00000140009 ENSG00000152284 ENSG00000133794 (ARNTL)
outputRNASeqQC(file)
| apache-2.0 |
b0noI/AIF2 | src/test/integration/python/splitter_characters_grouper_search_step.py | 3 | 30374 | # data collected by PropertyBasedSettingsTest.experimentWith_splitter_characters_grouper_search_step
data = [
{"value": 0.000050, "errors": 53},
{"value": 0.000100, "errors": 53},
{"value": 0.000150, "errors": 53},
{"value": 0.000200, "errors": 53},
{"value": 0.000250, "errors": 53},
{"value": 0.000300, "errors": 53},
{"value": 0.000350, "errors": 53},
{"value": 0.000400, "errors": 53},
{"value": 0.000450, "errors": 53},
{"value": 0.000500, "errors": 53},
{"value": 0.000550, "errors": 53},
{"value": 0.000600, "errors": 53},
{"value": 0.000650, "errors": 53},
{"value": 0.000700, "errors": 53},
{"value": 0.000750, "errors": 53},
{"value": 0.000800, "errors": 53},
{"value": 0.000850, "errors": 53},
{"value": 0.000900, "errors": 53},
{"value": 0.000950, "errors": 53},
{"value": 0.001000, "errors": 53},
{"value": 0.001050, "errors": 53},
{"value": 0.001100, "errors": 53},
{"value": 0.001150, "errors": 53},
{"value": 0.001200, "errors": 53},
{"value": 0.001250, "errors": 53},
{"value": 0.001300, "errors": 53},
{"value": 0.001350, "errors": 53},
{"value": 0.001400, "errors": 53},
{"value": 0.001450, "errors": 53},
{"value": 0.001500, "errors": 53},
{"value": 0.001550, "errors": 53},
{"value": 0.001600, "errors": 53},
{"value": 0.001650, "errors": 53},
{"value": 0.001700, "errors": 53},
{"value": 0.001750, "errors": 53},
{"value": 0.001800, "errors": 53},
{"value": 0.001850, "errors": 53},
{"value": 0.001900, "errors": 53},
{"value": 0.001950, "errors": 53},
{"value": 0.002000, "errors": 53},
{"value": 0.002050, "errors": 53},
{"value": 0.002100, "errors": 53},
{"value": 0.002150, "errors": 53},
{"value": 0.002200, "errors": 53},
{"value": 0.002250, "errors": 53},
{"value": 0.002300, "errors": 53},
{"value": 0.002350, "errors": 53},
{"value": 0.002400, "errors": 53},
{"value": 0.002450, "errors": 53},
{"value": 0.002500, "errors": 53},
{"value": 0.002550, "errors": 53},
{"value": 0.002600, "errors": 53},
{"value": 0.002650, "errors": 53},
{"value": 0.002700, "errors": 53},
{"value": 0.002750, "errors": 53},
{"value": 0.002800, "errors": 53},
{"value": 0.002850, "errors": 53},
{"value": 0.002900, "errors": 53},
{"value": 0.002950, "errors": 53},
{"value": 0.003000, "errors": 53},
{"value": 0.003050, "errors": 53},
{"value": 0.003100, "errors": 53},
{"value": 0.003150, "errors": 54},
{"value": 0.003200, "errors": 54},
{"value": 0.003250, "errors": 54},
{"value": 0.003300, "errors": 54},
{"value": 0.003350, "errors": 54},
{"value": 0.003400, "errors": 54},
{"value": 0.003450, "errors": 54},
{"value": 0.003500, "errors": 54},
{"value": 0.003550, "errors": 54},
{"value": 0.003600, "errors": 54},
{"value": 0.003650, "errors": 54},
{"value": 0.003700, "errors": 54},
{"value": 0.003750, "errors": 54},
{"value": 0.003800, "errors": 54},
{"value": 0.003850, "errors": 54},
{"value": 0.003900, "errors": 54},
{"value": 0.003950, "errors": 54},
{"value": 0.004000, "errors": 54},
{"value": 0.004050, "errors": 54},
{"value": 0.004100, "errors": 54},
{"value": 0.004150, "errors": 54},
{"value": 0.004200, "errors": 54},
{"value": 0.004250, "errors": 54},
{"value": 0.004300, "errors": 54},
{"value": 0.004350, "errors": 54},
{"value": 0.004400, "errors": 54},
{"value": 0.004450, "errors": 54},
{"value": 0.004500, "errors": 54},
{"value": 0.004550, "errors": 54},
{"value": 0.004600, "errors": 54},
{"value": 0.004650, "errors": 54},
{"value": 0.004700, "errors": 54},
{"value": 0.004750, "errors": 54},
{"value": 0.004800, "errors": 54},
{"value": 0.004850, "errors": 54},
{"value": 0.004900, "errors": 54},
{"value": 0.004950, "errors": 54},
{"value": 0.005000, "errors": 54},
{"value": 0.005050, "errors": 54},
{"value": 0.005100, "errors": 54},
{"value": 0.005150, "errors": 54},
{"value": 0.005200, "errors": 54},
{"value": 0.005250, "errors": 54},
{"value": 0.005300, "errors": 54},
{"value": 0.005350, "errors": 54},
{"value": 0.005400, "errors": 54},
{"value": 0.005450, "errors": 54},
{"value": 0.005500, "errors": 54},
{"value": 0.005550, "errors": 54},
{"value": 0.005600, "errors": 54},
{"value": 0.005650, "errors": 54},
{"value": 0.005700, "errors": 54},
{"value": 0.005750, "errors": 54},
{"value": 0.005800, "errors": 54},
{"value": 0.005850, "errors": 54},
{"value": 0.005900, "errors": 54},
{"value": 0.005950, "errors": 54},
{"value": 0.006000, "errors": 54},
{"value": 0.006050, "errors": 54},
{"value": 0.006100, "errors": 54},
{"value": 0.006150, "errors": 54},
{"value": 0.006200, "errors": 54},
{"value": 0.006250, "errors": 54},
{"value": 0.006300, "errors": 54},
{"value": 0.006350, "errors": 54},
{"value": 0.006400, "errors": 54},
{"value": 0.006450, "errors": 54},
{"value": 0.006500, "errors": 54},
{"value": 0.006550, "errors": 54},
{"value": 0.006600, "errors": 54},
{"value": 0.006650, "errors": 54},
{"value": 0.006700, "errors": 54},
{"value": 0.006750, "errors": 54},
{"value": 0.006800, "errors": 54},
{"value": 0.006850, "errors": 54},
{"value": 0.006900, "errors": 54},
{"value": 0.006950, "errors": 54},
{"value": 0.007000, "errors": 54},
{"value": 0.007050, "errors": 54},
{"value": 0.007100, "errors": 54},
{"value": 0.007150, "errors": 54},
{"value": 0.007200, "errors": 54},
{"value": 0.007250, "errors": 54},
{"value": 0.007300, "errors": 54},
{"value": 0.007350, "errors": 54},
{"value": 0.007400, "errors": 54},
{"value": 0.007450, "errors": 54},
{"value": 0.007500, "errors": 54},
{"value": 0.007550, "errors": 54},
{"value": 0.007600, "errors": 54},
{"value": 0.007650, "errors": 54},
{"value": 0.007700, "errors": 54},
{"value": 0.007750, "errors": 54},
{"value": 0.007800, "errors": 54},
{"value": 0.007850, "errors": 54},
{"value": 0.007900, "errors": 54},
{"value": 0.007950, "errors": 54},
{"value": 0.008000, "errors": 54},
{"value": 0.008050, "errors": 54},
{"value": 0.008100, "errors": 54},
{"value": 0.008150, "errors": 54},
{"value": 0.008200, "errors": 54},
{"value": 0.008250, "errors": 54},
{"value": 0.008300, "errors": 54},
{"value": 0.008350, "errors": 54},
{"value": 0.008400, "errors": 54},
{"value": 0.008450, "errors": 54},
{"value": 0.008500, "errors": 54},
{"value": 0.008550, "errors": 54},
{"value": 0.008600, "errors": 54},
{"value": 0.008650, "errors": 54},
{"value": 0.008700, "errors": 54},
{"value": 0.008750, "errors": 54},
{"value": 0.008800, "errors": 54},
{"value": 0.008850, "errors": 54},
{"value": 0.008900, "errors": 54},
{"value": 0.008950, "errors": 54},
{"value": 0.009000, "errors": 54},
{"value": 0.009050, "errors": 54},
{"value": 0.009100, "errors": 54},
{"value": 0.009150, "errors": 54},
{"value": 0.009200, "errors": 54},
{"value": 0.009250, "errors": 54},
{"value": 0.009300, "errors": 54},
{"value": 0.009350, "errors": 54},
{"value": 0.009400, "errors": 54},
{"value": 0.009450, "errors": 54},
{"value": 0.009500, "errors": 54},
{"value": 0.009550, "errors": 54},
{"value": 0.009600, "errors": 54},
{"value": 0.009650, "errors": 54},
{"value": 0.009700, "errors": 54},
{"value": 0.009750, "errors": 54},
{"value": 0.009800, "errors": 54},
{"value": 0.009850, "errors": 54},
{"value": 0.009900, "errors": 54},
{"value": 0.009950, "errors": 54},
{"value": 0.010000, "errors": 54},
{"value": 0.010050, "errors": 54},
{"value": 0.010100, "errors": 54},
{"value": 0.010150, "errors": 54},
{"value": 0.010200, "errors": 54},
{"value": 0.010250, "errors": 54},
{"value": 0.010300, "errors": 54},
{"value": 0.010350, "errors": 54},
{"value": 0.010400, "errors": 54},
{"value": 0.010450, "errors": 54},
{"value": 0.010500, "errors": 54},
{"value": 0.010550, "errors": 54},
{"value": 0.010600, "errors": 54},
{"value": 0.010650, "errors": 54},
{"value": 0.010700, "errors": 54},
{"value": 0.010750, "errors": 54},
{"value": 0.010800, "errors": 54},
{"value": 0.010850, "errors": 54},
{"value": 0.010900, "errors": 54},
{"value": 0.010950, "errors": 54},
{"value": 0.011000, "errors": 54},
{"value": 0.011050, "errors": 54},
{"value": 0.011100, "errors": 54},
{"value": 0.011150, "errors": 54},
{"value": 0.011200, "errors": 54},
{"value": 0.011250, "errors": 54},
{"value": 0.011300, "errors": 54},
{"value": 0.011350, "errors": 54},
{"value": 0.011400, "errors": 54},
{"value": 0.011450, "errors": 54},
{"value": 0.011500, "errors": 54},
{"value": 0.011550, "errors": 54},
{"value": 0.011600, "errors": 54},
{"value": 0.011650, "errors": 54},
{"value": 0.011700, "errors": 54},
{"value": 0.011750, "errors": 54},
{"value": 0.011800, "errors": 54},
{"value": 0.011850, "errors": 54},
{"value": 0.011900, "errors": 54},
{"value": 0.011950, "errors": 54},
{"value": 0.012000, "errors": 54},
{"value": 0.012050, "errors": 54},
{"value": 0.012100, "errors": 54},
{"value": 0.012150, "errors": 54},
{"value": 0.012200, "errors": 54},
{"value": 0.012250, "errors": 54},
{"value": 0.012300, "errors": 54},
{"value": 0.012350, "errors": 54},
{"value": 0.012400, "errors": 54},
{"value": 0.012450, "errors": 54},
{"value": 0.012500, "errors": 55},
{"value": 0.012550, "errors": 55},
{"value": 0.012600, "errors": 55},
{"value": 0.012650, "errors": 55},
{"value": 0.012700, "errors": 55},
{"value": 0.012750, "errors": 55},
{"value": 0.012800, "errors": 55},
{"value": 0.012850, "errors": 55},
{"value": 0.012900, "errors": 55},
{"value": 0.012950, "errors": 55},
{"value": 0.013000, "errors": 55},
{"value": 0.013050, "errors": 55},
{"value": 0.013100, "errors": 55},
{"value": 0.013150, "errors": 55},
{"value": 0.013200, "errors": 55},
{"value": 0.013250, "errors": 55},
{"value": 0.013300, "errors": 55},
{"value": 0.013350, "errors": 55},
{"value": 0.013400, "errors": 55},
{"value": 0.013450, "errors": 55},
{"value": 0.013500, "errors": 55},
{"value": 0.013550, "errors": 55},
{"value": 0.013600, "errors": 55},
{"value": 0.013650, "errors": 55},
{"value": 0.013700, "errors": 55},
{"value": 0.013750, "errors": 55},
{"value": 0.013800, "errors": 55},
{"value": 0.013850, "errors": 55},
{"value": 0.013900, "errors": 55},
{"value": 0.013950, "errors": 55},
{"value": 0.014000, "errors": 55},
{"value": 0.014050, "errors": 55},
{"value": 0.014100, "errors": 55},
{"value": 0.014150, "errors": 55},
{"value": 0.014200, "errors": 55},
{"value": 0.014250, "errors": 55},
{"value": 0.014300, "errors": 55},
{"value": 0.014350, "errors": 55},
{"value": 0.014400, "errors": 55},
{"value": 0.014450, "errors": 55},
{"value": 0.014500, "errors": 55},
{"value": 0.014550, "errors": 55},
{"value": 0.014600, "errors": 55},
{"value": 0.014650, "errors": 55},
{"value": 0.014700, "errors": 55},
{"value": 0.014750, "errors": 55},
{"value": 0.014800, "errors": 55},
{"value": 0.014850, "errors": 55},
{"value": 0.014900, "errors": 55},
{"value": 0.014950, "errors": 55},
{"value": 0.015000, "errors": 55},
{"value": 0.015050, "errors": 55},
{"value": 0.015100, "errors": 55},
{"value": 0.015150, "errors": 55},
{"value": 0.015200, "errors": 55},
{"value": 0.015250, "errors": 55},
{"value": 0.015300, "errors": 55},
{"value": 0.015350, "errors": 55},
{"value": 0.015400, "errors": 55},
{"value": 0.015450, "errors": 55},
{"value": 0.015500, "errors": 55},
{"value": 0.015550, "errors": 55},
{"value": 0.015600, "errors": 55},
{"value": 0.015650, "errors": 55},
{"value": 0.015700, "errors": 55},
{"value": 0.015750, "errors": 55},
{"value": 0.015800, "errors": 55},
{"value": 0.015850, "errors": 55},
{"value": 0.015900, "errors": 55},
{"value": 0.015950, "errors": 55},
{"value": 0.016000, "errors": 55},
{"value": 0.016050, "errors": 55},
{"value": 0.016100, "errors": 55},
{"value": 0.016150, "errors": 55},
{"value": 0.016200, "errors": 55},
{"value": 0.016250, "errors": 55},
{"value": 0.016300, "errors": 55},
{"value": 0.016350, "errors": 55},
{"value": 0.016400, "errors": 55},
{"value": 0.016450, "errors": 55},
{"value": 0.016500, "errors": 55},
{"value": 0.016550, "errors": 55},
{"value": 0.016600, "errors": 55},
{"value": 0.016650, "errors": 55},
{"value": 0.016700, "errors": 55},
{"value": 0.016750, "errors": 55},
{"value": 0.016800, "errors": 55},
{"value": 0.016850, "errors": 55},
{"value": 0.016900, "errors": 55},
{"value": 0.016950, "errors": 55},
{"value": 0.017000, "errors": 55},
{"value": 0.017050, "errors": 55},
{"value": 0.017100, "errors": 55},
{"value": 0.017150, "errors": 55},
{"value": 0.017200, "errors": 55},
{"value": 0.017250, "errors": 55},
{"value": 0.017300, "errors": 55},
{"value": 0.017350, "errors": 55},
{"value": 0.017400, "errors": 55},
{"value": 0.017450, "errors": 55},
{"value": 0.017500, "errors": 55},
{"value": 0.017550, "errors": 55},
{"value": 0.017600, "errors": 55},
{"value": 0.017650, "errors": 55},
{"value": 0.017700, "errors": 55},
{"value": 0.017750, "errors": 55},
{"value": 0.017800, "errors": 55},
{"value": 0.017850, "errors": 55},
{"value": 0.017900, "errors": 55},
{"value": 0.017950, "errors": 55},
{"value": 0.018000, "errors": 55},
{"value": 0.018050, "errors": 55},
{"value": 0.018100, "errors": 55},
{"value": 0.018150, "errors": 55},
{"value": 0.018200, "errors": 55},
{"value": 0.018250, "errors": 55},
{"value": 0.018300, "errors": 55},
{"value": 0.018350, "errors": 55},
{"value": 0.018400, "errors": 55},
{"value": 0.018450, "errors": 55},
{"value": 0.018500, "errors": 55},
{"value": 0.018550, "errors": 55},
{"value": 0.018600, "errors": 55},
{"value": 0.018650, "errors": 55},
{"value": 0.018700, "errors": 55},
{"value": 0.018750, "errors": 55},
{"value": 0.018800, "errors": 55},
{"value": 0.018850, "errors": 55},
{"value": 0.018900, "errors": 55},
{"value": 0.018950, "errors": 55},
{"value": 0.019000, "errors": 55},
{"value": 0.019050, "errors": 55},
{"value": 0.019100, "errors": 55},
{"value": 0.019150, "errors": 55},
{"value": 0.019200, "errors": 55},
{"value": 0.019250, "errors": 55},
{"value": 0.019300, "errors": 55},
{"value": 0.019350, "errors": 55},
{"value": 0.019400, "errors": 55},
{"value": 0.019450, "errors": 55},
{"value": 0.019500, "errors": 55},
{"value": 0.019550, "errors": 55},
{"value": 0.019600, "errors": 55},
{"value": 0.019650, "errors": 55},
{"value": 0.019700, "errors": 55},
{"value": 0.019750, "errors": 55},
{"value": 0.019800, "errors": 55},
{"value": 0.019850, "errors": 55},
{"value": 0.019900, "errors": 55},
{"value": 0.019950, "errors": 55},
{"value": 0.020000, "errors": 55},
{"value": 0.020050, "errors": 55},
{"value": 0.020100, "errors": 55},
{"value": 0.020150, "errors": 55},
{"value": 0.020200, "errors": 55},
{"value": 0.020250, "errors": 55},
{"value": 0.020300, "errors": 55},
{"value": 0.020350, "errors": 56},
{"value": 0.020400, "errors": 56},
{"value": 0.020450, "errors": 56},
{"value": 0.020500, "errors": 56},
{"value": 0.020550, "errors": 56},
{"value": 0.020600, "errors": 56},
{"value": 0.020650, "errors": 56},
{"value": 0.020700, "errors": 56},
{"value": 0.020750, "errors": 56},
{"value": 0.020800, "errors": 56},
{"value": 0.020850, "errors": 56},
{"value": 0.020900, "errors": 56},
{"value": 0.020950, "errors": 56},
{"value": 0.021000, "errors": 56},
{"value": 0.021050, "errors": 56},
{"value": 0.021100, "errors": 56},
{"value": 0.021150, "errors": 56},
{"value": 0.021200, "errors": 56},
{"value": 0.021250, "errors": 56},
{"value": 0.021300, "errors": 56},
{"value": 0.021350, "errors": 56},
{"value": 0.021400, "errors": 56},
{"value": 0.021450, "errors": 56},
{"value": 0.021500, "errors": 56},
{"value": 0.021550, "errors": 56},
{"value": 0.021600, "errors": 56},
{"value": 0.021650, "errors": 56},
{"value": 0.021700, "errors": 56},
{"value": 0.021750, "errors": 56},
{"value": 0.021800, "errors": 56},
{"value": 0.021850, "errors": 56},
{"value": 0.021900, "errors": 56},
{"value": 0.021950, "errors": 56},
{"value": 0.022000, "errors": 56},
{"value": 0.022050, "errors": 56},
{"value": 0.022100, "errors": 56},
{"value": 0.022150, "errors": 56},
{"value": 0.022200, "errors": 56},
{"value": 0.022250, "errors": 56},
{"value": 0.022300, "errors": 56},
{"value": 0.022350, "errors": 56},
{"value": 0.022400, "errors": 56},
{"value": 0.022450, "errors": 56},
{"value": 0.022500, "errors": 56},
{"value": 0.022550, "errors": 56},
{"value": 0.022600, "errors": 56},
{"value": 0.022650, "errors": 56},
{"value": 0.022700, "errors": 56},
{"value": 0.022750, "errors": 56},
{"value": 0.022800, "errors": 56},
{"value": 0.022850, "errors": 56},
{"value": 0.022900, "errors": 56},
{"value": 0.022950, "errors": 56},
{"value": 0.023000, "errors": 56},
{"value": 0.023050, "errors": 56},
{"value": 0.023100, "errors": 56},
{"value": 0.023150, "errors": 56},
{"value": 0.023200, "errors": 56},
{"value": 0.023250, "errors": 56},
{"value": 0.023300, "errors": 56},
{"value": 0.023350, "errors": 56},
{"value": 0.023400, "errors": 56},
{"value": 0.023450, "errors": 56},
{"value": 0.023500, "errors": 56},
{"value": 0.023550, "errors": 56},
{"value": 0.023600, "errors": 56},
{"value": 0.023650, "errors": 56},
{"value": 0.023700, "errors": 56},
{"value": 0.023750, "errors": 56},
{"value": 0.023800, "errors": 56},
{"value": 0.023850, "errors": 56},
{"value": 0.023900, "errors": 56},
{"value": 0.023950, "errors": 56},
{"value": 0.024000, "errors": 56},
{"value": 0.024050, "errors": 56},
{"value": 0.024100, "errors": 56},
{"value": 0.024150, "errors": 56},
{"value": 0.024200, "errors": 56},
{"value": 0.024250, "errors": 56},
{"value": 0.024300, "errors": 56},
{"value": 0.024350, "errors": 56},
{"value": 0.024400, "errors": 56},
{"value": 0.024450, "errors": 56},
{"value": 0.024500, "errors": 56},
{"value": 0.024550, "errors": 56},
{"value": 0.024600, "errors": 56},
{"value": 0.024650, "errors": 56},
{"value": 0.024700, "errors": 56},
{"value": 0.024750, "errors": 56},
{"value": 0.024800, "errors": 56},
{"value": 0.024850, "errors": 56},
{"value": 0.024900, "errors": 56},
{"value": 0.024950, "errors": 56},
{"value": 0.025000, "errors": 56},
{"value": 0.025050, "errors": 56},
{"value": 0.025100, "errors": 56},
{"value": 0.025150, "errors": 56},
{"value": 0.025200, "errors": 56},
{"value": 0.025250, "errors": 56},
{"value": 0.025300, "errors": 56},
{"value": 0.025350, "errors": 56},
{"value": 0.025400, "errors": 56},
{"value": 0.025450, "errors": 56},
{"value": 0.025500, "errors": 56},
{"value": 0.025550, "errors": 56},
{"value": 0.025600, "errors": 56},
{"value": 0.025650, "errors": 56},
{"value": 0.025700, "errors": 56},
{"value": 0.025750, "errors": 56},
{"value": 0.025800, "errors": 56},
{"value": 0.025850, "errors": 56},
{"value": 0.025900, "errors": 56},
{"value": 0.025950, "errors": 56},
{"value": 0.026000, "errors": 56},
{"value": 0.026050, "errors": 56},
{"value": 0.026100, "errors": 56},
{"value": 0.026150, "errors": 56},
{"value": 0.026200, "errors": 56},
{"value": 0.026250, "errors": 56},
{"value": 0.026300, "errors": 56},
{"value": 0.026350, "errors": 56},
{"value": 0.026400, "errors": 56},
{"value": 0.026450, "errors": 56},
{"value": 0.026500, "errors": 56},
{"value": 0.026550, "errors": 56},
{"value": 0.026600, "errors": 56},
{"value": 0.026650, "errors": 56},
{"value": 0.026700, "errors": 56},
{"value": 0.026750, "errors": 56},
{"value": 0.026800, "errors": 56},
{"value": 0.026850, "errors": 56},
{"value": 0.026900, "errors": 56},
{"value": 0.026950, "errors": 56},
{"value": 0.027000, "errors": 56},
{"value": 0.027050, "errors": 56},
{"value": 0.027100, "errors": 56},
{"value": 0.027150, "errors": 56},
{"value": 0.027200, "errors": 56},
{"value": 0.027250, "errors": 56},
{"value": 0.027300, "errors": 56},
{"value": 0.027350, "errors": 56},
{"value": 0.027400, "errors": 56},
{"value": 0.027450, "errors": 56},
{"value": 0.027500, "errors": 56},
{"value": 0.027550, "errors": 56},
{"value": 0.027600, "errors": 56},
{"value": 0.027650, "errors": 56},
{"value": 0.027700, "errors": 56},
{"value": 0.027750, "errors": 56},
{"value": 0.027800, "errors": 56},
{"value": 0.027850, "errors": 56},
{"value": 0.027900, "errors": 56},
{"value": 0.027950, "errors": 56},
{"value": 0.028000, "errors": 56},
{"value": 0.028050, "errors": 56},
{"value": 0.028100, "errors": 56},
{"value": 0.028150, "errors": 56},
{"value": 0.028200, "errors": 56},
{"value": 0.028250, "errors": 56},
{"value": 0.028300, "errors": 56},
{"value": 0.028350, "errors": 56},
{"value": 0.028400, "errors": 56},
{"value": 0.028450, "errors": 56},
{"value": 0.028500, "errors": 56},
{"value": 0.028550, "errors": 56},
{"value": 0.028600, "errors": 56},
{"value": 0.028650, "errors": 56},
{"value": 0.028700, "errors": 56},
{"value": 0.028750, "errors": 56},
{"value": 0.028800, "errors": 56},
{"value": 0.028850, "errors": 56},
{"value": 0.028900, "errors": 56},
{"value": 0.028950, "errors": 56},
{"value": 0.029000, "errors": 56},
{"value": 0.029050, "errors": 56},
{"value": 0.029100, "errors": 56},
{"value": 0.029150, "errors": 56},
{"value": 0.029200, "errors": 56},
{"value": 0.029250, "errors": 56},
{"value": 0.029300, "errors": 56},
{"value": 0.029350, "errors": 56},
{"value": 0.029400, "errors": 56},
{"value": 0.029450, "errors": 56},
{"value": 0.029500, "errors": 56},
{"value": 0.029550, "errors": 56},
{"value": 0.029600, "errors": 56},
{"value": 0.029650, "errors": 56},
{"value": 0.029700, "errors": 56},
{"value": 0.029750, "errors": 56},
{"value": 0.029800, "errors": 56},
{"value": 0.029850, "errors": 56},
{"value": 0.029900, "errors": 56},
{"value": 0.029950, "errors": 56},
{"value": 0.030000, "errors": 56},
{"value": 0.030050, "errors": 56},
{"value": 0.030100, "errors": 56},
{"value": 0.030150, "errors": 56},
{"value": 0.030200, "errors": 56},
{"value": 0.030250, "errors": 56},
{"value": 0.030300, "errors": 56},
{"value": 0.030350, "errors": 56},
{"value": 0.030400, "errors": 56},
{"value": 0.030450, "errors": 56},
{"value": 0.030500, "errors": 56},
{"value": 0.030550, "errors": 56},
{"value": 0.030600, "errors": 56},
{"value": 0.030650, "errors": 56},
{"value": 0.030700, "errors": 56},
{"value": 0.030750, "errors": 56},
{"value": 0.030800, "errors": 56},
{"value": 0.030850, "errors": 56},
{"value": 0.031050, "errors": 56},
{"value": 0.031550, "errors": 56},
{"value": 0.032050, "errors": 56},
{"value": 0.032550, "errors": 56},
{"value": 0.033050, "errors": 56},
{"value": 0.033550, "errors": 56},
{"value": 0.034050, "errors": 56},
{"value": 0.034550, "errors": 56},
{"value": 0.035050, "errors": 56},
{"value": 0.035550, "errors": 56},
{"value": 0.036050, "errors": 56},
{"value": 0.036550, "errors": 56},
{"value": 0.037050, "errors": 56},
{"value": 0.037550, "errors": 56},
{"value": 0.038050, "errors": 56},
{"value": 0.038550, "errors": 56},
{"value": 0.039050, "errors": 56},
{"value": 0.039550, "errors": 56},
{"value": 0.040050, "errors": 56},
{"value": 0.040550, "errors": 56},
{"value": 0.041050, "errors": 60},
{"value": 0.041550, "errors": 60},
{"value": 0.042050, "errors": 60},
{"value": 0.042550, "errors": 60},
{"value": 0.043050, "errors": 60},
{"value": 0.043550, "errors": 60},
{"value": 0.044050, "errors": 60},
{"value": 0.044550, "errors": 60},
{"value": 0.045050, "errors": 60},
{"value": 0.045550, "errors": 60},
{"value": 0.046050, "errors": 60},
{"value": 0.046550, "errors": 60},
{"value": 0.047050, "errors": 60},
{"value": 0.047550, "errors": 60},
{"value": 0.048050, "errors": 60},
{"value": 0.048550, "errors": 60},
{"value": 0.049050, "errors": 60},
{"value": 0.049550, "errors": 60},
{"value": 0.050050, "errors": 60},
{"value": 0.050550, "errors": 60},
{"value": 0.051050, "errors": 60},
{"value": 0.051550, "errors": 60},
{"value": 0.052050, "errors": 60},
{"value": 0.052550, "errors": 60},
{"value": 0.053050, "errors": 60},
{"value": 0.053550, "errors": 60},
{"value": 0.054050, "errors": 60},
{"value": 0.054550, "errors": 60},
{"value": 0.055050, "errors": 60},
{"value": 0.055550, "errors": 60},
{"value": 0.056050, "errors": 60},
{"value": 0.056550, "errors": 60},
{"value": 0.057050, "errors": 60},
{"value": 0.057550, "errors": 60},
{"value": 0.058050, "errors": 60},
{"value": 0.058550, "errors": 60},
{"value": 0.059050, "errors": 60},
{"value": 0.059550, "errors": 60},
{"value": 0.060050, "errors": 60},
{"value": 0.060550, "errors": 60},
{"value": 0.061050, "errors": 60},
{"value": 0.061550, "errors": 60},
{"value": 0.062050, "errors": 60},
{"value": 0.062550, "errors": 60},
{"value": 0.063050, "errors": 60},
{"value": 0.063550, "errors": 60},
{"value": 0.064050, "errors": 60},
{"value": 0.064550, "errors": 60},
{"value": 0.065050, "errors": 60},
{"value": 0.065550, "errors": 60},
{"value": 0.066050, "errors": 60},
{"value": 0.066550, "errors": 60},
{"value": 0.067050, "errors": 60},
{"value": 0.067550, "errors": 60},
{"value": 0.068050, "errors": 60},
{"value": 0.068550, "errors": 60},
{"value": 0.069050, "errors": 60},
{"value": 0.069550, "errors": 60},
{"value": 0.070050, "errors": 60},
{"value": 0.070550, "errors": 60},
{"value": 0.071050, "errors": 60},
{"value": 0.071550, "errors": 60},
{"value": 0.072050, "errors": 60},
{"value": 0.072550, "errors": 60},
{"value": 0.073050, "errors": 60},
{"value": 0.073550, "errors": 60},
{"value": 0.074050, "errors": 60},
{"value": 0.074550, "errors": 60},
{"value": 0.075050, "errors": 60},
{"value": 0.075550, "errors": 60},
{"value": 0.076050, "errors": 60},
{"value": 0.076550, "errors": 60},
{"value": 0.077050, "errors": 60},
{"value": 0.077550, "errors": 60},
{"value": 0.078050, "errors": 60},
{"value": 0.078550, "errors": 60},
{"value": 0.079050, "errors": 60},
{"value": 0.079550, "errors": 60},
{"value": 0.080050, "errors": 60},
{"value": 0.080550, "errors": 60},
{"value": 0.081050, "errors": 60},
{"value": 0.081550, "errors": 65},
{"value": 0.082050, "errors": 65},
{"value": 0.082550, "errors": 65},
{"value": 0.083050, "errors": 65},
{"value": 0.083550, "errors": 65},
{"value": 0.084050, "errors": 65},
{"value": 0.084550, "errors": 65},
{"value": 0.085050, "errors": 65},
{"value": 0.085550, "errors": 65},
{"value": 0.086050, "errors": 65},
{"value": 0.086550, "errors": 65},
{"value": 0.087050, "errors": 65},
{"value": 0.087550, "errors": 65},
{"value": 0.088050, "errors": 65},
{"value": 0.088550, "errors": 65},
{"value": 0.089050, "errors": 65},
{"value": 0.089550, "errors": 65},
{"value": 0.090050, "errors": 65},
{"value": 0.090550, "errors": 65},
{"value": 0.091050, "errors": 65},
{"value": 0.091550, "errors": 65},
{"value": 0.092050, "errors": 65},
{"value": 0.092550, "errors": 65},
{"value": 0.093050, "errors": 65},
{"value": 0.093550, "errors": 65},
{"value": 0.094050, "errors": 65},
{"value": 0.094550, "errors": 65},
{"value": 0.095050, "errors": 65},
{"value": 0.095550, "errors": 65},
{"value": 0.096050, "errors": 65},
{"value": 0.096550, "errors": 65},
{"value": 0.097050, "errors": 65},
{"value": 0.097550, "errors": 65},
{"value": 0.098050, "errors": 65},
{"value": 0.098550, "errors": 65},
{"value": 0.099050, "errors": 65},
{"value": 0.099550, "errors": 65},
{"value": 0.100050, "errors": 65},
{"value": 0.100550, "errors": 65},
{"value": 0.101050, "errors": 65},
{"value": 0.101550, "errors": 65},
{"value": 0.102050, "errors": 65},
{"value": 0.102550, "errors": 65},
{"value": 0.103050, "errors": 65},
{"value": 0.103550, "errors": 65},
{"value": 0.104050, "errors": 65},
{"value": 0.104550, "errors": 65},
{"value": 0.105050, "errors": 65},
{"value": 0.105550, "errors": 65},
{"value": 0.106050, "errors": 65},
{"value": 0.106550, "errors": 65},
{"value": 0.107050, "errors": 65},
{"value": 0.107550, "errors": 65},
{"value": 0.108050, "errors": 65},
{"value": 0.108550, "errors": 65},
{"value": 0.109050, "errors": 65},
{"value": 0.109550, "errors": 65},
{"value": 0.110050, "errors": 65},
{"value": 0.110550, "errors": 65},
{"value": 0.111050, "errors": 65},
{"value": 0.111550, "errors": 65},
{"value": 0.112050, "errors": 65},
{"value": 0.112550, "errors": 65},
{"value": 0.113050, "errors": 65},
{"value": 0.113550, "errors": 65},
{"value": 0.114050, "errors": 65},
{"value": 0.114550, "errors": 65},
{"value": 0.115050, "errors": 65},
{"value": 0.115550, "errors": 65},
{"value": 0.116050, "errors": 65},
{"value": 0.116550, "errors": 65},
{"value": 0.117050, "errors": 65},
{"value": 0.117550, "errors": 65},
{"value": 0.118050, "errors": 65},
{"value": 0.118550, "errors": 65},
{"value": 0.119050, "errors": 65},
{"value": 0.119550, "errors": 65},
{"value": 0.120050, "errors": 65},
{"value": 0.120550, "errors": 65},
{"value": 0.121050, "errors": 65},
{"value": 0.121550, "errors": 65},
{"value": 0.122050, "errors": 65},
{"value": 0.122550, "errors": 65},
{"value": 0.123050, "errors": 65},
{"value": 0.123550, "errors": 65},
{"value": 0.124050, "errors": 65},
{"value": 0.124550, "errors": 65},
{"value": 0.125050, "errors": 65},
{"value": 0.125550, "errors": 65},
{"value": 0.126050, "errors": 65},
{"value": 0.126550, "errors": 65},
{"value": 0.127050, "errors": 65},
{"value": 0.127550, "errors": 65},
{"value": 0.128050, "errors": 65},
{"value": 0.128550, "errors": 65},
{"value": 0.129050, "errors": 65},
{"value": 0.129550, "errors": 65},
{"value": 0.130050, "errors": 65},
{"value": 0.130550, "errors": 65},
{"value": 0.131050, "errors": 65},
{"value": 0.131550, "errors": 65},
{"value": 0.132050, "errors": 65},
{"value": 0.132550, "errors": 65},
{"value": 0.133050, "errors": 65},
{"value": 0.133550, "errors": 65},
{"value": 0.134050, "errors": 65},
{"value": 0.134550, "errors": 65},
{"value": 0.135050, "errors": 65},
{"value": 0.135550, "errors": 65},
{"value": 0.136050, "errors": 65},
{"value": 0.136550, "errors": 65},
{"value": 0.137050, "errors": 65},
{"value": 0.137550, "errors": 65},
{"value": 0.138050, "errors": 65},
{"value": 0.138550, "errors": 65},
{"value": 0.139050, "errors": 65},
{"value": 0.139550, "errors": 65},
{"value": 0.140050, "errors": 65},
{"value": 0.140550, "errors": 65},
{"value": 0.141050, "errors": 65},
{"value": 0.141550, "errors": 65},
{"value": 0.142050, "errors": 65},
{"value": 0.142550, "errors": 65},
{"value": 0.143050, "errors": 65},
{"value": 0.143550, "errors": 65},
{"value": 0.144050, "errors": 65},
{"value": 0.144550, "errors": 65},
{"value": 0.145050, "errors": 65},
]
x = []
y = []
for value in data:
x.append(value["value"])
y.append(value["errors"])
from pandas import *
d = {"x": x, "y": y}
df = DataFrame(d)
import matplotlib.pyplot as plt
from pandas.tools.rplot import *
plt.plot(x, y, 'ro')
plt.ylabel('errors')
plt.xlabel('splitter_characters_grouper_search_step')
plt.title('splitter_characters_grouper_search_step vs errors count')
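# `Polynomial` is used below but never defined or imported in this script; the
# class here is a minimal least-squares sketch (assumed interface:
# Polynomial(x, y, degree) exposing .getval(x)) so the fitting code can run.
import numpy as np

class Polynomial(object):
    """Least-squares polynomial fit of a fixed degree."""
    def __init__(self, x, y, degree):
        # numpy.polyfit returns coefficients ordered from highest power to lowest
        self.coeffs = np.polyfit(x, y, degree)

    def getval(self, x):
        # evaluate the fitted polynomial at x
        return np.polyval(self.coeffs, x)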
polynomial = Polynomial(x, y, 5)
new_x = []
new_y = []
current_x = 0.
while current_x < 0.15:
new_x.append(current_x)
new_y.append(polynomial.getval(current_x))
current_x += 0.00005
plt.plot(new_x, new_y, 'ro')
print (polynomial.getval(0.)) | mit |
dgwakeman/mne-python | mne/viz/tests/test_utils.py | 12 | 2643 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from nose.tools import assert_true, assert_raises
from numpy.testing import assert_allclose
from mne.viz.utils import compare_fiff, _fake_click
from mne.viz import ClickableImage, add_background_image, mne_analyze_colormap
from mne.utils import run_tests_if_main
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
def test_mne_analyze_colormap():
"""Test mne_analyze_colormap
"""
assert_raises(ValueError, mne_analyze_colormap, [0])
assert_raises(ValueError, mne_analyze_colormap, [-1, 1, 2])
assert_raises(ValueError, mne_analyze_colormap, [0, 2, 1])
def test_compare_fiff():
import matplotlib.pyplot as plt
compare_fiff(raw_fname, cov_fname, read_limit=0, show=False)
plt.close('all')
def test_clickable_image():
"""Test the ClickableImage class."""
# Gen data and create clickable image
import matplotlib.pyplot as plt
im = np.random.randn(100, 100)
clk = ClickableImage(im)
clicks = [(12, 8), (46, 48), (10, 24)]
# Generate clicks
for click in clicks:
_fake_click(clk.fig, clk.ax, click, xform='data')
assert_allclose(np.array(clicks), np.array(clk.coords))
assert_true(len(clicks) == len(clk.coords))
# Exporting to layout
lt = clk.to_layout()
assert_true(lt.pos.shape[0] == len(clicks))
assert_allclose(lt.pos[1, 0] / lt.pos[2, 0],
clicks[1][0] / float(clicks[2][0]))
clk.plot_clicks()
plt.close('all')
def test_add_background_image():
"""Test adding background image to a figure."""
import matplotlib.pyplot as plt
f, axs = plt.subplots(1, 2)
x, y = np.random.randn(2, 10)
im = np.random.randn(10, 10)
axs[0].scatter(x, y)
axs[1].scatter(y, x)
for ax in axs:
ax.set_aspect(1)
# Background without changing aspect
ax_im = add_background_image(f, im)
assert_true(ax_im.get_aspect() == 'auto')
for ax in axs:
assert_true(ax.get_aspect() == 1)
# Background with changing aspect
ax_im_asp = add_background_image(f, im, set_ratios='auto')
assert_true(ax_im_asp.get_aspect() == 'auto')
for ax in axs:
assert_true(ax.get_aspect() == 'auto')
run_tests_if_main()
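# run_tests_if_main() (from mne.utils) runs the tests defined above when this
# file is executed directly; it has no effect when the module is merely imported.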
| bsd-3-clause |
ivastar/clear | survey_paper/flux_snr_exposuretime.py | 1 | 3427 | import astropy
from astropy.io import fits, ascii
import matplotlib.pyplot as plt
from astropy.table import Table, join
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import matplotlib.colors as colors
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
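# truncate_colormap returns a new colormap containing only the [minval, maxval]
# slice of the original; below, the lightest ~20% of Blues_r and Reds_r is
# dropped (presumably so pale points stay visible on a white background).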
plt.rcParams['text.usetex'] = True
plt.close('all')
grizli_cat_dir = '/Users/rsimons/Dropbox/clear/grizli_extractions_v3.0/grizli_v3.0_cats'
fig, axes = plt.subplots(2, 3, figsize = (12., 7.))
lam_bins_G102 = [(0.8, 0.9), (0.9, 1.0), (1.0, 1.15)]
lam_bins_G141 = [(1.15, 1.35), (1.35, 1.55), (1.55, 1.75)]
lines = [('Ha', 0.6563), ('OIII', 0.5007), ('Hb', 0.4861), ('OII', 0.3728)]
lam_bins_both = [lam_bins_G102, lam_bins_G141]
for g in np.arange(2):
lam_bins = lam_bins_both[g]
if g == 0:
t_str = 'G102'
cm = plt.cm.Blues_r
cm = truncate_colormap(cm, 0., 0.8)
(vmn, vmx) = (0., 15.)
if g == 1:
t_str = 'G141'
cm = plt.cm.Reds_r
cm = truncate_colormap(cm, 0., 0.8)
(vmn, vmx) = (0., 6.)
#cm = plt.cm.viridis
for f in ['S', 'N']:
cat = fits.open(grizli_cat_dir + '/' + 'GD%s_lines_grizli_master.fits'%f)
cat = cat[1].data
for ll in np.arange(len(lam_bins)):
ax = axes[g, ll]
(lmin, lmax) = lam_bins[ll]
ax.annotate('%.1f '%lmin + r'$<\lambda_{\text{obs}}<$' + ' %.1f'%lmax, (0.95, 0.05), \
xycoords = 'axes fraction', ha = 'right', va = 'bottom', fontsize = 16, fontweight = 'bold')
for line in lines:
lam_obs = line[-1] * (1+cat['z_MAP'])
gd = np.where((lam_obs > lmin) & (lam_obs < lmax) & (cat['%s_FLUX'%line[0]]> 0.))[0]
x = cat['%s_FLUX'%line[0]][gd] * 1.e-17
y = cat['%s_FLUX'%line[0]][gd]/cat['%s_FLUX_ERR'%line[0]][gd]
T_exp = cat['T_%s'%t_str][gd]/3600.
scat = ax.scatter(x, y, c = T_exp, s = 4., cmap = cm, \
norm = plt.Normalize(vmin = vmn, vmax = vmx), zorder = 10., \
edgecolor = 'black', linewidths = 0.1)
if (f == 'S'):# & (zz == 3):
#cax = fig.add_axes([0.10, 0.93, 0.15, 0.02])
bbox = (0.05, 0., 0.9, 0.98)
cax = inset_axes(ax, width="40%", height="5%", loc=2, bbox_to_anchor = bbox, bbox_transform = ax.transAxes)
cbar = plt.colorbar(scat, cax = cax, orientation = 'horizontal')
cbar.ax.tick_params(labelsize = 8)
cbar.set_label('%s Exposure Time (hr)'%t_str, fontsize = 10, labelpad = 2.)
for ax in axes.ravel():
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylim(0.1, 200)
ax.set_xlim(2.e-18, 2.e-15)
ax.set_yticks([1, 10, 100])
ax.set_yticklabels(['1', '10', '100'])
ax.plot([2.e-18, 2.e-15], [0.2, 200], 'k-', zorder = 1, alpha = 0.4)
for ax in axes[1]:
ax.set_xlabel('Line Flux (erg s$^{-1}$ cm$^{-2}$)')
for ax in axes[:,0]:
ax.set_ylabel('S/N')
fig.tight_layout()
fig.savefig('/Users/rsimons/Dropbox/clear/figures/survey_paper/flux_snr_exptime.png', dpi = 300)
| mit |
cpcloud/bokeh | bokeh/pivot_table.py | 3 | 8403 | from pandas import Series, DataFrame
from pandas.core.index import MultiIndex
from pandas.tools.merge import concat
from pandas.tools.util import cartesian_product
from pandas.compat import range, lrange, zip
from pandas import compat
import numpy as np
from six import string_types, iteritems
_aggregates = {
"count": len,
"counta": np.count_nonzero,
"countunique": lambda arr: len(np.unique(arr)),
"average": np.average,
"max": np.max,
"min": np.min,
"median": np.median,
"sum": np.sum,
"product": np.product,
"stdev": np.std,
"var": np.var,
}
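# Illustrative usage (assuming a DataFrame ``df`` with columns A, B, C and D):
#     pivot_table(df, values=['D'], rows=['A'], cols=['C'], aggfunc="sum")
# A string aggfunc is resolved through the _aggregates mapping above, so
# "sum" behaves the same as passing np.sum directly.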
def pivot_table(data, values=[], rows=[], cols=[], aggfunc=None, fill_value=0):
"""
Create a spreadsheet-style pivot table as a DataFrame. The levels in the
pivot table will be stored in MultiIndex objects (hierarchical indexes) on
the index and columns of the result DataFrame
Parameters
----------
data : DataFrame
    values : list of at most one column name to aggregate, optional
rows : list of column names or arrays to group on
Keys to group on the x-axis of the pivot table
cols : list of column names or arrays to group on
Keys to group on the y-axis of the pivot table
    aggfunc : function or str, default ``len``
        Aggregation applied to the grouped values; a string is resolved
        through the ``_aggregates`` mapping above (e.g. "sum", "average",
        "countunique")
    fill_value : scalar, default 0
        Value to replace missing values with
    Notes
    -----
    'All' row / column margins (subtotals / grand totals) are always added
    via ``_add_margins``.
Examples
--------
>>> df
A B C D
0 foo one small 1
1 foo one large 2
2 foo one large 2
3 foo two small 3
4 foo two small 3
5 bar one large 4
6 bar one small 5
7 bar two small 6
8 bar two large 7
>>> table = pivot_table(df, values='D', rows=['A', 'B'],
... cols=['C'], aggfunc=np.sum)
>>> table
small large
foo one 1 4
two 6 NaN
bar one 5 4
two 6 7
Returns
-------
table : DataFrame
"""
assert len(values) <= 1
rows = _convert_by(rows)
cols = _convert_by(cols)
keys = rows + cols
if aggfunc is None:
aggfunc = len
elif isinstance(aggfunc, string_types):
aggfunc = _aggregates[aggfunc]
to_filter = []
for x in keys + values:
try:
if x in data:
to_filter.append(x)
except TypeError:
pass
if len(to_filter) < len(data.columns):
data = data[to_filter]
grouped = data.groupby(keys)
agged = grouped.agg(aggfunc)
if agged.index.nlevels > 1:
to_unstack = [ agged.index.names[i] for i in range(len(rows), len(keys)) ]
table = agged.unstack(to_unstack)
else:
table = agged
if isinstance(table, DataFrame):
if isinstance(table.columns, MultiIndex):
table = table.sortlevel(axis=1)
else:
table = table.sort_index(axis=1)
if fill_value is not None:
table = table.fillna(value=fill_value, downcast='infer')
table = _add_margins(table, data, values, rows=rows, cols=cols, aggfunc=aggfunc)
if len(rows) == 0 and len(cols) > 0:
table = table.T
return table
def _add_margins(table, data, values, rows, cols, aggfunc):
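    """Append 'All' row / column margins (totals computed with ``aggfunc``) to ``table``."""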
grand_margin = _compute_grand_margin(data, values, aggfunc)
if not values and isinstance(table, Series):
# If there are no values and the table is a series, then there is only
# one column in the data. Compute grand margin and return it.
row_key = ('All',) + ('',) * (len(rows) - 1) if len(rows) > 1 else 'All'
return table.append(Series({row_key: grand_margin['All']}))
if values:
marginal_result_set = _generate_marginal_results(table, data, values, rows, cols, aggfunc, grand_margin)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
result, margin_keys, row_margin = marginal_result_set
else:
marginal_result_set = _generate_marginal_results_without_values(table, data, rows, cols, aggfunc)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
result, margin_keys, row_margin = marginal_result_set
key = ('All',) + ('',) * (len(rows) - 1) if len(rows) > 1 else 'All'
row_margin = row_margin.reindex(result.columns)
# populate grand margin
for k in margin_keys:
if isinstance(k, compat.string_types):
row_margin[k] = grand_margin[k]
else:
row_margin[k] = grand_margin[k[0]]
margin_dummy = DataFrame(row_margin, columns=[key]).T
row_names = result.index.names
result = result.append(margin_dummy)
result.index.names = row_names
return result
def _compute_grand_margin(data, values, aggfunc):
if values:
grand_margin = {}
for k, v in iteritems(data[values]):
try:
if isinstance(aggfunc, compat.string_types):
grand_margin[k] = getattr(v, aggfunc)()
else:
grand_margin[k] = aggfunc(v)
except TypeError:
pass
return grand_margin
else:
return {'All': aggfunc(data.index)}
def _generate_marginal_results(table, data, values, rows, cols, aggfunc, grand_margin):
if len(cols) > 0:
# need to "interleave" the margins
table_pieces = []
margin_keys = []
def _all_key(key):
return (key, 'All') + ('',) * (len(cols) - 1)
if len(rows) > 0:
margin = data[rows + values].groupby(rows).agg(aggfunc)
cat_axis = 1
for key, piece in table.groupby(level=0, axis=cat_axis):
all_key = _all_key(key)
piece[all_key] = margin[key]
table_pieces.append(piece)
margin_keys.append(all_key)
else:
margin = grand_margin
cat_axis = 0
for key, piece in table.groupby(level=0, axis=cat_axis):
all_key = _all_key(key)
table_pieces.append(piece)
table_pieces.append(Series(margin[key], index=[all_key]))
margin_keys.append(all_key)
result = concat(table_pieces, axis=cat_axis)
if len(rows) == 0:
return result
else:
result = table
margin_keys = table.columns
if len(cols) > 0:
row_margin = data[cols + values].groupby(cols).agg(aggfunc)
row_margin = row_margin.stack()
# slight hack
new_order = [len(cols)] + lrange(len(cols))
row_margin.index = row_margin.index.reorder_levels(new_order)
else:
row_margin = Series(np.nan, index=result.columns)
return result, margin_keys, row_margin
def _generate_marginal_results_without_values(table, data, rows, cols, aggfunc):
if len(cols) > 0:
# need to "interleave" the margins
margin_keys = []
def _all_key():
if len(cols) == 1:
return 'All'
return ('All', ) + ('', ) * (len(cols) - 1)
if len(rows) > 0:
margin = data[rows].groupby(rows).apply(aggfunc)
all_key = _all_key()
table[all_key] = margin
result = table
margin_keys.append(all_key)
else:
margin = data.groupby(level=0, axis=0).apply(aggfunc)
all_key = _all_key()
table[all_key] = margin
result = table
margin_keys.append(all_key)
return result
else:
result = table
margin_keys = table.columns
if len(cols):
row_margin = data[cols].groupby(cols).apply(aggfunc)
else:
row_margin = Series(np.nan, index=result.columns)
return result, margin_keys, row_margin
def _convert_by(by):
if by is None:
by = []
elif (np.isscalar(by) or isinstance(by, (np.ndarray, Series))
or hasattr(by, '__call__')):
by = [by]
else:
by = list(by)
return by
| bsd-3-clause |
mkenworthy/exorings | plot_fig4.py | 1 | 5681 | import sys, getopt
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.patches import Rectangle
from astropy.io import ascii
from scipy.interpolate import interp1d
import exorings3 as exorings
# set sensible imshow defaults
mpl.rc('image', interpolation='nearest', origin='lower', cmap='gray')
# no scientific notation for numbers on plots
mpl.rc('axes.formatter', limits=(-7, 7))
# use latex for labelling
mpl.rc('text', usetex=True)
mpl.rc('font', family='serif')
# load in J1407 binned photometry curve
tin = ascii.read("j1407_bincc.dat")
time = tin['time']
flux = tin['flux']
flux_err = tin['flux_rms']
# 54160 to 54300
goodp = (time > 54160) * (time < 54300)
flux_err = flux_err[goodp]
flux = flux[goodp]
time = time[goodp]
print ('number of photometric points: %d' % time.size)
vstar = -1.
try:
opts, args = getopt.getopt(sys.argv[1:], "hr:o:s:", ["rfile=", "ofile=", "vstar="])
except getopt.GetoptError:
print ('%s -s <velocity> -r <inputfile> -o <outputfile>' % sys.argv[0])
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
        print ('%s -s <velocity> -r <inputfile> -o <outputfile>' % sys.argv[0])
sys.exit()
elif opt in ("-r", "--rfile"):
fitsin = arg
elif opt in ("-o", "--ofile"):
plotout = arg
elif opt in ("-s", "--vstar"):
vstar = np.array(float(arg))
print ('ring file in is %s' % fitsin)
print ('plot file out is %s' % plotout)
(res, taun_rings, rad_rings, dstar) = exorings.read_ring_fits(fitsin)
exorings.print_ring_tx(rad_rings, exorings.y_to_tx(taun_rings))
# set up stellar disk
kern = exorings.make_star_limbd(21, 0.8)
# produce fine grained gradient and ring values
samp_t = np.arange(-100, 100, 0.001) + 54222.
(samp_r, samp_g) = exorings.ring_grad_line(samp_t, res[0], res[1], res[2], res[3])
hjd_minr = samp_t[np.argmin(samp_g)]
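# hjd_minr is the epoch of minimum ring gradient -- the t_tangential epoch
# marked by the dashed vertical line in the top panel below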
hjd_to_ring = interp1d(samp_t, samp_r, kind='linear')
sst = exorings.print_disk_parameters(res, hjd_minr, samp_r)
## Calculate the best model fit given the rings and disk parameters
strip, convo, g = exorings.ellipse_strip(rad_rings, exorings.y_to_tx(taun_rings), \
res[0], res[1], res[2], res[3], kern, dstar)
fit_time = g[0]
fit_flux = g[1]
### BEGIN THE PLOT ##################################################
datacolor = 'red'
modelcolor = 'green'
eb = dict(fmt='.', color=datacolor, ecolor=datacolor, capsize=0.0, \
marker='o', mfc=datacolor, mec=datacolor, ms=3, mew=0.001, \
elinewidth=0.5)
smalleb = dict(fmt='o', color='white', ecolor=datacolor, capsize=0.0, \
marker='o', mfc='white', mec=datacolor, ms=4, mew=1, elinewidth=2.0)
mdict = dict(color=modelcolor, zorder=-5)
ty = dict(color='black', fontsize=10, fontweight='bold', va='top', ha='right')
# set up plot area
fig = plt.figure(figsize=(10, 12))
# split into two panels - the top with the light curve and model
# fit and the bottom with the zoomed in plots
#gs = gridspec.GridSpec(2, 1, height_ratios=[1, 4], wspace=0.0, hspace=0.05)
gs = fig.add_gridspec(2, 1, height_ratios=[1, 4], wspace=0.0, hspace=0.05)
ax1 = plt.subplot(gs[0, :])
# the J1407 photometry
ax1.errorbar(time, flux, flux_err, zorder=-4, **eb)
# the ring model
ax1.plot(fit_time, fit_flux, **mdict)
ax1.axis((54180., 54260, 0., 1.19))
ax1.ticklabel_format(style='plain', useOffset=False, axis='x', scilimits=(-5, 10))
ax1.set_xlabel("Time [days]")
ax1.xaxis.set_label_position('top')
ax1.xaxis.tick_top()
# the vertical line marking t_tangential
ax1.vlines(hjd_minr, -1., 2., colors='k', linestyle='dashed')
# array of days that we want a zoom into
dt = np.array((-23, -22, -17, -16, -15, -14, -11, -10, -9, -8, -7, -6, \
+3, +5, +9, +10, +11, +24))
dt += 1
# disk parameters as a latex table
ax1.text(0.17, 0.60, sst, transform=ax1.transAxes, **ty)
# ep_zoom and y_zoom are the sizes of the zoomed boxes (width in days, height in transmission)
ep_zoom = 0.5
y_zoom = 0.4
fiddle_time = 0.3
og = gs[1].subgridspec(3,6, wspace=0.0, hspace=0.0)
for i in np.arange(dt.size):
print ("image %d " % i)
print(i.dtype)
ep_center = hjd_minr + dt[i] + fiddle_time
ax = fig.add_subplot(og[i])
ax.errorbar(time,flux, flux_err, zorder=-4, **eb)
# first select all the pixels in that day range
# then centroid on that subset of pixels with the zoomed box
ep_day = (time < (ep_center+0.5)) * (time > (ep_center-0.5))
time_day = time[ep_day]
flux_day = flux[ep_day]
flux_err_day = flux_err[ep_day]
ax.errorbar(time_day, flux_day, flux_err_day, zorder=-3, **smalleb)
# the ring model
ax.plot(fit_time, fit_flux, linewidth=3, **mdict)
# get the center of the box from the median values of the selected
# day
day_center = np.median(time_day)
y_center = (np.max(flux_day) + np.min(flux_day))/2.
# label the top plot with a marker
ax1.scatter(day_center, 1.05, marker='v', color='k')
# corners of the zoomed box
ep_low = day_center - (ep_zoom/2.)
ep_hig = day_center + (ep_zoom/2.)
flux_low = y_center - (y_zoom/2.)
flux_hig = y_center + (y_zoom/2.)
#ax1.add_patch(Rectangle((ep_low, flux_low), ep_zoom, y_zoom, facecolor="grey",zorder=-10,linewidth=0))
if i == 0:
ax1.add_patch(Rectangle((ep_low, flux_low), ep_zoom, y_zoom, facecolor="none", zorder=-10, linewidth=1))
ax.text(0.1, 0.1, r'$\rm{width}=%4.2f$\ \rm{d}' % ep_zoom, transform=ax.transAxes)
ax.text(0.1, 0.22, r'$\rm{height}=%4.2f$\ \rm{T}' % y_zoom, transform=ax.transAxes)
ax.axis((ep_low, ep_hig, flux_low, flux_hig))
# label the delta day
ax.text(0.95, 0.95, dt[i], transform=ax.transAxes, **ty)
ax.set_xticks([])
ax.set_yticks([])
fig.savefig(plotout)
| isc |
blab/stability | augur/src/H3N2_process.py | 1 | 41159 | import time, argparse,re,os, socket
import matplotlib as mpl
if socket.gethostname() not in ['olt', 'rneher-iMac']:
mpl.use('pdf')
from virus_filter import flu_filter, fix_name
from virus_clean import virus_clean
from tree_refine import tree_refine
from tree_mutations import tree_mutations
from tree_stability import tree_stability
from tree_titer import HI_tree
from fitness_model import fitness_model
from process import process, virus_config
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Align import MultipleSeqAlignment
import numpy as np
from itertools import izip
# HA2 AA sites are shifted by +329 relative to HA1
# So HA2:77V is 406V in HA1 numbering
virus_config.update({
# data source and sequence parsing/cleaning/processing
'virus':'H3N2',
'alignment_file': 'data/H3N2_IRD_sequence_october_clean.fasta',
# data/H3N2_gisaid_epiflu_sequence.fasta # data/H3N2_IRD_sequence_clean.fasta
'fasta_fields': {0: 'strain', 1: 'isolate_id', 3: 'passage', 5: 'date', 7: 'lab', 8: "accession"},
#'alignment_file':'data/H3N2_gisaid_epiflu_sequence.fasta',
'outgroup':'A/Beijing/32/1992',
'force_include':'data/H3N2_HI_strains.txt',
'force_include_all':False,
'date_spec':'year',
'max_global':True, # sample as evenly as possible from different geographic regions
    'cds':[0,None], # define the HA1 start in 0 numbering
'n_iqd':5,
'min_mutation_frequency':0.01,
# define relevant clades in canonical HA1 numbering (+1)
# numbering starting at HA1 start, adding sp to obtain numbering from methionine
'clade_designations': { "3c3.a":[('HA1', 128,'A'), ('HA1',142,'G'), ('HA1',159,'S')],
"3c3": [('HA1', 128,'A'), ('HA1',142,'G'), ('HA1',159,'F')],
"3c2.a": [('HA1', 144,'S'), ('HA1',159,'Y'), ('HA1',225,'D'), ('HA1', 311,'H'), ('HA2', 160,'N')],
"3c2": [('HA1', 144,'N'), ('HA1',159,'F'), ('HA1',225,'N'), ('HA2', 160,'N'), ('HA1', 142, 'R')],
"3c3.b": [('HA1', 83,'R'), ('HA1',261,'Q'), ('HA1',62,'K'), ('HA1', 122,'D')]
},
'epitope_masks_fname':'source-data/H3N2_epitope_masks.tsv',
'epitope_mask_version':'wolf',
'HI_fname':'data/H3N2_HI_titers.txt',
'html_vars': {'coloring': 'ep, ne, rb, lbi, dfreq, region, date, cHI, HI_dist',
'gtplaceholder': 'HA1 positions...',
'freqdefault': '3c2.a, 3c3.a, 3c3.b'},
'js_vars': {'LBItau': 0.0005, 'LBItime_window': 0.5, 'dfreq_dn':2},
'excluded_tables': ['NIMR_Sep2012_08.csv'], #, 'nimr-sep-2010-table8', 'nimr-sep-2010-table8','NIMR_Sep2012_11.csv'],
'layout':'auspice',
'min_aamuts': 1,
# 'predictors': ['dfreq', 'cHI'] # estimate
'predictors': { 'dfreq': [2.50, 2.84], 'cHI': [1.68, 0.45] } # fix predictor: [value, std deviation]
})
class H3N2_filter(flu_filter):
def __init__(self,min_length = 900, **kwargs):
'''
parameters
min_length -- minimal length for a sequence to be acceptable
'''
flu_filter.__init__(self, **kwargs)
self.min_length = min_length
self.vaccine_strains =[
{
"strain": "A/Wisconsin/67/2005",
"db": "IRD",
"accession": "CY163984",
"date": "2005-08-31",
"seq": "ATGAAGACTATCATTGCTTTGAGCTACATTCTATGTCTGGTTTTCGCTCAAAAACTTCCCGGAAATGACAACAGCACGGCAACGCTGTGCCTTGGGCACCATGCAGTACCAAACGGAACGATAGTGAAAACAATCACGAATGACCAAATTGAAGTTACTAATGCTACTGAGCTGGTTCAGAGTTCCTCAACAGGTGGAATATGCGACAGTCCTCATCAGATCCTTGATGGAGAAAACTGCACACTAATAGATGCTCTATTGGGAGACCCTCAGTGTGATGGCTTCCAAAATAAGAAATGGGACCTTTTTGTTGAACGCAGCAAAGCCTACAGCAACTGTTACCCTTATGATGTGCCGGATTATGCCTCCCTTAGGTCACTAGTTGCCTCATCCGGCACACTGGAGTTTAACGATGAAAGCTTCAATTGGACTGGAGTCACTCAAAATGGAACAAGCTCTTCTTGCAAAAGGAGATCTAATAACAGTTTCTTTAGTAGATTGAATTGGTTGACCCACTTAAAATTCAAATACCCAGCATTGAACGTGACTATGCCAAACAATGAAAAATTTGACAAATTGTACATTTGGGGGGTTCACCACCCGGTTACGGACAATGACCAAATCTTCCTGTATGCTCAAGCATCAGGAAGAATCACAGTCTCTACCAAAAGAAGCCAACAAACTGTAATCCCGAATATCGGATCTAGACCCAGAATAAGGAATATCCCCAGCAGAATAAGCATCTATTGGACAATAGTAAAACCGGGAGACATACTTTTGATTAACAGCACAGGGAATCTAATTGCTCCTAGGGGTTACTTCAAAATACGAAGTGGGAAAAGCTCAATAATGAGATCAGATGCACCCATTGGCAAATGCAATTCTGAATGCATCACTCCAAATGGAAGCATTCCCAATGACAAACCATTTCAAAATGTAAACAGGATCACATATGGGGCCTGTCCCAGATATGTTAAGCAAAACACTCTGAAATTGGCAACAGGGATGCGAAATGTACCAGAGAAACAAACTAGAGGCATATTTGGCGCAATCGCGGGTTTCATAGAAAATGGTTGGGAGGGAATGGTGGATGGTTGGTACGGTTTCAGGCATCAAAATTCTGAGGGAATAGGACAAGCAGCAGATCTCAAAAGCACTCAAGCAGCAATCAATCAAATCAATGGGAAGCTGAATAGGTTGATCGGGAAAACCAACGAGAAATTCCATCAGATTGAAAAAGAATTCTCAGAAGTAGAAGGGAGAATTCAGGACCTCGAGAAATATGTTGAGGACACTAAAATAGATCTCTGGTCATACAACGCGGAGCTTCTTGTTGCCCTGGAGAACCAACATACAATTGATCTAACTGACTCAGAAATGAACAAACTGTTTGAAAGAACAAAGAAGCAACTGAGGGAAAATGCTGAGGATATGGGCAATGGTTGTTTCAAAATATACCACAAATGTGACAATGCCTGCATAGGATCAATCAGAAATGGAACTTATGACCATGATGTATACAGAGATGAAGCATTAAACAACCGGTTCCAGATCAAAGGCGTTGAGCTGAAGTCAGGATACAAAGATTGGATCCTATGGATTTCCTTTGCCATATCATGTTTTTTGCTTTGTGTTGCTTTGTTGGGGTTCATCATGTGGGCCTGCCAAAAAGGCAACATTAGGTGCAACATTTGCATTTGA"
}, {
"strain": "A/Brisbane/10/2007",
"db": "IRD",
"accession": "CY113005",
"date": "2007-02-06",
"seq": "ATGAAGACTATCATTGCTTTGAGCTACATTCTATGTCTGGTTTTCACTCAAAAACTTCCCGGAAATGACAACAGCACGGCAACGCTGTGCCTTGGGCACCATGCAGTACCAAACGGAACGATAGTGAAAACAATCACGAATGACCAAATTGAAGTTACTAATGCTACTGAGCTGGTTCAGAGTTCCTCAACAGGTGAAATATGCGACAGTCCTCATCAGATCCTTGATGGAGAAAACTGCACACTAATAGATGCTCTATTGGGAGACCCTCAGTGTGATGGCTTCCAAAATAAGAAATGGGACCTTTTTGTTGAACGCAGCAAAGCCTACAGCAACTGTTACCCTTATGATGTGCCGGATTATGCCTCCCTTAGGTCACTAGTTGCCTCATCCGGCACACTGGAGTTTAACAATGAAAGCTTCAATTGGACTGGAGTCACTCAAAACGGAACAAGCTCTGCTTGCATAAGGAGATCTAATAACAGTTTCTTTAGTAGATTGAATTGGTTGACCCACTTAAAATTCAAATACCCAGCATTGAACGTGACTATGCCAAACAATGAAAAATTTGACAAATTGTACATTTGGGGGGTTCACCACCCGGGTACGGACAATGACCAAATCTTCCCGTATGCTCAAGCATCAGGAAGAATCACAGTCTCTACCAAAAGAAGCCAACAAACTGTAATCCCGAATATCGGATCTAGACCCAGAGTAAGGAATATCCCCAGCAGAATAAGCATCTATTGGACAATAGTAAAACCGGGAGACATACTTTTGATTAACAGCACAGGGAATCTAATTGCTCCTAGGGGTTACTTCAAAATACGAAGTGGGAAAAGCTCAATAATGAGATCAGATGCACCCATTGGCAAATGCAATTCTGAATGCATCACTCCAAACGGAAGCATTCCCAATGACAAACCATTCCAAAATGTAAACAGGATCACATACGGGGCCTGTCCCAGATATGTTAAGCAAAACACTCTGAAATTGGCAACAGGGATGCGAAATGTACCAGAGAAACAAACTAGAGGCATATTTGGCGCAATCGCGGGTTTCATAGAAAATGGTTGGGAGGGAATGGTGGATGGTTGGTACGGTTTCAGGCATCAAAATTCTGAGGGAATAGGACAAGCAGCAGATCTCAAAAGCACTCAAGCAGCAATCGATCAAATCAATGGGAAGCTGAATAGGTTGATCGGGAAAACCAACGAGAAATTCCATCAGATTGAAAAAGAATTCTCAGAAGTCGAAGGGAGAATTCAGGACCTTGAGAAATATGTTGAGGACACCAAAATAGATCTCTGGTCATACAACGCGGAGCTTCTTGTTGCCCTGGAGAACCAACATACAATTGATCTAACTGACTCAGAAATGAACAAACTGTTTGAAAAAACAAAGAAGCAACTGAGGGAAAATGCTGAGGATATGGGCAATGGTTGTTTCAAAATATACCACAAATGTGACAATGCCTGCATAGGATCAATCAGAAATGGAACTTATGACCACAATGTATACAGAGATGAAGCATTAAACAACCGGTTCCAGATCAAGGGCGTTGAGCTGAAGTCAGGATACAAAGATTGGATCCTATGGATTTCCTTTGCCATATCATGTTTTTTGCTTTGTGTTGCTTTGTTGGGGTTCATCATGTGGGCCTGCCAAAAAGGCAACATTAGGTGCAACATTTGCATTTGA"
}, {
"strain": "A/Perth/16/2009",
"db": "IRD",
"accession": "GQ293081",
"date": "2009-04-07",
"seq": "ATGAAGACTATCATTGCTTTGAGCTACATTCTATGTCTGGTTTTCGCTCAAAAACTTCCTGGAAATGACAACAGCACGGCAACGCTGTGCCTTGGGCACCATGCAGTACCAAACGGAACGATAGTGAAAACAATCACGAATGACCAAATTGAAGTTACTAATGCTACTGAGCTGGTTCAGAGTTCCTCAACAGGTGAAATATGCGACAGTCCTCATCAGATCCTTGATGGAAAAAACTGCACACTAATAGATGCTCTATTGGGAGACCCTCAGTGTGATGGCTTCCAAAATAAGAAATGGGACCTTTTTGTTGAACGCAGCAAAGCCTACAGCAACTGTTACCCTTATGATGTGCCGGATTATGCCTCCCTTAGGTCACTAGTTGCCTCATCCGGCACACTGGAGTTTAACAATGAAAGCTTCAATTGGACTGGAGTCACTCAAAACGGAACAAGCTCTGCTTGCATAAGGAGATCTAAAAACAGTTTCTTTAGTAGATTGAATTGGTTGACCCACTTAAACTTCAAATACCCAGCATTGAACGTGACTATGCCAAACAATGAACAATTTGACAAATTGTACATTTGGGGGGTTCACCACCCGGGTACGGACAAAGACCAAATCTTCCTGTATGCTCAAGCATCAGGAAGAATCACAGTCTCTACCAAAAGAAGCCAACAAACCGTAAGCCCGAATATCGGATCTAGACCCAGAGTAAGGAATATCCCTAGCAGAATAAGCATCTATTGGACAATAGTAAAACCGGGAGACATACTTTTGATTAACAGCACAGGGAATCTAATTGCTCCTAGGGGTTACTTCAAAATACGAAGTGGGAAAAGCTCAATAATGAGATCAGATGCACCCATTGGCAAATGCAATTCTGAATGCATCACTCCAAATGGAAGCATTCCCAATGACAAACCATTCCAAAATGTAAACAGGATCACATACGGGGCCTGTCCCAGATATGTTAAGCAAAACACTCTGAAATTGGCAACAGGGATGCGAAATGTACCAGAGAAACAAACTAGAGGCATATTTGGCGCAATCGCGGGTTTCATAGAAAATGGTTGGGAGGGAATGGTGGATGGTTGGTACGGTTTCAGGCATCAAAATTCTGAGGGAAGAGGACAAGCAGCAGATCTCAAAAGCACTCAAGCAGCAATCGATCAAATCAATGGGAAGCTGAATAGATTGATCGGGAAAACCAACGAGAAATTCCATCAGATTGAAAAAGAATTCTCAGAAGTCGAAGGGAGAATTCAGGACCTTGAGAAATATGTTGAGGACACTAAAATAGATCTCTGGTCATACAACGCGGAGCTTCTTGTTGCCCTGGAGAACCAACATACAATTGATCTAACTGACTCAGAAATGAACAAACTGTTTGAAAAAACAAAGAAGCAACTGAGGGAAAATGCTGAGGATATGGGCAATGGTTGTTTCAAAATATACCACAAATGTGACAATGCCTGCATAGGATCAATCAGAAATGGAACTTATGACCACGATGTATACAGAGATGAAGCATTAAACAACCGGTTTCAGATCAAGGGAGTTGAGCTGAAGTCAGGGTACAAAGATTGGATCCTATGGATTTCCTTTGCCATATCATGTTTTTTGCTTTGTGTTGCTTTGTTGGGGTTCATCATGTGGGCCTGCCAAAAAGGCAACATTAGGTGCAACATTTGCATTTGA"
}, {
"strain": "A/Victoria/361/2011",
"db": "IRD",
"accession": "GQ293081",
"date": "2011-10-24",
"seq": "ATGAAGACTATCATTGCTTTGAGCCACATTCTATGTCTGGTTTTCGCTCAAAAACTTCCTGGAAATGACAACAGCACGGCAACGCTGTGCCTTGGGCACCATGCAGTACCAAACGGAACGATAGTGAAAACAATCACGAATGACCAAATTGAAGTTACTAATGCTACTGAGCTGGTTCAGAATTCCTCAATAGGTGAAATATGCGACAGTCCTCATCAGATCCTTGATGGAGAAAACTGCACACTAATAGATGCTCTATTGGGAGACCCTCAGTGTGATGGCTTCCAAAATAAGAAATGGGACCTTTTTGTTGAACGAAGCAAAGCCTACAGCAACTGTTACCCTTATGATGTGCCGGATTATGCCTCCCTTAGGTCACTAGTTGCCTCATCCGGCACACTGGAGTTTAACAATGAAAGCTTCAATTGGACTGGAGTCACTCAAAACGGAACAAGTTCTGCTTGCATAAGGAGATCTAATAATAGTTTCTTTAGTAGATTAAATTGGTTGACCCGCTTAAACTTCAAATACCCAGCATTGAACGTGACTATGCCAAACAATGAACAATTTGACAAATTGTACATTTGGGGGGTTCACCACCCGGTTACGGACAAGGAACAAATCTTCCTGTATGCTCAATCATCAGGAAGAATCACAGTATCTACCAAAAGAAGCCAACAAGCTGTAATCCCGAATATCGGATATAGACCCAGAATAAGGAATATCCCTAGCAGAATAAGCATCTATTGGACAATAGTAAAACCGGGAGACATACTTTTGATTAACAGCACAGGGAATCTAATTGCTCCTAGGGGTTACTTCAAAATACGAAGTGGGAAAAGCTCAATAATGAGATCAGATGCACCCATTGGCAAATGCAATTCTGAATGCATCACTCCAAATGGAAGCATTCCCAATGACAAACCATTCCAAAATGTAAACAGGATCACATACGGGGCCTGTCCCAGATATGTTAAGCAAAGCACTCTGAAATTGGCAACAGGAATGCGAAATGTACCAGAGAAACAAACTAGAGGCATATTTGGCGCAATAGCGGGTTTCATAGAAAATGGTTGGGAGGGAATGGTGGATGGTTGGTACGGTTTCAGGCATCAAAATTCTGAGGGAAGAGGACAAGCAGCAGATCTCAAAAGCACTCAAGCAGCAATCGATCAAATCAATGGGAAGCTGAATCGATTGATCGGGAAAACCAACGAGAAATTCCATCAGATTGAAAAAGAATTCTCAGAAGTCGAAGGGAGAATTCAGGACCTTGAGAAATATGTTGAGGACACTAAAATAGATCTCTGGTCATACAACGCGGAGCTTCTTGTTGCCCTGGAGAACCAACATACAATTGATCTAACTGACTCAGAAATGAACAAACTGTTTGAAAAAACAAAGAAGCAACTAAGGGAAAATGCTGAGGATATGGGCAATGGTTGTTTCAAAATATACCACAAATGTGACAATGCCTGCATAGGATCAATCAGAAATGGAACTTATGACCACGATGTATACAGAGATGAAGCATTAAACAACCGGTTCCAGATCAAGGGAGTTGAGCTGAAGTCAGGGTACAAAGATTGGATCCTATGGATTTCCTTTGCCATATCATGTTTTTTGCTTTGTGTTGCTTTGTTGGGGTTCATCATGTGGGCCTGCCAAAAGGGCAACATTAGGTGCAACATTTGCATTTGA"
}, {
"strain": "A/Texas/50/2012",
"db": "GISAID",
"isolate_id": "EPI_ISL_129858",
"date": "2012-04-15",
"seq": "ATGAAGACTATCATTGCTTTGAGCTACATTCTATGTCTGGTTTTCGCTCAAAAACTTCCTGGAAATGACAATAGCACGGCAACGCTGTGCCTTGGGCACCATGCAGTACCAAACGGAACGATAGTGAAAACAATCACGAATGACCGAATTGAAGTTACTAATGCTACTGAACTGGTTCAGAATTCCTCAATAGGTGAAATATGCGACAGTCCTCATCAGATCCTTGATGGAGAAAACTGCACACTAATAGATGCTCTATTGGGAGACCCTCAGTGTGATGGCTTCCAAAATAAGAAATGGGACCTTTTTGTTGAACGAAGCAAAGCCTACAGCAACTGTTACCCTTATGATGTGCCGGATTATGCCTCCCTTAGGTCACTAGTTGCCTCATCCGGCACACTGGAGTTTAACAATGAAAGCTTCAATTGGAATGGAGTCACTCAAAACGGAACAAGTTCTGCTTGCATAAGGAGATCTAATAATAGTTTCTTTAGTAGATTAAATTGGTTGACCCACTTAAACTTCAAATACCCAGCATTGAACGTGACTATGCCAAACAATGAACAATTTGACAAATTGTACATTTGGGGGGTTCACCACCCGGGTACGGACAAGGACCAAATCTTCCTGTATGCTCAACCATCAGGAAGAATCACAGTATCTACCAAAAGAAGCCAACAAGCTGTAATCCCGAATATCGGATCTAGACCCAGAATAAGGAATATCCCTAGCAGAATAAGCATCTATTGGACAATAGTAAAACCGGGAGACATACTTTTGATTAACAGCACAGGGAATCTAATTGCTCCTAGGGGTTACTTCAAAATACGAAGTGGGAAAAGCTCAATAATGAGATCAGATGCACCCATTGGCAAATGCAAGTCTGAATGCATCACTCCAAATGGAAGCATTCCCAATGACAAACCATTCCAAAATGTAAACAGGATCACATACGGGGCCTGTCCCAGATATGTTAAGCAAAGCACTCTGAAATTGGCAACAGGAATGCGGAATGTACCAGAGAAACAAACTAGAGGCATATTTGGCGCAATAGCGGGTTTCATAGAAAATGGTTGGGAGGGAATGGTGGATGGTTGGTACGGTTTCAGGCATCAAAATTCTGAGGGAAGAGGACAAGCAGCAGATCTCAAAAGCACTCAAGCAGCAATCGATCAAATCAATGGGAAGCTGAATCGATTGATCGGGAAAACCAACGAGAAATTCCATCAGATTGAAAAAGAATTCTCAGAAGTAGAAGGGAGAATTCAGGACCTTGAGAAATATGTTGAGGACACTAAAATAGATCTCTGGTCATACAACGCGGAGCTTCTTGTTGCCCTGGAGAACCAACATACAATTGATCTAACTGACTCAGAAATGAACAAACTGTTTGAAAAAACAAAGAAGCAACTGAGGGAAAATGCTGAGGATATGGGCAATGGTTGTTTCAAAATATACCACAAATGTGACAATGCCTGCATAGGATCAATCAGAAATGGAACTTATGACCACGATGTATACAGAGATGAAGCATTAAACAACCGGTTCCAGATCAAGGGAGTTGAGCTGAAGTCAGGGTACAAAGATTGGATCCTATGGATTTCCTTTGCCATATCATGTTTTTTGCTTTGTGTTGCTTTGTTGGGGTTCATCATGTGGGCCTGCCAAAAGGGCAACATTAGGTGCAACATTTGCATTTGA",
}, {
"strain": "A/Switzerland/9715293/2013",
"db": "GISAID",
"isolate_id": "EPI_ISL_162149",
"date": "2013-12-06",
"seq": "ATGAAGACTATCATTGCTTTGAGCTACATTCTATGTCTGGTTTTCGCTCAAAAACTTCCTGGAAATGACAATAGCACGGCAACGCTGTGCCTTGGGCACCATGCAGTACCAAACGGAACGATAGTGAAAACAATCACGAATGACCGAATTGAAGTTACTAATGCTACTGAGCTGGTTCAGAATTCCTCAATAGGTGAAATATGCGACAGTCCTCATCAGATCCTTGATGGAGAAAACTGCACACTAATAGATGCTCTATTGGGAGACCCTCAGTGTGATGGCTTTCAAAATAAGAAATGGGACCTTTTTGTTGAACGAAGCAAAGCCTACAGCAACTGTTACCCTTATGATGTGCCGGATTATGCCTCCCTTAGGTCACTAGTTGCCTCATCCGGCACACTGGAGTTTAACAATGAAAGCTTCAATTGGGCTGGAGTCACTCAAAACGGAACAAGTTCTTCTTGCATAAGGGGATCTAATAGTAGTTTCTTTAGTAGATTAAATTGGTTGACCCACTTAAACTCCAAATACCCAGCATTAAACGTGACTATGCCAAACAATGAACAATTTGACAAATTGTACATTTGGGGGGTTCACCACCCGGGTACGGACAAGGACCAAATCTTCCTGTATGCACAATCATCAGGAAGAATCACAGTATCTACCAAAAGAAGCCAACAAGCTGTAATCCCGAATATCGGATCTAGACCCAGAATAAGGGATATCCCTAGCAGAATAAGCATCTATTGGACAATAGTAAAACCGGGAGACATACTTTTGATTAACAGCACAGGGAATCTAATTGCTCCTAGGGGTTACTTCAAAATACGAAGTGGGAAAAGCTCAATAATGAGATCAGATGCACCCATTGGCAAATGCAAGTCTGAATGCATCACTCCAAATGGAAGCATTCCCAATGACAAACCATTCCAAAATGTAAACAGGATCACATACGGGGCCTGTCCCAGATATGTTAAGCAAAGCACTCTGAAATTGGCAACAGGAATGCGAAATGTACCAGAGAGACAAACTAGAGGCATATTTGGCGCAATAGCGGGTTTCATAGAAAATGGTTGGGAGGGAATGGTGGATGGTTGGTACGGCTTCAGGCATCAAAATTCTGAGGGAAGAGGACAAGCAGCAGATCTCAAAAGCACTCAAGCAGCAATCGATCAAATCAATGGGAAGCTGAATCGATTGATCGGGAAAACCAACGAGAAATTCCATCAGATTGAAAAAGAATTCTCAGAAGTAGAAGGGAGAATTCAGGACCTTGAGAAATATGTTGAGGACACAAAAATAGATCTCTGGTCATACAACGCGGAGCTTCTTGTTGCCCTGGAGAACCAACATACAATTGATCTAACTGACTCAGAAATGAACAAACTGTTTGAAAAAACAAAGAAGCAACTGAGGGAAAATGCTGAGGATATGGGCAATGGTTGTTTCAAAATATACCACAAATGTGACAATGCCTGCATAGGATCAATCAGAAATGGAACTTATGACCACGATGTATACAGGGATGAAGCATTAAACAACCGGTTCCAGATCAAGGGAGTTGAGCTGAAGTCAGGGTACAAAGATTGGATCCTATGGATTTCCTTTGCCATATCATGTTTTTTGCTTTGTGTTGCTTTGTTGGGGTTCATCATGTGGGCCTGCCAAAAGGGCAACATTAGGTGCAACATTTGCATTTGA",
}, {
"strain": "A/HongKong/4801/2014",
"db": "GISAID",
"isolate_id": "EPI_ISL_165554",
"date": "2014-02-26",
"seq": "ATGAAGACTATCATTGCTTTGAGCTACATTCTATGTCTGGTTTTCGCTCAAAAAATTCCTGGAAATGACAATAGCACGGCAACGCTGTGCCTTGGGCACCATGCAGTACCAAACGGAACGATAGTGAAAACAATCACGAATGACCGAATTGAAGTTACTAATGCTACTGAGCTGGTTCAGAATTCCTCAATAGGTGAAATATGCGACAGTCCTCATCAGATCCTTGATGGAGAAAACTGCACACTAATAGATGCTCTATTGGGAGACCCTCAGTGTGATGGCTTTCAAAATAAGAAATGGGACCTTTTTGTTGAACGAAGCAAAGCCTACAGCAACTGTTACCCTTATGATGTGCCGGATTATGCCTCCCTTAGGTCACTAGTTGCCTCATCCGGCACACTGGAGTTTAACAATGAAAGCTTCAATTGGACTGGAGTCACTCAAAACGGAACAAGTTCTGCTTGCATAAGGAGATCTAGTAGTAGTTTCTTTAGTAGATTAAATTGGTTGACCCACTTAAACTACACATACCCAGCATTGAACGTGACTATGCCAAACAATGAACAATTTGACAAATTGTACATTTGGGGGGTTCACCACCCGGGTACGGACAAGGACCAAATCTTCCTGTATGCTCAATCATCAGGAAGAATCACAGTATCTACCAAAAGAAGCCAACAAGCTGTAATCCCAAATATCGGATCTAGACCCAGAATAAGGGATATCCCTAGCAGAATAAGCATCTATTGGACAATAGTAAAACCGGGAGACATACTTTTGATTAACAGCACAGGGAATCTAATTGCTCCTAGGGGTTACTTCAAAATACGAAGTGGGAAAAGCTCAATAATGAGATCAGATGCACCCATTGGCAAATGCAAGTCTGAATGCATCACTCCAAATGGAAGCATTCCCAATGACAAACCATTCCAAAATGTAAACAGGATCACATACGGGGCCTGTCCCAGATATGTTAAGCATAGCACTCTGAAATTGGCAACAGGAATGCGAAATGTACCAGAGAAACAAACTAGAGGCATATTTGGCGCAATAGCGGGTTTCATAGAAAATGGTTGGGAGGGAATGGTGGATGGTTGGTACGGTTTCAGGCATCAAAATTCTGAGGGAAGAGGACAAGCAGCAGATCTCAAAAGCACTCAAGCAGCAATCGATCAAATCAATGGGAAGCTGAATCGATTGATCGGGAAAACCAACGAGAAATTCCATCAGATTGAAAAAGAATTCTCAGAAGTAGAAGGAAGAATTCAGGACCTTGAGAAATATGTTGAGGACACTAAAATAGATCTCTGGTCATACAACGCGGAGCTTCTTGTTGCCCTGGAGAACCAACATACAATTGATCTAACTGACTCAGAAATGAACAAACTGTTTGAAAAAACAAAGAAGCAACTGAGGGAAAATGCTGAGGATATGGGCAATGGTTGTTTCAAAATATACCACAAATGTGACAATGCCTGCATAGGATCAATAAGAAATGGAACTTATGACCACAATGTGTACAGGGATGAAGCATTAAACAACCGGTTCCAGATCAAGGGAGTTGAGCTGAAGTCAGGGTACAAAGATTGGATCCTATGGATTTCCTTTGCCATATCATGTTTTTTGCTTTGTGTTGCTTTGTTGGGGTTCATCATGTGGGCCTGCCAAAAGGGCAACATTAGGTGCAACATTTGCATTTGA",
}
]
tmp_outgroup = SeqIO.read('source-data/H3N2_outgroup.gb', 'genbank')
#tmp_outgroup = SeqIO.read('source-data/H3N2_outgroup_1968_aichi.gb', 'genbank')
genome_annotation = tmp_outgroup.features
self.cds = {x.qualifiers['gene'][0]:x for x in genome_annotation
if 'gene' in x.qualifiers and x.type=='CDS' and
x.qualifiers['gene'][0] in ['SigPep', 'HA1', 'HA2']}
self.outgroup = {
'strain': 'A/Beijing/32/1992',
'db': 'IRD',
'accession': 'U26830',
'date': '1992-01-01',
'country': 'China',
'region': 'China',
'seq': str(tmp_outgroup.seq).upper()
}
'''
self.outgroup = {
'strain': 'A/Aichi/2/1968',
'db': 'IRD',
'accession': 'KF874500',
'date': '1968-01-01',
'country': 'Japan',
'region': 'Japan Korea',
'seq': str(tmp_outgroup.seq).upper()
}
'''
class H3N2_clean(virus_clean):
def __init__(self,**kwargs):
virus_clean.__init__(self, **kwargs)
def clean_outbreaks(self):
"""Remove duplicate strains, where the geographic location, date of sampling and sequence are identical"""
virus_hashes = set()
new_viruses = []
for v in self.viruses:
try:
geo = re.search(r'A/([^/]+)/', v.strain).group(1)
except:
                print "clean outbreaks: couldn't parse geo of", v.strain
continue
if geo:
vhash = (geo, v.date, str(v.seq))
if vhash not in virus_hashes:
new_viruses.append(v)
virus_hashes.add(vhash)
self.viruses = MultipleSeqAlignment(new_viruses)
def clean_reassortants(self):
from seq_util import hamming_distance as distance
"""Remove viruses from the outbreak of triple reassortant pH1N1"""
remove_viruses = []
reassortant_seqs = [
"ATGAAGACTATCATTGCTTTTAGCTGCATTTTATGTCTGATTTTCGCTCAAAAACTTCCCGGAAGTGACAACAGCATGGCAACGCTGTGCCTGGGACACCATGCAGTGCCAAACGGAACATTAGTGAAAACAATCACGGATGACCAAATTGAAGTGACTAATGCTACTGAGCTGGTCCAGAGTTCCTCAACAGGTGGAATATGCAACAGTCCTCACCAAATCCTTGATGGGAAAAATTGCACACTGATAGATGCTCTATTGGGGGACCCTCATTGTGATGACTTCCAAAACAAGGAATGGGACCTTTTTGTTGAACGAAGCACAGCCTACAGCAACTGTTACCCTTATTACGTGCCGGATTATGCCACCCTTAGATCATTAGTTGCCTCATCCGGCAACCTGGAATTTACCCAAGAAAGCTTCAATTGGACTGGAGTTGCTCAAGGCGGATCAAGCTATGCCTGCAGAAGGGGATCTGTTAACAGTTTCTTTAGTAGATTGAATTGGTTGTATAACTTGAATTACAAGTATCCAGAGCAGAACGTAACTATGCCAAACAATGACAAATTTGACAAATTGTACATTTGGGGGGTTCACCACCCGGGTACGGACAAGGACCAAACCAACCTATATGTCCAAGCATCAGGGAGAGTTATAGTCTCTACCAAAAGAAGCCAACAAACTGTAATCCCGAATATCGGGTCTAGACCCTGGGTAAGGGGTGTCTCCAGCATAATAAGCATCTATTGGACGATAGTAAAACCGGGAGACATACTTTTGATTAACAGCACAGGGAATCTAATTGCCCCTCGGGGTTACTTCAAAATACAAAGTGGGAAAAGCTCAATAATGAGATCAGATGCACACATTGATGAATGCAATTCTGAATGCATTACTCCAAATGGAAGCATTCCCAATGACAAACCTTTTCAAAATGTAAACAAGATCACATATGGAGCCTGTCCCAGATATGTTAAGCAAAACACCCTGAAATTGGCAACAGGAATGCGGAATGTACCAGAGAAACAAACTAGAGGCATATTCGGCGCAATTGCAGGTTTCATAGAAAATGGTTGGGAGGGAATGGTAGACGGTTGGTACGGTTTCAGGCATCAGAATTCTGAAGGCACAGGACAAGCAGCAGATCTTAAAAGCACTCAAGCAGCAATCAACCAAATCACCGGGAAACTAAATAGAGTAATCAAGAAAACAAACGAGAAATTCCATCAAATCGAAAAAGAATTCTCAGAAGTAGAAGGAAGAATTCAGGACCTAGAGAAATACGTTGAAGACACTAAAATAGATCTCTGGTCTTACAACGCTGAGATTCTTGTTGCCCTGGAGAACCAACATACAATTGATTTAACCGACTCAGAGATGAGCAAACTGTTCGAAAGAACAAGAAGGCAACTGCGGGAAAATGCTGAGGACATGGGCAATGGTTGCTTCAAAATATACCACAAATGTGACAATGCCTGCATAGGATCAATCAGAAATGGAACTTATGACCATGATATATACAGAAACGAGGCATTAAACAATCGGTTCCAGATCAAAGGTGTTCAGCTAAAGTCAGGATACAAAGATTGGATCCTATGGATTTCCTTTGCCATATCATGCTTTTTGCTTTGTGTTGTTCTGCTGGGGTTCATTATGTGGGCCTGCCAAAAAGGCAACATTAGGTGCAACATTTGCATTTGA",
"ATGAAGACTATCATTGCTTTTAGCTGCATCTTATGTCAGATCTCCGCTCAAAAACTCCCCGGAAGTGACAACAGCATGGCAACGCTGTGCCTGGGGCATCACGCAGTACCAAACGGAACGTTAGTGAAAACAATAACAGATGACCAAATTGAAGTGACTAATGCTACTGAGCTGGTCCAGAGTACCTCAAAAGGTGAAATATGCAGTAGTCCTCACCAAATCCTTGATGGAAAAAATTGTACACTGATAGATGCTCTATTGGGAGACCCTCATTGTGATGACTTCCAAAACAAGAAATGGGACCTTTTTGTTGAACGAAGCACAGCTTACAGCAACTGTTACCCTTATTATGTGCCGGATTATGCCTCCCTTAGGTCACTAGTTGCCTCATCCGGCACCCTGGAATTTACTCAAGAAAGCTTCAATTGGACTGGGGTTGCTCAAGACGGAGCAAGCTATTCTTGCAGAAGGGAATCTGAAAACAGTTTCTTTAGTAGATTGAATTGGTTATATAGTTTGAATTACAAATATCCAGCGCTGAACGTAACTATGCCAAACAATGACAAATTTGACAAATTGTACATTTGGGGGGTACACCACCCGGGTACGGACAAGGACCAAACCAGTCTATATATTCAAGCATCAGGGAGAGTTACAGTCTCCACCAAATGGAGCCAACAAACTGTAATCCCGAATATCGGGTCTAGACCCTGGATAAGGGGTGTCTCCAGCATAATAAGCATCTATTGGACAATAGTAAAACCGGGAGACATACTTTTGATTAACAGCACAGGGAATCTAATTGCCCCTCGGGGTTACTTCAAAATACAAAGTGGGAAAAGCTCAATAATGAGGTCAGATGCACACATTGGCAACTGCAACTCTGAATGCATTACCCCAAATGGAAGCATTCCCAACGACAAACCTTTTCAAAATGTAAACAGAATAACATATGGGGCCTGTCCCAGATATGTTAAGCAAAACACTCTGAAATTAGCAACAGGAATGCGGAATGTACCAGAGAAACAAACTAGAGGCATATTCGGCGCAATCGCAGGTTTCATAGAAAATGGTTGGGAAGGGATGGTGGACGGTTGGTATGGTTTCAGGCATCAAAACTCTGAAGGCACAGGGCAAGCAGCAGATCTTAAAAGCACTCAAGCGGCAATCAACCAAATCACCGGGAAACTAAATAGAGTAATCAAGAAGACGAATGAAAAATTCCATCAGATCGAAAAAGAATTCTCAGAAGTAGAAGGGAGAATTCAGGACCTAGAGAGATACGTTGAAGACACTAAAATAGACCTCTGGTCTTACAACGCGGAGCTTCTTGTTGCCCTGGAGAACCAACATACAATTGATTTAACTGACTCAGAAATGAACAAACTGTTCGAAAGGACAAGGAAGCAACTGCGGGAAAATGCTGAGGACATGGGCAATGGATGCTTTAAAATATATCACAAATGTGACAATGCCTGCATAGGATCAATCAGAAATGGAACTTATGACCATGATGTATACAGAGACGAAGCAGTAAACAATCGGTTCCAGATCAAAGGTGTTCAGCTGAAGTTAGGATACAAAGATTGGATCCTATGGATTTCCTTTGCCATATCATGCTTTTTGCTTTGTGCTGTTCTGCTAGGATTCATTATGTGGGCATGCCAAAAAGGCAACATTAGGTGCAACATTTGCATTTGA",
"ATGAAGACTAGTAGTTCTGCTATATACATTGCAA------------------------CCGCAAATG---------CAGACACATTATGTATAGGTTATCATGCAGTACTAGAAAAGAATGTAACAGTAACACACTCTGTTAACCAAACTGAGAGGGGTAGCCCCATTGCATTTG--------------------GGTAAATGTAACATTGCTGGCTGGATCC------------------------------------TGGGAAATCCAGAGTGTGACACTCTCCACAGCAAGCTCATGGTCCTACATCGTGGAAACATCTAAGACAATGGAACGTGCTACCCAGGAGATTTCATCAATTATGAGGAGCTAAGGTCATCATTTGAAAGGTTTGAGATATTACAAGTTCATGGCCCAATCATGACTCGAACAAAGGTTCCTCAAGCTGGAGCAA---------------------------AAAGCTTCTACAAAAATTTAATATGGCTAGTTAAAAAAGGAAATTCATACCCAA------------------------------AGCTCAGCAAATCCTACATTTGGGGCATTCACCATCCATCTACTAGTGCTGACCAA-------CAAAGTCTCTATCAGAGTGCAGATGCATATGTTTTATCAAAATACAGCAAGAAGTTCAAG--CCGGAAATAGCAGTAAGACCCAAAGTGAGGGATCAAGAAGGGAGAATGAACTATTACTGGACACTAGTAGAGCCGGGAGACAAAATAACATTCGAAGCAACTGGAAATCTATTGGTACCGAGATATGCATTCGCAATGGAAA----GAAATGCTGGATTATCATTTCAGATACACCAGTCCACGATTGCAATACAACTTGTCAGACACCCAAGGGTGCTATAAACACCAGCCTCCCATTTCAGAATATACATCCGATCACAATTGGAAAATGTCCCAAATATGTAAAAAGCACAAAATTGAGACTGGCCACAGGATTGAGGAATGTCCCGTCTATTCAATCTAGAGGCCTATTTGGGGCCATTGCCGGTTTCATTGAAGGGGGGTGGACAGGGATGGTAGATGGATGGTACGGTTATCACCATCAAAATGCGCAGGGGTCAGGATATGCAGCCGACCTGAAGAGCACACAGAATGCCATTGACAAGATTACTAACAAAGTAAATTCTGTTATTGAAAAGATGAATACACAGTTCACAGCAGTAGGTAAAGAGTTCAACCACCTGGAAAAAAGAATAGAGAATTTAAATAAAAAAGTTGATGATGGTTTCCTGGACATTTGGACTTACAATGCCGAACTGTTGGTTCTATTGGAAAATGAAAGAACTTTGGACTACCACGATTCAAATGTGAAAAACTTATATGAAAAGGTAAGAAGCCAGTTAAAAAACAATGCCAAGGAAATTGGAAACGGCTGCTTTGAATTTTACCACAAATGCGATAACACGTGCATGGAAAGTGTCAAAAATGGGACTTATGACTACCCAAAATACTCAGAGGAAGCAAAATTAAACAGAGAAGAAATAGATGGGGTAAAGCTGGAATCAACAAGGATTTACCAGATTTTGGCGATCTATTCAACTGTCGCCAGTTCATTGGTACTGGTAGTCTCCCTGGGGGCAATCATCTGGATGTGCTCTAATGGGTCTCTACAGTGTAGAATATGTATTTAA",
"ATGAAGACTATCATTGCTTTGAGCTACATTTTATGTCTGGTTTTCGCTCAAAAACTTCCCGGAAATGACAACAGCACGGCAACGCTGTGCCTGGGGCACCATGCAGTGCCAAACGGAACGCTAGTGAAAACAATCACGAATGACCAAATTGAAGTAACTAATGCTACTGAGCTGGTTCAGAGTTCCTCAACAGGTAGAATATGCGACAGTCCTCACCAAATCCTTGATGGAGAAAACTGCACACTAATAGATGCTCTATTGGGAGACCCTCATTGTGATGGCTTCCAAAACAAGGAATGGGACCTTTTTGTTGAACGCAGCAAAGCCTACAGCAACTGTTACCCTTATGATGTGCCGGATTATGTCTCCCTTAGGTCACTAGTTGCCTCATCAGGCACGCTGGAGTTTAACAATGAAAGCTTCAATTGGACTGGAGTCGCTCAGAATGGAACAAGCTCTGCTTGCAAAAGGAGATCCGATAAAAGTTTCTTTAGTAGATTGAATTGGTTGCACCAATTAAAATACAAATATCCAGCACTGAACGTGACTATGCCAAACAATGAAAAATTTGACAAATTGTACATTTGGGGGGTTCACCACCCGGGTACAGACAGTGACCAAATCAGCCTATATGCTCAAGCATCAGGGAGAGTCACAGTCTCTACCAAAAGAAGCCAACAAACTGTAATCCCGAATATCGGATCTGGACCCTGGGTAAGGGGTGTCTCCAGCAGAATAAGCATCTATTGGACAATAGTAAAACCGGGAGACATACTTTTGATTAACAGCACAGGGAATCTAATTGCTCCTCGGGGTTACTTCAAAATACGAAGTGGGAAAAGCTCAATAATGAGGTCAGATGCACCCATTGGCAAATGCAATTCTGAATGCATCACTCCAAATGGAAGCATTCCCAATGGCAAACCATTTCAAAATGTAAACAGGATCACATATGGGGCCTGTCCCAGATATGTTAAGCAAAACACTCTGAAATTGGCAACAGGGATGCGGAATGTGCCAGAGAAACAAACTAGAGGCATATTCGGTGCAATCGCGGGCTTCATAGAAAATGGTTGGGAGGGAATGATGGACGGTTGGTACGGTTTCAGGCATCAGAATTCTGAGGGCACAGGGCAAGCAGCAGATCTTAAAAGCACTCAAGCAGCAATCAACCAAATCAACGGGAAACTGAATAGGTTAATCGAGAAAACGAACGAGAAATTCCATCAAATTGAAAAAGAATTCTCAGAAGTAGAAGGGAGAATTCAGGACCTCGAGAAATATGTCGAGGACACTAAAATAGATCTCTGGTCGTACAATGCGGAGCTTCTTGTTGCCCTGGAGAACCAACATACAATTGATCTAACTGACTCAGAAATGAACAAACTGTTTGAAAGAACAAAGAAGCAACTGAGGGAAAATGCTGAGGATATGGGCAATGGTTGTTTCAAAATATACCACAAATGTGACAATGCCTGCATAGGGTCAATCAGAAATGGAACTTATGACCATGATGTATACAGAGACGAAGCATTGAACAACCGGTTCCAGATCAAAGGTGTTGAGCTGAAGTCAGGATACAAAGATTGGATCCTATGGATTTCCTTTGCCATATCATGTTTTTTGCTTTGTATTGTTTTACTGGGGTTCATCATGTGGGCCTGCCAAAAAGGCAACATTAGGTGCAACATTTGCATTTGA",
"--------------------------------------------------------------------------------AACGCTATGCCTGGGACACCATGCAGTACCAAATGGAACGTTAGTGAAAACAATCACGGATGACCAAATTGAAGTGACTAATGCTACTGAGCTGGTTCAAAGTTCCTCAACAGGTAGAATATGTAACAGTCCTCACCACATCCTTGATGGGAAAAATTGCACACTGATAGATGCTCTATTGGGAGACCCTCATTGTGATGACTTCCAAAACAAGGAATGGGACCTTTTTGTTGAACGAAGCACAGCCTACAGCAACTGCTACCCTTATTATGTGCCGGATTATGCCTCCCTTAGGTCACTAGTTGCCTCATCCGGCACCCTGGAATTCACCCAAGAAAGCTTCAATTGGACCGGAGTTACTCAAGATGGATCAAGCTATACTTGCAGAAGGAAATCTGTTAACAGTTTCTTTAGTAGATTAAATTGGTTGCATAATTTGGACTACAAATATCCAGCGCTGAACGTAACTATGCCAAACAATGACAAATTTGACAAATTGTACATTTGGGGGGTTCACCACCCGGGTACGGACAGGGACCAAACCAACCTATATGTTCAAGCATCAGGGAGAGTTACAGTCTCCACAAAAAGAAGCCAACAAACTGTAATCCCGAACATCGGATCTAGACCCTGGGTAAGGGGTGTCTCCAGCATAATAAGCATCTATTGGACAATAGTAAAACCGGGAGACATACTTTTGATTAACAGCACAGGAAATCTAATTGCCCCTCGGGGTTACTTCAAAATACAAAGTGGGAAAAGCTCAATAATGAGATCAGATGCACCCATTGGCAACTGCAATTCTGAATGCATTACTCCAAATGGAAGCATTCCCAATGACAAACCTTTTCAAAATGTAAACAGGATCACATATGGGGCCTGTCCAAGATATGTTAAGCAAAACACTCTGAAATTGGCAACAGGGATGCGGAATGTACCAGAGAAACAAACTAGAGGCATATTCGGCGCAATCGCAGGCTTCATAGAAAATGGTTGGGAGGGGATGGTGGACGGTTGGTACGGTTTCAGGCATCAAAATTCTGAAGGCACAGGACAAGCAGCAGATCTTAAAAGTACTCAAGCAGCAATCAACCAAATCACCGGGAAACTGAATAGAGTAATCAAGAAAACGAACGAGAAATTCCATCAAATCGAAAAAGAATTCTCAGAAGTAGAAGGGAGAATTCAGGACCTAGAGAAATACGTTGAAGACACTAAAATAGATCTCTGGTCTTACAACGCGGAGCTTCTTGTTGCCCTGGAGAACCAACATACAATTGATTTAACTGACTCAGAAATGAACAAACTGTTCGAAAGAACAAGGAAGCAACTGCGGGAAAATGCTGAGGACATGGGCAATGGTTGCTTCAAAATATACCACAAATGTGACAATGCCTGCATAGGATCAATCAGAAATGGAACTTATGACCATGATGTATACAGAGACGAGGCATTAAACAATCGGTTCCAGATCAAAAGTGTTCAGCTGAAGTCAGGATACAAAGATTGGATCCTATGGATTTCCTTTGCCATGTCATGCTTTTTGCTTTGTGTTGTTCTGCTGGGGTTCATTATGTGGACCTGCCAAAAAGGCAACATTAAGTGCAACATTTGCATTTGA",
"------------------------------------------------CAAAAACTTCCCGGAAATGACAACAGCACGGCAACGCTGTGCCTTGGGCACCATGCAGTACCAAACGGAACGATAGTGAAAACAATCACGAATGACCAAATTGAAGTTACTAATGCTACTGAGCTGGTTCAGAGTTCCTCAACAGGTGGAATATGCGACAGTCCTCATCAGATCCTTGATGGAGAAAACTGCACACTAATAGATGCTCTATTGGGAGACCCTCAGTGTGATGGCTTCCAAAATAAGAAATGGGACCTTTTTGTTGAACGCAGCAAAGCCTACAGCAACTGTTACCCTTATGATGTGCCGGATTATGCCTCCCTTAGGTCACTAGTTGCCTCATCCGGCACACTGGAGTTTAACAATGAAAGCTTCGATTGGACTGGAGTCACTCAGAATGGAACAAGCTCTGCTTGCAAAAGGAGATCTAATAAAAGTTTCTTTAGTAGATTGAATTGGTTGACCCACTTAAAATACAAATACCCAGCATTGAACGTGACTATGCCAAACAATGAAAAATTTGACAAATTGTACATTTGGGGGGTTCACCACCCGGGTACGGACAGTGACCAAATCAGCCTATATGCTCAAGCATCAGGAAGAATCACAGTCTCTACCAAAAGAAGCCAACAAACTGTAATCCCGAATATCGGATCTAGACCCAGGGTAAGGGATGTCTCCAGCCGAATAAGCATCTATTGGACAATAGTAAAACCGGGAGACATACTTTTGATTAACAGCACAGGGAATCTAATTGCTCCTCGGGGTTACTTCAAAATACGAAGTGGGAAAAGCTCAATAATGAGATCAGATGCACCCATTGGCAAATGCAATTCTGAATGCATCACTCCAAATGGAAGCATTCCCAATGACAAACCATTTCAAAATGTAAACAGGATCACATATGGGGCCTGTCCCAGATATGTTAAGCAAAACACTCTGAAATTGGCAACAGGGATGCGAAATGTACCAGAGAAACAAACTAGAGGCATATTTGGCGCAATCGCGGGTTTCATAGAAAATGGTTGGGAGGGAATGGTGGACGGTTGGTACGGTTTCAGGCATCAAAATTCTGAGGGCACAGGACAAGCAGCAGATCTCAAAAGCNCTCAAGCAGCAATGAAGACTATCATTG--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------",
"ATGAAGACCATCATTGCTTTGAGCTACATTTTCTGTCTGGCTCTCGGCCAAGACCTTCCAGGAAATGACAACAGCACAGCAACGCTGTGCCTGGGACATCATGCGGTGCCAAACGGAACACTAGTGAAAACAATCACAGATGATCAGATTGAAGTGACTAATGCTACTGAGCTAGTTCAGAGCTCCTCAACGGGGAAAATATGCAACAATCCTCATCGAATCCTTGATGGAATAGACTGCACACTGATAGATGCTCTATTGGGGGACCCTCATTGTGATGTTTTTCAAAATGAGACATGGGACCTTTTCGTTGAACGCAGCAAAGCTTTCAGCAACTGTTACCCTTATGATGTGCCAGATTATGCCTCCCTTAGGTCACTAGTTGCCTCGTCAGGCACTCTGGAGTTTATCACTGAGGGTTTCACTTGGACTGGGGTCACTCAGAATGGGGGAAGCAATGCTTGCAAAAGGGGACCTGGTAGCGGTTTTTTCAGTAGACTGAACTGGTTGACCAAATCAGGAAGCACATATCCAGTGCTGAACGTGACTATGCCAAACAATGACAATTTTGACAAACTATACATTTGGGGGGTTCACCACCCGAGCACGAACCAAGAACAAACCAGCCTGTATGTTCAAGCATCAGGGAGAGTCACAGTCTCTACCAGGAGAAGCCGGCAAACTATAATCCCGAATATCGGGTCCAGACCCTGGGTAAGGGGTCTGTCTAGTAGAATAAGCATCTATTGGACAATAGTTAAGCCGGGAGACGTACTGGTAATTAATAGTAATGGGAACCTAATCGCTCCTCGGGGTTATTTCAAAATGCGCACTGGGAAAAGCTCAATAATGAGGTCAGATGCACCTATTGATACCTGTATTTCTGAATGCATCACTCCAAATGGAAGCATTCCCAATGACAAGCCCTTTCAAAACGTAAACAAGATCACATATGGAGCATGCCCCAAGTATGTTAAGCAAAACACCCTGAAGTTGGCAACAGGGATGCGGAATGTACCAGAGAAACAAACTAGAGGCCTATTCGGCGCAATAGCAGGTTTCATAGAAAATGGTTGGGAGGGAATGATAGACGGTTGGTACGGTTTCAGGCATCAAAATTCTGAGGGCACAGGACAAGCAGCAGATCTTAAAAGCACTCAAGCAGCCATCGACCAAATCAATGGGAAATTGAACAGGGTAATCGAGAAGACGAACGAGAAATTCCATCAAATCGAAAAGGAATTCTCAGAAGTAGAAGGGAGAATTCAGGACCTCGAGAAATACGTTGAAGACACTAAAATAGATCTCTGGTCTTACAATGCGGAGCTTCTTGTCGCTCTGGAGAATCAACATACAATTGACCTGACTGACTCGGAAATGAACAAGCTGTTTGAAAAAACAAGGAGGCAACTGAGGGAAAATGCTGAAGACATGGGCAATGGTTGCTTCAAAATATACCACAAATGTGACAACGCTTGCATAGAGTCAATCAGAAATGGGACTTATGACCATAATGTATACAGAGACGAAGCATTAAACAACCGGTTTCAGATCAAAGGTGTTGAACTGAAGTCTGGATACAAAGACTGGATCCTGTGGATTTCCTTTGCCATATCATGCTTTTTGCTTTGTGTTGTTTTGCTGGGGTTCATCATGTGGGCCTGCCAGAGAGGCAACATTAGGTGCAACATTTGCATTTGA"
]
for reassortant_seq in reassortant_seqs:
for v in self.viruses:
dist = distance(Seq(reassortant_seq), v)
if (dist < 0.02) and v.num_date>2005:
remove_viruses.append(v)
if self.verbose>1:
print "\tremoving", v.strain
self.viruses = MultipleSeqAlignment([v for v in self.viruses if v not in remove_viruses])
def clean_outliers(self):
"""Remove single outlying viruses"""
remove_viruses = []
outlier_strains = ["A/Sari/388/2006", "A/SaoPaulo/36178/2015", "A/Pennsylvania/40/2010", "A/Pennsylvania/14/2010", "A/Pennsylvania/09/2011", "A/OSAKA/31/2005", "A/Ohio/34/2012", "A/Kenya/170/2011", "A/Kenya/168/2011", "A/Indiana/21/2013", "A/Indiana/13/2012", "A/Indiana/11/2013", "A/Indiana/08/2012", "A/Indiana/06/2013", "A/India/6352/2012", "A/HuNan/01/2014", "A/Helsinki/942/2013", "A/Guam/AF2771/2011", "A/Chile/8266/2003", "A/Busan/15453/2009", "A/Nepal/142/2011", "A/Kenya/155/2011", "A/Guam/AF2771/2011"]
for outlier_strain in outlier_strains:
for v in self.viruses:
if (v.strain == outlier_strain):
remove_viruses.append(v)
if self.verbose > 1:
print "\tremoving", v.strain
self.viruses = MultipleSeqAlignment([v for v in self.viruses if v not in remove_viruses])
def clean(self):
self.clean_generic()
self.clean_outbreaks()
#print "Number of viruses after outbreak filtering:",len(self.viruses)
self.clean_reassortants()
print "Number of viruses after reassortant filtering:",len(self.viruses)
self.clean_outliers()
print "Number of viruses after outlier filtering:",len(self.viruses)
class H3N2_refine(tree_refine):
def __init__(self, **kwargs):
tree_refine.__init__(self, **kwargs)
self.epitope_mask = ""
if "epitope_masks_fname" in self.kwargs and "epitope_mask_version" in self.kwargs:
epitope_map = {}
with open(self.kwargs["epitope_masks_fname"]) as f:
for line in f:
(key, value) = line.split()
epitope_map[key] = value
if self.kwargs["epitope_mask_version"] in epitope_map:
self.epitope_mask = epitope_map[self.kwargs["epitope_mask_version"]]
self.epitope_mask = np.fromstring(self.epitope_mask, dtype='S1') # epitope_mask is numpy array
def refine(self):
self.refine_generic() # -> all nodes now have aa_seq, xvalue, yvalue, trunk, and basic virus properties
self.add_H3N2_attributes()
def epitope_sites(self, aa):
aaa = np.fromstring(aa, 'S1')
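        # keep only the positions flagged '1' in the epitope mask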
return ''.join(aaa[self.epitope_mask[:len(aa)] == '1'])
def nonepitope_sites(self, aa):
aaa = np.fromstring(aa, 'S1')
return ''.join(aaa[self.epitope_mask[:len(aa)] == '0'])
def receptor_binding_sites(self, aa):
'''
Receptor binding site mutations from Koel et al. 2014
These are (145, 155, 156, 158, 159, 189, 193) in canonical HA numbering
need to subtract one since python arrays start at 0
'''
sp = 16
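        # canonical HA1 site p sits at index (p + sp - 1) of the concatenated
        # SigPep+HA1+HA2 peptide: shift by the signal peptide length sp, then
        # subtract 1 for python's 0-based indexing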
rbs = map(lambda x:x+sp-1, [145, 155, 156, 158, 159, 189, 193])
return ''.join([aa[pos] for pos in rbs])
def get_total_peptide(self, node):
'''
        the concatenation of signal peptide, HA1 and HA2
'''
return node.aa_seq['SigPep']+node.aa_seq['HA1'] + node.aa_seq['HA2']
def epitope_distance(self, aaA, aaB):
"""Return distance of sequences aaA and aaB by comparing epitope sites"""
epA = self.epitope_sites(aaA)
epB = self.epitope_sites(aaB)
distance = sum(a != b for a, b in izip(epA, epB))
return distance
def nonepitope_distance(self, aaA, aaB):
"""Return distance of sequences aaA and aaB by comparing non-epitope sites"""
neA = self.nonepitope_sites(aaA)
neB = self.nonepitope_sites(aaB)
distance = sum(a != b for a, b in izip(neA, neB))
return distance
def receptor_binding_distance(self, aaA, aaB):
"""Return distance of sequences aaA and aaB by comparing receptor binding sites"""
neA = self.receptor_binding_sites(aaA)
neB = self.receptor_binding_sites(aaB)
distance = sum(a != b for a, b in izip(neA, neB))
return distance
def add_H3N2_attributes(self):
root = self.tree.seed_node
root_total_aa_seq = self.get_total_peptide(root)
for node in self.tree.postorder_node_iter():
total_aa_seq = self.get_total_peptide(node)
node.ep = self.epitope_distance(total_aa_seq, root_total_aa_seq)
node.ne = self.nonepitope_distance(total_aa_seq, root_total_aa_seq)
node.rb = self.receptor_binding_distance(total_aa_seq, root_total_aa_seq)
class H3N2_mutations(tree_mutations):
def __init__(self, **kwargs):
tree_mutations.__init__(self, **kwargs)
def mutations(self):
self.catalog_mutations()
class H3N2_stability(tree_stability):
def __init__(self, **kwargs):
tree_stability.__init__(self, **kwargs)
def stability(self):
self.calculate_stability()
class H3N2_HI(HI_tree):
def __init__(self, **kwargs):
HI_tree.__init__(self, **kwargs)
class H3N2_fitness(fitness_model):
def __init__(self, **kwargs):
if 'predictors' in self.kwargs:
predictor_input = self.kwargs['predictors']
fitness_model.__init__(self, predictor_input = predictor_input, **kwargs)
else:
fitness_model.__init__(self, **kwargs)
def annotate_fitness(self, estimate_frequencies = True):
self.predict(estimate_frequencies=estimate_frequencies)
class H3N2_process(process, H3N2_filter, H3N2_clean, H3N2_refine, H3N2_HI, H3N2_fitness, H3N2_mutations, H3N2_stability):
"""docstring for H3N2_process, H3N2_filter"""
def __init__(self,verbose = 0, force_include = None,
force_include_all = False, max_global= True, **kwargs):
self.force_include = force_include
self.force_include_all = force_include_all
self.max_global = max_global
process.__init__(self, **kwargs)
H3N2_filter.__init__(self,**kwargs)
H3N2_clean.__init__(self,**kwargs)
H3N2_refine.__init__(self,**kwargs)
H3N2_mutations.__init__(self, **kwargs)
H3N2_stability.__init__(self, **kwargs)
H3N2_HI.__init__(self,**kwargs)
H3N2_fitness.__init__(self,**kwargs)
self.verbose = verbose
def run(self, steps, viruses_per_month=50, raxml_time_limit = 1.0, lam_HI=2.0, lam_pot=0.3, lam_avi=2):
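        '''Run the requested pipeline steps in order; each step dumps its state,
        so a run that skips 'filter' resumes from the saved state via self.load().'''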
if 'filter' in steps:
print "--- Virus filtering at " + time.strftime("%H:%M:%S") + " ---"
self.filter()
if self.force_include is not None and os.path.isfile(self.force_include):
with open(self.force_include) as infile:
forced_strains = [fix_name(line.strip().split('\t')[0]).upper() for line in infile]
else:
forced_strains = []
self.subsample(viruses_per_month,
prioritize=forced_strains, all_priority=self.force_include_all,
region_specific = self.max_global)
self.add_older_vaccine_viruses(dt = 3)
self.dump()
else:
self.load()
if 'align' in steps:
self.align() # -> self.viruses is an alignment object
if 'clean' in steps:
print "--- Clean at " + time.strftime("%H:%M:%S") + " ---"
self.clean() # -> every node as a numerical date
self.dump()
if 'tree' in steps:
print "--- Tree infer at " + time.strftime("%H:%M:%S") + " ---"
self.infer_tree(raxml_time_limit) # -> self has a tree
self.dump()
if 'ancestral' in steps:
print "--- Infer ancestral sequences " + time.strftime("%H:%M:%S") + " ---"
self.infer_ancestral() # -> every node has a sequence
self.dump()
if 'refine' in steps:
print "--- Tree refine at " + time.strftime("%H:%M:%S") + " ---"
self.refine()
self.dump()
if 'frequencies' in steps:
print "--- Estimating frequencies at " + time.strftime("%H:%M:%S") + " ---"
self.determine_variable_positions()
self.estimate_frequencies(tasks = ["mutations", "tree"])
if 'genotype_frequencies' in steps:
self.estimate_frequencies(tasks = ["genotypes"])
self.dump()
method = 'nnl1reg'
if 'HI' in steps:
print "--- Adding HI titers to the tree " + time.strftime("%H:%M:%S") + " ---"
try:
self.determine_variable_positions()
self.map_HI(training_fraction=1.0, method = 'nnl1reg',
lam_HI=lam_HI, lam_avi=lam_avi, lam_pot=lam_pot, map_to_tree=True)
self.map_HI(training_fraction=1.0, method = 'nnl1reg', force_redo=True,
lam_HI=lam_HI, lam_avi=lam_avi, lam_pot=lam_pot, map_to_tree=False)
except:
print("HI modeling failed!")
#freqs = self.determine_HI_mutation_frequencies(threshold = 0.1)
#self.frequencies["mutations"]["global"].update(freqs)
self.dump()
if 'fitness' in steps:
print "--- Estimating fitnesses at " + time.strftime("%H:%M:%S") + " ---"
self.annotate_fitness()
self.dump()
if 'mutations' in steps:
print "--- Tree mutations at " + time.strftime("%H:%M:%S") + " ---"
self.mutations()
self.dump()
if 'stability' in steps:
print "--- Stability at " + time.strftime("%H:%M:%S") + " ---"
self.stability()
self.dump()
if 'export' in steps:
self.add_titers()
self.temporal_regional_statistics()
# exporting to json, including the H3N2 specific fields
self.export_to_auspice(tree_fields = [
'ep', 'ne', 'rb', 'aa_muts','accession','isolate_id', 'lab','db', 'country', 'dfreq', 'fitness', 'pred_distance',
'dHI', 'cHI', 'mHI', 'mean_HI_titers', 'HI_titers', 'HI_titers_raw', 'serum', 'HI_info',
'avidity_tree', 'avidity_mut', 'potency_mut', 'potency_tree', 'mean_potency_mut', 'mean_potency_tree', 'autologous_titers'],
annotations = ['3c2.a', '3c3.a', '3c3.b'])
if params.html:
self.generate_indexHTML()
self.export_HI_mutation_effects()
if 'HIvalidate' in steps:
from diagnostic_figures import tree_additivity_symmetry, fmts
print "--- generating validation figures " + time.strftime("%H:%M:%S") + " ---"
print "-- number of non-zero branch parameters: ",np.sum([n.dHI>1e-3 for n in self.tree.postorder_node_iter()])
print "-- number of non-zero mutation parameters: ",np.sum([val>1e-3 for val in self.mutation_effects.values()])
for model in ['tree', 'mutation']:
try:
tree_additivity_symmetry(self, model)
for fmt in fmts: plt.savefig(self.htmlpath()+'HI_symmetry_'+model+fmt)
except:
print("Can't generate symmetry/additivity figures")
try:
self.slopes_muts = slope_vs_mutation(self)
except:
                print("Couldn't derive slopes, probably too small a time interval")
self.generate_validation_figures(method)
if __name__=="__main__":
all_steps = ['filter', 'align', 'clean', 'tree', 'ancestral', 'refine',
'frequencies','HI', 'mutations', 'stability', 'export', 'HIvalidate']
from process import parser
import matplotlib.pyplot as plt
plt.ion()
params = parser.parse_args()
lt = time.localtime()
num_date = round(lt.tm_year+(lt.tm_yday-1.0)/365.0,2)
params.time_interval = (num_date-params.years_back, num_date)
if params.interval is not None and len(params.interval)==2 and params.interval[0]<params.interval[1]:
params.time_interval = (params.interval[0], params.interval[1])
dt= params.time_interval[1]-params.time_interval[0]
params.pivots_per_year = 12.0 if dt<5 else 6.0
steps = all_steps[all_steps.index(params.start):(all_steps.index(params.stop)+1)]
if params.skip is not None:
for tmp_step in params.skip:
if tmp_step in steps:
print "skipping",tmp_step
steps.remove(tmp_step)
# add all arguments to virus_config (possibly overriding)
virus_config.update(params.__dict__)
virus_config['serum_Kc'] = 0.003
# pass all these arguments to the processor: will be passed down as kwargs through all classes
myH3N2 = H3N2_process(**virus_config)
if params.test:
myH3N2.load()
else:
myH3N2.run(steps, viruses_per_month = virus_config['viruses_per_month'],
raxml_time_limit = virus_config['raxml_time_limit'],
lam_HI = virus_config['lam_HI'],
lam_avi = virus_config['lam_avi'],
lam_pot = virus_config['lam_pot'],
) | agpl-3.0 |
LiaoPan/scikit-learn | examples/ensemble/plot_partial_dependence.py | 249 | 4456 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
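Concretely, for target features :math:`X_S` with complement :math:`X_C`, the
partial dependence of a model :math:`f` is
:math:`pd(x_S) = E_{X_C}\left[f(x_S, X_C)\right]`, estimated by averaging the
model's predictions over the training-set values of :math:`X_C`.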
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AveOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; neither does the average
number of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print('')
features = [0, 5, 1, 2, (5, 1)]
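# feature indices refer to cal_housing.feature_names: 0=MedInc, 5=AveOccup,
# 1=HouseAge, 2=AveRooms, plus the (AveOccup, HouseAge) pair for the 2-D plot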
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print('')
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
| bsd-3-clause |
dfci/pancanmet_analysis | oldcode/mapallmetabolon_Jun012015.py | 1 | 10447 | # A script to map all metabolon data onto one consensus matrix
import os, sys, csv, numpy as np, scipy as sp, scipy.stats as st, matplotlib.pyplot as plt, pdb, pandas as pd
#####################################################################################
# Functions
def createnewentry( dictlist, current, nextentry, studymap, nextentryflag):
# We have a totally new metabolite, update accordingly
for item in range(len(dictlist)):
for currentitem in current[item].split('|'):
dictlist[item][ currentitem ] = nextentry
nextentryflag = 1
studymap.append(nextentry)
return dictlist, current, nextentry, studymap, nextentryflag
#####################################################################################
#studies = ['BLCA','BRCA','BRCATang','COAD','KICH','KIRC','LGG','OV','PAAD','PAADHussein1','PAADHussein2','PRAD', 'PRADLODA','STAD']
#studies = ['BLCA','BRCA','BRCATang','COAD','KIRC','LGG','OV','PAAD','PAADHussain1','PAADHussain2','PRAD', 'PRADLODA','STAD'] # No KICH
studies = ['BLCA','BRCA'] # No KICH
# Initialize dictionaries
pubchemdict = dict()
keggdict = dict()
hmdbdict = dict()
chebidict = dict()
namedict = dict()
dictlist = [namedict, pubchemdict, keggdict, hmdbdict, chebidict]
# Initialize the counter for which row in the data we are adding next
nextentry = 0
# Initialize storage for the final indices to map to
finalmap = []
##########################################################################################
# Part 1: Go through each study and try to merge metabolites
##########################################################################################
# For each study, open the final Metabolon IDs file and assign a unique row to each metabolite
studyctr = 0
errorctr = 0
for study in studies:
# Open the file
f = open( '../data/studies/' + study + '/' + study + '_FinalMetIDs.csv', 'rU' )
r = csv.reader( f, delimiter = ',' )
rowctr = 0
    # The variable studymap indicates to which row of the final dataset we will write each metabolite in the current dataset
studymap = []
# For each metabolite
for row in r:
# Set a flag indicating whether we have added a new metabolite
nextentryflag = 0
if rowctr == 0:
rowctr += 1
continue # Skip the header
# Get the relevant data
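        # Columns are: metabolite name, then PubChem, KEGG, HMDB and ChEBI IDs,
        # in the same order as the dictionaries in dictlist.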
current = [row[0].title(),row[1],row[2],row[3],row[4]]
key2add = [ [],[],[],[],[]]
# Initialize an array storing the row each metabolite maps to
map = []
# For each dictionary
for item in range(len(dictlist)):
for currentitem in current[item].split('|'):
if currentitem in dictlist[item].keys() and currentitem != '' and currentitem != 'NA' and currentitem != 'nan':
# We have already mapped it, make sure it maps with all other keys
map.append( dictlist[item][currentitem] )
else:
# This item is not in the dictionary, but we could potentially add it
key2add[item].append( currentitem )
# Check if we have consensus using the mappings
if row[0] == 'allo-threonine' or row[0] == 'threonine':
pdb.set_trace()
if len(map) == 0:
# We have a totally new metabolite, update accordingly
# for item in range(len(dictlist)):
#
# for currentitem in current[item].split('|'):
#
# dictlist[item][ currentitem ] = nextentry
#
# nextentryflag = 1
# studymap.append(nextentry)
# Create a new entry
dictlist, current, nextentry, studymap, nextentryflag = createnewentry( dictlist, current, nextentry, studymap, nextentryflag)
else:
# Check for consensus and that this metabolite hasn't already been mapped in this study
if len( np.unique(map) ) == 1 and map[0] not in studymap:
# We have perfect consensus, update the studymap
studymap.append(map[0])
# Add all keys in key2add
for item in range(len(key2add)):
for key in key2add[item]:
dictlist[item][key] = map[0]
else:
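                # No consensus among the ID maps: invert namedict so the
                # conflicting row indices can be reported by metabolite name.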
invname = {v:k for k,v in namedict.items()}
print 'Error, no consensus in mapping for metabolite ' + current[0]+ ' in ' + study + '!'
nameoptions = [invname[item] for item in map]
print nameoptions
# Use name if possible
if current[0] in namedict.keys():
studymap.append( namedict[current[0]] )
print 'Using name to match ' + current[0] + '\n'
else:
print 'No consensus and not in namedict, requesting user input for ' + current[0] + '!\n'
usr_input = raw_input('Please enter index (first item is index 0) of metabolite name to use for ' + current[0] + ',given the following options, or type NEW to generate a new metabolite: \n' + '\n'.join( nameoptions ) + '\n' )
if usr_input != 'NEW':
studymap.append( map[int(usr_input)] )
######################################################################
# This is new as of May 29, 2015
else:
# We have a totally new metabolite, update accordingly
for item in range(len(dictlist)):
for currentitem in current[item].split('|'):
dictlist[item][ currentitem ] = nextentry
nextentryflag = 1
studymap.append(nextentry)
errorctr += 1
######################################################################
# Update nextentry
nextentry = nextentry + nextentryflag
# Update rowctr
rowctr += 1
# Update study counter
studyctr += 1
# Update finalmap
finalmap.append( studymap )
# Close the file
f.close()
#print(keggdict)
#pdb.set_trace()
##########################################################################################
# Part 2: Return to each study and actually merge using finalmap
##########################################################################################
# First, check that all names match appropriately
# Initialize data array
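# One row per merged metabolite and one column per study; each cell stores
# the name string that study used for the metabolite.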
alldata = np.zeros([ nextentry, len(studies) ], dtype = 'S100' )
studyctr = 0
mettypedict = dict()
submettypedict = dict()
for study in studies:
# Open the file
f = open( '../data/studies/' + study + '/' + study + '_FinalMetIDs.csv', 'rU' )
r = csv.reader( f, delimiter = ',' )
rowctr = 0
# For each metabolite
for row in r:
if rowctr == 0:
rowctr += 1
continue
alldata[ finalmap[studyctr][rowctr-1], studyctr ] = row[0]
mettypedict[ row[0].title() ] = row[1]
submettypedict[ row[0].title() ] = row[2]
rowctr += 1
studyctr += 1
f.close()
# Print out
f = open('../data/merged_metabolomics/mergednames.csv', 'w')
w = csv.writer( f, delimiter = ',' )
w.writerow( studies )
for row in range(alldata.shape[0]):
w.writerow( alldata[row,:] )
f.close()
# Merge into metabolite names
metnames = []
for row in range(alldata.shape[0]):
notblank = [item for item in np.unique(alldata[row,:]) if item != '']
if len(np.unique(notblank)) == 1:
metnames.append( notblank[0].title() )
else:
print 'Not a unique metabolite name, using ' + notblank[0].title()
metnames.append( notblank[0].title() )
# Assuming everything looks good, merge all the data
studyctr = 0
studyrow= []
tumorrow = []
patnames = []
for study in studies:
# Open the file
f = open( '../data/metabolomics/' + study + '/' + study + '_metdata.csv', 'rU' )
r = csv.reader( f, delimiter = ',' )
rowctr = 0
# For each metabolite
for row in r:
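        # Row 0 holds the patient IDs and row 1 the tissue types;
        # metabolite abundance rows start at rowctr == 2.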
if rowctr == 0:
# Store patient names
patnames_study = [':'.join( [study,row[item]] ) for item in np.arange(1,len(row)) ]
if rowctr == 1:
tumortype = ['Normal' if 'NORMAL' in item.upper() else item.title() for item in row[1:]]
if studyctr == 0:
# Initialize data array
alldata = np.zeros( ( nextentry, len(tumortype) ) )
else:
tempdata = np.zeros( ( nextentry, len(tumortype) ) )
if rowctr > 1:
if studyctr == 0:
alldata[ finalmap[studyctr][rowctr - 2], : ] = row[1:]
else:
tempdata[ finalmap[studyctr][rowctr - 2], : ] = row[1:]
rowctr += 1
# Stack data if necessary
if studyctr != 0:
alldata = np.hstack(( alldata, tempdata ))
# Add data to the study row
studyrow = studyrow + [study] * (len(row) - 1)
# Add data to the tumor row
tumorrow = tumorrow + tumortype
# Merge patnames with type of tissue and
patnames = patnames + [':'.join([patnames_study[item],tumortype[item]]) for item in range(len(patnames_study))]
studyctr += 1
# Convert into pandas dataframe
alldf = pd.DataFrame( alldata )
studydf = pd.DataFrame( studyrow )
studydf = studydf.transpose()
tumordf = pd.DataFrame( tumorrow )
studydf = studydf.append( tumordf.transpose() )
studydf = studydf.append( alldf )
alldf = studydf
alldf.index = ['Study','TissueType'] + metnames
# Add a column corresponding to metabolite type and metabolite subtype
mettypename = ['',''] + [mettypedict[item] for item in metnames]
submettypename = ['',''] + [submettypedict[item] for item in metnames]
alldf[ alldf == 0 ] = np.nan
alldf[ alldf == '' ] = np.nan
alldf[ alldf == 'nan' ] = np.nan
print alldf.shape
# Finally, if there are any duplicate names, merge the rows
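# np.unique keeps the first occurrence of each index name, so dupidx holds the
# positions of the later duplicates.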
dupidx = np.setdiff1d(np.arange(len(alldf.index)), np.unique(alldf.index, return_index=True)[1])
dup = [alldf.index[item] for item in dupidx ]
dropidx = []
for name in dup:
idx = [item for item in range(len(alldf.index)) if alldf.index[item] == name]
print alldf.index[idx[0]],alldf.index[idx[1]]
# The last two columns are metabolite type names, so be careful
newdata = [alldf.ix[idx[0],item] if ~np.isnan(alldf.ix[idx[0],item]) else alldf.ix[idx[1],item] for item in range( alldf.shape[1]-2 ) ]
newdata = newdata + [alldf.ix[idx[0],alldf.shape[1]-2], alldf.ix[idx[0],alldf.shape[1]-1] ]
alldf.ix[idx[0],:] = newdata
dropidx.append( idx[1] )
# Drop duplicate rows
idx2keep = np.setdiff1d( range(alldf.shape[0]),dupidx )
alldf = alldf.ix[idx2keep,:]
# Add column names
alldf.columns = patnames
# Save to csv
alldf.to_csv( '../data/merged_metabolomics/alldata.csv', na_rep = 'NA')
##########################################################################################
# Part 3: Write out a list of IDs for each metabolite
##########################################################################################
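# Invert each ID dictionary so that each merged row index can be traced back
# to an identifier that mapped to it.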
invdictlist = []
for item in dictlist:
invdictlist.append( {k:v for v,k in item.items()} )
# Write a file with the ids
f = open('../data/merged_metabolomics/merged_IDs.csv', 'w')
w = csv.writer( f, delimiter = ',' )
for i in range(alldf.shape[0]):
w.writerow( [hotdict.get(i,'NA') for hotdict in invdictlist] )
f.close() | lgpl-3.0 |
keir-rex/zipline | zipline/finance/performance/tracker.py | 14 | 23349 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Performance Tracking
====================
+-----------------+----------------------------------------------------+
| key | value |
+=================+====================================================+
| period_start | The beginning of the period to be tracked. datetime|
| | in pytz.utc timezone. Will always be 0:00 on the |
| | date in UTC. The fact that the time may be on the |
| | prior day in the exchange's local time is ignored |
+-----------------+----------------------------------------------------+
| period_end | The end of the period to be tracked. datetime |
| | in pytz.utc timezone. Will always be 23:59 on the |
| | date in UTC. The fact that the time may be on the |
| | next day in the exchange's local time is ignored |
+-----------------+----------------------------------------------------+
| progress | percentage of test completed |
+-----------------+----------------------------------------------------+
| capital_base | The initial capital assumed for this tracker. |
+-----------------+----------------------------------------------------+
| cumulative_perf | A dictionary representing the cumulative |
| | performance through all the events delivered to |
| | this tracker. For details see the comments on |
| | :py:meth:`PerformancePeriod.to_dict` |
+-----------------+----------------------------------------------------+
| todays_perf | A dictionary representing the cumulative |
| | performance through all the events delivered to |
| | this tracker with datetime stamps between last_open|
| | and last_close. For details see the comments on |
| | :py:meth:`PerformancePeriod.to_dict` |
| | TODO: adding this because we calculate it. May be |
| | overkill. |
+-----------------+----------------------------------------------------+
| cumulative_risk | A dictionary representing the risk metrics |
| _metrics | calculated based on the positions aggregated |
| | through all the events delivered to this tracker. |
| | For details look at the comments for |
| | :py:meth:`zipline.finance.risk.RiskMetrics.to_dict`|
+-----------------+----------------------------------------------------+
"""
from __future__ import division
import logbook
import pickle
from six import iteritems
from datetime import datetime
import numpy as np
import pandas as pd
from pandas.tseries.tools import normalize_date
import zipline.finance.risk as risk
from zipline.finance.trading import TradingEnvironment
from . period import PerformancePeriod
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
from . position_tracker import PositionTracker
log = logbook.Logger('Performance')
class PerformanceTracker(object):
"""
Tracks the performance of the algorithm.
"""
def __init__(self, sim_params):
self.sim_params = sim_params
env = TradingEnvironment.instance()
self.period_start = self.sim_params.period_start
self.period_end = self.sim_params.period_end
self.last_close = self.sim_params.last_close
first_open = self.sim_params.first_open.tz_convert(env.exchange_tz)
self.day = pd.Timestamp(datetime(first_open.year, first_open.month,
first_open.day), tz='UTC')
self.market_open, self.market_close = env.get_open_and_close(self.day)
self.total_days = self.sim_params.days_in_period
self.capital_base = self.sim_params.capital_base
self.emission_rate = sim_params.emission_rate
all_trading_days = env.trading_days
mask = ((all_trading_days >= normalize_date(self.period_start)) &
(all_trading_days <= normalize_date(self.period_end)))
self.trading_days = all_trading_days[mask]
self.dividend_frame = pd.DataFrame()
self._dividend_count = 0
self.position_tracker = PositionTracker()
self.perf_periods = []
if self.emission_rate == 'daily':
self.all_benchmark_returns = pd.Series(
index=self.trading_days)
self.cumulative_risk_metrics = \
risk.RiskMetricsCumulative(self.sim_params)
elif self.emission_rate == 'minute':
self.all_benchmark_returns = pd.Series(index=pd.date_range(
self.sim_params.first_open, self.sim_params.last_close,
freq='Min'))
self.cumulative_risk_metrics = \
risk.RiskMetricsCumulative(self.sim_params,
create_first_day_stats=True)
self.minute_performance = PerformancePeriod(
# initial cash is your capital base.
self.capital_base,
# the cumulative period will be calculated over the
# entire test.
self.period_start,
self.period_end,
# don't save the transactions for the cumulative
# period
keep_transactions=False,
keep_orders=False,
                # don't serialize positions for cumulative period
serialize_positions=False
)
self.minute_performance.position_tracker = self.position_tracker
self.perf_periods.append(self.minute_performance)
# this performance period will span the entire simulation from
# inception.
self.cumulative_performance = PerformancePeriod(
# initial cash is your capital base.
self.capital_base,
# the cumulative period will be calculated over the entire test.
self.period_start,
self.period_end,
# don't save the transactions for the cumulative
# period
keep_transactions=False,
keep_orders=False,
            # don't serialize positions for cumulative period
serialize_positions=False,
)
self.cumulative_performance.position_tracker = self.position_tracker
self.perf_periods.append(self.cumulative_performance)
# this performance period will span just the current market day
self.todays_performance = PerformancePeriod(
# initial cash is your capital base.
self.capital_base,
# the daily period will be calculated for the market day
self.market_open,
self.market_close,
keep_transactions=True,
keep_orders=True,
serialize_positions=True,
)
self.todays_performance.position_tracker = self.position_tracker
self.perf_periods.append(self.todays_performance)
self.saved_dt = self.period_start
# one indexed so that we reach 100%
self.day_count = 0.0
self.txn_count = 0
self.account_needs_update = True
self._account = None
def __repr__(self):
return "%s(%r)" % (
self.__class__.__name__,
{'simulation parameters': self.sim_params})
@property
def progress(self):
if self.emission_rate == 'minute':
# Fake a value
return 1.0
elif self.emission_rate == 'daily':
return self.day_count / self.total_days
def set_date(self, date):
if self.emission_rate == 'minute':
self.saved_dt = date
self.todays_performance.period_close = self.saved_dt
def update_dividends(self, new_dividends):
"""
Update our dividend frame with new dividends. @new_dividends should be
a DataFrame with columns containing at least the entries in
zipline.protocol.DIVIDEND_FIELDS.
"""
# Mark each new dividend with a unique integer id. This ensures that
# we can differentiate dividends whose date/sid fields are otherwise
# identical.
new_dividends['id'] = np.arange(
self._dividend_count,
self._dividend_count + len(new_dividends),
)
self._dividend_count += len(new_dividends)
self.dividend_frame = pd.concat(
[self.dividend_frame, new_dividends]
).sort(['pay_date', 'ex_date']).set_index('id', drop=False)
def initialize_dividends_from_other(self, other):
"""
Helper for copying dividends to a new PerformanceTracker while
preserving dividend count. Useful if a simulation needs to create a
new PerformanceTracker mid-stream and wants to preserve stored dividend
info.
Note that this does not copy unpaid dividends.
"""
self.dividend_frame = other.dividend_frame
self._dividend_count = other._dividend_count
def handle_sid_removed_from_universe(self, sid):
"""
This method handles any behaviors that must occur when a SID leaves the
universe of the TradingAlgorithm.
Parameters
        ----------
sid : int
The sid of the Asset being removed from the universe.
"""
# Drop any dividends for the sid from the dividends frame
self.dividend_frame = self.dividend_frame[
self.dividend_frame.sid != sid
]
def update_performance(self):
# calculate performance as of last trade
for perf_period in self.perf_periods:
perf_period.calculate_performance()
def get_portfolio(self, performance_needs_update):
if performance_needs_update:
self.update_performance()
self.account_needs_update = True
return self.cumulative_performance.as_portfolio()
def get_account(self, performance_needs_update):
if performance_needs_update:
self.update_performance()
self.account_needs_update = True
if self.account_needs_update:
self._update_account()
return self._account
def _update_account(self):
self._account = self.cumulative_performance.as_account()
self.account_needs_update = False
def to_dict(self, emission_type=None):
"""
Creates a dictionary representing the state of this tracker.
Returns a dict object of the form described in header comments.
"""
# Default to the emission rate of this tracker if no type is provided
if emission_type is None:
emission_type = self.emission_rate
_dict = {
'period_start': self.period_start,
'period_end': self.period_end,
'capital_base': self.capital_base,
'cumulative_perf': self.cumulative_performance.to_dict(),
'progress': self.progress,
'cumulative_risk_metrics': self.cumulative_risk_metrics.to_dict()
}
if emission_type == 'daily':
_dict['daily_perf'] = self.todays_performance.to_dict()
elif emission_type == 'minute':
_dict['minute_perf'] = self.todays_performance.to_dict(
self.saved_dt)
else:
raise ValueError("Invalid emission type: %s" % emission_type)
return _dict
def process_trade(self, event):
# update last sale, and pay out a cash adjustment
cash_adjustment = self.position_tracker.update_last_sale(event)
if cash_adjustment != 0:
for perf_period in self.perf_periods:
perf_period.handle_cash_payment(cash_adjustment)
def process_transaction(self, event):
self.txn_count += 1
self.position_tracker.execute_transaction(event)
for perf_period in self.perf_periods:
perf_period.handle_execution(event)
def process_dividend(self, dividend):
log.info("Ignoring DIVIDEND event.")
def process_split(self, event):
leftover_cash = self.position_tracker.handle_split(event)
if leftover_cash > 0:
for perf_period in self.perf_periods:
perf_period.handle_cash_payment(leftover_cash)
def process_order(self, event):
for perf_period in self.perf_periods:
perf_period.record_order(event)
def process_commission(self, event):
self.position_tracker.handle_commission(event)
for perf_period in self.perf_periods:
perf_period.handle_commission(event)
def process_benchmark(self, event):
if self.sim_params.data_frequency == 'minute' and \
self.sim_params.emission_rate == 'daily':
# Minute data benchmarks should have a timestamp of market
# close, so that calculations are triggered at the right time.
# However, risk module uses midnight as the 'day'
# marker for returns, so adjust back to midnight.
midnight = pd.tseries.tools.normalize_date(event.dt)
else:
midnight = event.dt
if midnight not in self.all_benchmark_returns.index:
raise AssertionError(
("Date %s not allocated in all_benchmark_returns. "
"Calendar seems to mismatch with benchmark. "
"Benchmark container is=%s" %
(midnight,
self.all_benchmark_returns.index)))
self.all_benchmark_returns[midnight] = event.returns
def process_close_position(self, event):
# CLOSE_POSITION events that contain prices that must be handled as
# a final trade event
if 'price' in event:
self.process_trade(event)
txn = self.position_tracker.\
maybe_create_close_position_transaction(event)
if txn:
self.process_transaction(txn)
def check_upcoming_dividends(self, next_trading_day):
"""
Check if we currently own any stocks with dividends whose ex_date is
        the next trading day. Track how much we should be paid on those
dividends' pay dates.
Then check if we are owed cash/stock for any dividends whose pay date
is the next trading day. Apply all such benefits, then recalculate
performance.
"""
if len(self.dividend_frame) == 0:
# We don't currently know about any dividends for this simulation
# period, so bail.
return
# Dividends whose ex_date is the next trading day. We need to check if
# we own any of these stocks so we know to pay them out when the pay
# date comes.
ex_date_mask = (self.dividend_frame['ex_date'] == next_trading_day)
dividends_earnable = self.dividend_frame[ex_date_mask]
# Dividends whose pay date is the next trading day. If we held any of
# these stocks on midnight before the ex_date, we need to pay these out
# now.
pay_date_mask = (self.dividend_frame['pay_date'] == next_trading_day)
dividends_payable = self.dividend_frame[pay_date_mask]
position_tracker = self.position_tracker
if len(dividends_earnable):
position_tracker.earn_dividends(dividends_earnable)
if not len(dividends_payable):
return
net_cash_payment = position_tracker.pay_dividends(dividends_payable)
for period in self.perf_periods:
# notify periods to update their stats
period.handle_dividends_paid(net_cash_payment)
def check_asset_auto_closes(self, next_trading_day):
"""
Check if the position tracker currently owns any Assets with an
auto-close date that is the next trading day. Close those positions.
Parameters
----------
next_trading_day : pandas.Timestamp
The next trading day of the simulation
"""
auto_close_events = self.position_tracker.auto_close_position_events(
next_trading_day=next_trading_day
)
for event in auto_close_events:
self.process_close_position(event)
def handle_minute_close(self, dt):
"""
Handles the close of the given minute. This includes handling
market-close functions if the given minute is the end of the market
day.
Parameters
        ----------
dt : Timestamp
The minute that is ending
Returns
        -------
(dict, dict/None)
A tuple of the minute perf packet and daily perf packet.
If the market day has not ended, the daily perf packet is None.
"""
self.update_performance()
todays_date = normalize_date(dt)
account = self.get_account(False)
self.minute_performance.rollover()
bench_returns = self.all_benchmark_returns.loc[todays_date:dt]
# cumulative returns
bench_since_open = (1. + bench_returns).prod() - 1
self.cumulative_risk_metrics.update(todays_date,
self.todays_performance.returns,
bench_since_open,
account)
minute_packet = self.to_dict(emission_type='minute')
# if this is the close, update dividends for the next day.
# Return the performance tuple
if dt == self.market_close:
return (minute_packet, self._handle_market_close(todays_date))
else:
return (minute_packet, None)
def handle_market_close_daily(self):
"""
Function called after handle_data when running with daily emission
rate.
"""
self.update_performance()
completed_date = self.day
account = self.get_account(False)
# update risk metrics for cumulative performance
self.cumulative_risk_metrics.update(
completed_date,
self.todays_performance.returns,
self.all_benchmark_returns[completed_date],
account)
return self._handle_market_close(completed_date)
def _handle_market_close(self, completed_date):
# increment the day counter before we move markers forward.
self.day_count += 1.0
# Get the next trading day and, if it is past the bounds of this
# simulation, return the daily perf packet
next_trading_day = TradingEnvironment.instance().\
next_trading_day(completed_date)
# Check if any assets need to be auto-closed before generating today's
# perf period
if next_trading_day:
self.check_asset_auto_closes(next_trading_day=next_trading_day)
# Take a snapshot of our current performance to return to the
# browser.
daily_update = self.to_dict(emission_type='daily')
# On the last day of the test, don't create tomorrow's performance
# period. We may not be able to find the next trading day if we're at
# the end of our historical data
if self.market_close >= self.last_close:
return daily_update
# move the market day markers forward
env = TradingEnvironment.instance()
self.market_open, self.market_close = \
env.next_open_and_close(self.day)
self.day = env.next_trading_day(self.day)
# Roll over positions to current day.
self.todays_performance.rollover()
self.todays_performance.period_open = self.market_open
self.todays_performance.period_close = self.market_close
# If the next trading day is irrelevant, then return the daily packet
if (next_trading_day is None) or (next_trading_day >= self.last_close):
return daily_update
# Check for any dividends and auto-closes, then return the daily perf
# packet
self.check_upcoming_dividends(next_trading_day=next_trading_day)
return daily_update
def handle_simulation_end(self):
"""
When the simulation is complete, run the full period risk report
and send it out on the results socket.
"""
log_msg = "Simulated {n} trading days out of {m}."
log.info(log_msg.format(n=int(self.day_count), m=self.total_days))
log.info("first open: {d}".format(
d=self.sim_params.first_open))
log.info("last close: {d}".format(
d=self.sim_params.last_close))
bms = pd.Series(
index=self.cumulative_risk_metrics.cont_index,
data=self.cumulative_risk_metrics.benchmark_returns_cont)
ars = pd.Series(
index=self.cumulative_risk_metrics.cont_index,
data=self.cumulative_risk_metrics.algorithm_returns_cont)
acl = self.cumulative_risk_metrics.algorithm_cumulative_leverages
self.risk_report = risk.RiskReport(
ars,
self.sim_params,
benchmark_returns=bms,
algorithm_leverages=acl)
risk_dict = self.risk_report.to_dict()
return risk_dict
def __getstate__(self):
state_dict = \
{k: v for k, v in iteritems(self.__dict__)
if not k.startswith('_')}
state_dict['dividend_frame'] = pickle.dumps(self.dividend_frame)
state_dict['_dividend_count'] = self._dividend_count
# we already store perf periods as attributes
del state_dict['perf_periods']
STATE_VERSION = 3
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 3
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("PerformanceTracker saved state is too old.")
self.__dict__.update(state)
# Handle the dividend frame specially
self.dividend_frame = pickle.loads(state['dividend_frame'])
# properly setup the perf periods
self.perf_periods = []
p_types = ['cumulative', 'todays', 'minute']
for p_type in p_types:
name = p_type + '_performance'
period = getattr(self, name, None)
if period is None:
continue
period._position_tracker = self.position_tracker
self.perf_periods.append(period)
| apache-2.0 |
IndraVikas/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 162 | 9771 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
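    # Objective whose error drops by 0.1 per call while the gradient norm stays
    # at 1e-5, so the min_grad_norm stopping criterion fires immediately.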
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
| bsd-3-clause |
msmbuilder/msmbuilder | msmbuilder/tests/test_agglomerative.py | 6 | 4311 | import numpy as np
from mdtraj.testing import eq
from sklearn.base import clone
from sklearn.metrics import adjusted_rand_score
from msmbuilder.cluster import LandmarkAgglomerative
from msmbuilder.example_datasets import AlanineDipeptide
random = np.random.RandomState(2)
def test_1():
x = [random.randn(10, 2), random.randn(10, 2)]
n_clusters = 2
model1 = LandmarkAgglomerative(n_clusters=n_clusters)
model2 = LandmarkAgglomerative(n_clusters=n_clusters,
n_landmarks=sum(len(s) for s in x))
labels0 = clone(model1).fit(x).predict(x)
labels1 = model1.fit_predict(x)
labels2 = model2.fit_predict(x)
assert len(labels0) == 2
assert len(labels1) == 2
assert len(labels2) == 2
eq(labels0[0], labels1[0])
eq(labels0[1], labels1[1])
eq(labels0[0], labels2[0])
eq(labels0[1], labels2[1])
assert len(np.unique(np.concatenate(labels0))) == n_clusters
def test_2():
# this should be a really easy clustering problem
x = [random.randn(20, 2) + 10, random.randn(20, 2)]
n_clusters = 2
model1 = LandmarkAgglomerative(n_clusters=n_clusters)
model2 = LandmarkAgglomerative(n_clusters=n_clusters,
landmark_strategy='random',
random_state=random, n_landmarks=20)
labels1 = model1.fit_predict(x)
labels2 = model2.fit_predict(x)
assert adjusted_rand_score(np.concatenate(labels1),
np.concatenate(labels2)) == 1.0
def test_callable_metric():
def my_euc(target, ref, i):
return np.sqrt(np.sum((target - ref[i]) ** 2, axis=1))
model1 = LandmarkAgglomerative(n_clusters=10, n_landmarks=20,
metric='euclidean')
model2 = LandmarkAgglomerative(n_clusters=10, n_landmarks=20, metric=my_euc)
data = np.random.RandomState(0).randn(100, 2)
eq(model1.fit_predict([data])[0], model2.fit_predict([data])[0])
def test_1_ward():
x = [random.randn(10, 2), random.randn(10, 2)]
n_clusters = 2
model1 = LandmarkAgglomerative(n_clusters=n_clusters,
linkage='ward')
model2 = LandmarkAgglomerative(n_clusters=n_clusters,
linkage='ward',
n_landmarks=sum(len(s) for s in x))
labels0 = clone(model1).fit(x).predict(x)
labels1 = model1.fit_predict(x)
labels2 = model2.fit_predict(x)
assert len(labels0) == 2
assert len(labels1) == 2
assert len(labels2) == 2
eq(labels0[0], labels1[0])
eq(labels0[1], labels1[1])
eq(labels0[0], labels2[0])
eq(labels0[1], labels2[1])
assert len(np.unique(np.concatenate(labels0))) == n_clusters
def test_2_ward():
# this should be a really easy clustering problem
x = [random.randn(20, 2) + 10, random.randn(20, 2)]
n_clusters = 2
model1 = LandmarkAgglomerative(n_clusters=n_clusters,
linkage='ward')
model2 = LandmarkAgglomerative(n_clusters=n_clusters,
linkage='ward',
landmark_strategy='random',
random_state=random, n_landmarks=20)
labels1 = model1.fit_predict(x)
labels2 = model2.fit_predict(x)
assert adjusted_rand_score(np.concatenate(labels1),
np.concatenate(labels2)) == 1.0
def test_alanine_dipeptide():
# test for rmsd metric compatibility with ward clustering
# keep n_landmarks small or this will get really slow
trajectories = AlanineDipeptide().get_cached().trajectories
n_clusters = 4
model = LandmarkAgglomerative(n_clusters=n_clusters, n_landmarks=20,
linkage='ward', metric='rmsd')
labels = model.fit_predict(trajectories[0][0:100])
assert len(np.unique(np.concatenate(labels))) <= n_clusters
def test_cluster_centers():
x = [random.randn(20, 2) + 10, random.randn(20, 2)]
n_clusters = np.random.randint(2, 7)
model = LandmarkAgglomerative(n_clusters=n_clusters,
linkage='ward')
labels = model.fit_predict(x)
print(model.cluster_centers_)
assert model.cluster_centers_.shape == (n_clusters, 2)
| lgpl-2.1 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/stats/fama_macbeth.py | 7 | 7274 | from pandas.core.base import StringMixin
from pandas.compat import StringIO, range
import numpy as np
from pandas.core.api import Series, DataFrame
import pandas.stats.common as common
from pandas.util.decorators import cache_readonly
# flake8: noqa
def fama_macbeth(**kwargs):
"""Runs Fama-MacBeth regression.
Parameters
----------
Takes the same arguments as a panel OLS, in addition to:
nw_lags_beta: int
Newey-West adjusts the betas by the given lags
"""
window_type = kwargs.get('window_type')
if window_type is None:
klass = FamaMacBeth
else:
klass = MovingFamaMacBeth
return klass(**kwargs)
class FamaMacBeth(StringMixin):
def __init__(self, y, x, intercept=True, nw_lags=None,
nw_lags_beta=None,
entity_effects=False, time_effects=False, x_effects=None,
cluster=None, dropped_dummies=None, verbose=False):
import warnings
warnings.warn("The pandas.stats.fama_macbeth module is deprecated and will be "
"removed in a future version. We refer to external packages "
"like statsmodels, see here: "
"http://www.statsmodels.org/stable/index.html",
FutureWarning, stacklevel=4)
if dropped_dummies is None:
dropped_dummies = {}
self._nw_lags_beta = nw_lags_beta
from pandas.stats.plm import MovingPanelOLS
self._ols_result = MovingPanelOLS(
y=y, x=x, window_type='rolling', window=1,
intercept=intercept,
nw_lags=nw_lags, entity_effects=entity_effects,
time_effects=time_effects, x_effects=x_effects, cluster=cluster,
dropped_dummies=dropped_dummies, verbose=verbose)
self._cols = self._ols_result._x.columns
@cache_readonly
def _beta_raw(self):
return self._ols_result._beta_raw
@cache_readonly
def _stats(self):
return _calc_t_stat(self._beta_raw, self._nw_lags_beta)
@cache_readonly
def _mean_beta_raw(self):
return self._stats[0]
@cache_readonly
def _std_beta_raw(self):
return self._stats[1]
@cache_readonly
def _t_stat_raw(self):
return self._stats[2]
def _make_result(self, result):
return Series(result, index=self._cols)
@cache_readonly
def mean_beta(self):
return self._make_result(self._mean_beta_raw)
@cache_readonly
def std_beta(self):
return self._make_result(self._std_beta_raw)
@cache_readonly
def t_stat(self):
return self._make_result(self._t_stat_raw)
@cache_readonly
def _results(self):
return {
'mean_beta': self._mean_beta_raw,
'std_beta': self._std_beta_raw,
't_stat': self._t_stat_raw,
}
@cache_readonly
def _coef_table(self):
buffer = StringIO()
buffer.write('%13s %13s %13s %13s %13s %13s\n' %
('Variable', 'Beta', 'Std Err', 't-stat', 'CI 2.5%', 'CI 97.5%'))
template = '%13s %13.4f %13.4f %13.2f %13.4f %13.4f\n'
for i, name in enumerate(self._cols):
if i and not (i % 5):
buffer.write('\n' + common.banner(''))
mean_beta = self._results['mean_beta'][i]
std_beta = self._results['std_beta'][i]
t_stat = self._results['t_stat'][i]
ci1 = mean_beta - 1.96 * std_beta
ci2 = mean_beta + 1.96 * std_beta
values = '(%s)' % name, mean_beta, std_beta, t_stat, ci1, ci2
buffer.write(template % values)
if self._nw_lags_beta is not None:
buffer.write('\n')
buffer.write('*** The Std Err, t-stat are Newey-West '
'adjusted with Lags %5d\n' % self._nw_lags_beta)
return buffer.getvalue()
def __unicode__(self):
return self.summary
@cache_readonly
def summary(self):
template = """
----------------------Summary of Fama-MacBeth Analysis-------------------------
Formula: Y ~ %(formulaRHS)s
# betas : %(nu)3d
----------------------Summary of Estimated Coefficients------------------------
%(coefTable)s
--------------------------------End of Summary---------------------------------
"""
params = {
'formulaRHS': ' + '.join(self._cols),
'nu': len(self._beta_raw),
'coefTable': self._coef_table,
}
return template % params
class MovingFamaMacBeth(FamaMacBeth):
def __init__(self, y, x, window_type='rolling', window=10,
intercept=True, nw_lags=None, nw_lags_beta=None,
entity_effects=False, time_effects=False, x_effects=None,
cluster=None, dropped_dummies=None, verbose=False):
if dropped_dummies is None:
dropped_dummies = {}
self._window_type = common._get_window_type(window_type)
self._window = window
FamaMacBeth.__init__(
self, y=y, x=x, intercept=intercept,
nw_lags=nw_lags, nw_lags_beta=nw_lags_beta,
entity_effects=entity_effects, time_effects=time_effects,
x_effects=x_effects, cluster=cluster,
dropped_dummies=dropped_dummies, verbose=verbose)
self._index = self._ols_result._index
self._T = len(self._index)
@property
def _is_rolling(self):
return self._window_type == 'rolling'
def _calc_stats(self):
mean_betas = []
std_betas = []
t_stats = []
# XXX
mask = self._ols_result._rolling_ols_call[2]
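        # Cumulative count of valid cross-sections up to each date; used below
        # to slice the stacked betas belonging to each window.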
obs_total = mask.astype(int).cumsum()
start = self._window - 1
betas = self._beta_raw
for i in range(start, self._T):
if self._is_rolling:
begin = i - start
else:
begin = 0
B = betas[max(obs_total[begin] - 1, 0): obs_total[i]]
mean_beta, std_beta, t_stat = _calc_t_stat(B, self._nw_lags_beta)
mean_betas.append(mean_beta)
std_betas.append(std_beta)
t_stats.append(t_stat)
return np.array([mean_betas, std_betas, t_stats])
_stats = cache_readonly(_calc_stats)
def _make_result(self, result):
return DataFrame(result, index=self._result_index, columns=self._cols)
@cache_readonly
def _result_index(self):
mask = self._ols_result._rolling_ols_call[2]
# HACK XXX
return self._index[mask.cumsum() >= self._window]
@cache_readonly
def _results(self):
return {
'mean_beta': self._mean_beta_raw[-1],
'std_beta': self._std_beta_raw[-1],
't_stat': self._t_stat_raw[-1],
}
def _calc_t_stat(beta, nw_lags_beta):
N = len(beta)
B = beta - beta.mean(0)
C = np.dot(B.T, B) / N
if nw_lags_beta is not None:
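        # Newey-West style correction: add autocovariances of the betas for
        # lags 0..nw_lags_beta, each down-weighted by the Bartlett factor
        # (1 - lag / (nw_lags_beta + 1)).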
for i in range(nw_lags_beta + 1):
cov = np.dot(B[i:].T, B[:(N - i)]) / N
weight = i / (nw_lags_beta + 1)
C += 2 * (1 - weight) * cov
mean_beta = beta.mean(0)
std_beta = np.sqrt(np.diag(C)) / np.sqrt(N)
t_stat = mean_beta / std_beta
return mean_beta, std_beta, t_stat
| gpl-3.0 |
psci2195/espresso-ffans | testsuite/scripts/test_importlib_wrapper.py | 1 | 9916 | # Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper as iw
import sys
class importlib_wrapper(ut.TestCase):
def test_substitute_variable_values(self):
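        # substitute_variable_values should rewrite the assignment in place and
        # keep the original value in a generated _<name>__original variable.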
str_inp = "n_steps=5000\nnsteps == 5\n"
str_exp = "n_steps = 10; _n_steps__original=5000\nnsteps == 5\n"
str_out = iw.substitute_variable_values(str_inp, n_steps=10)
self.assertEqual(str_out, str_exp)
str_out = iw.substitute_variable_values(str_inp, n_steps='10',
strings_as_is=True)
self.assertEqual(str_out, str_exp)
str_inp = "N=5000\nnsteps == 5\n"
str_exp = "N = 10\nnsteps == 5\n"
str_out = iw.substitute_variable_values(str_inp, N=10, keep_original=0)
self.assertEqual(str_out, str_exp)
# test exceptions
str_inp = "n_steps=5000\nnsteps == 5\n"
self.assertRaises(AssertionError, iw.substitute_variable_values,
str_inp, other_var=10)
str_inp = "other_var == 5\n"
self.assertRaises(AssertionError, iw.substitute_variable_values,
str_inp, other_var=10)
def test_set_cmd(self):
original_sys_argv = list(sys.argv)
sys.argv = [0, "test"]
# test substitutions
str_inp = "import sys\nimport argparse"
str_exp = "import sys\nsys.argv = ['a.py', '1', '2']\nimport argparse"
str_out, sys_argv = iw.set_cmd(str_inp, "a.py", (1, 2))
self.assertEqual(str_out, str_exp)
self.assertEqual(sys_argv, [0, "test"])
str_inp = "import argparse"
str_exp = "import argparse\nimport sys\nsys.argv = ['a.py', '1', '2']"
str_out, sys_argv = iw.set_cmd(str_inp, "a.py", ["1", 2])
self.assertEqual(str_out, str_exp)
self.assertEqual(sys_argv, [0, "test"])
# test exceptions
str_inp = "import re"
self.assertRaises(AssertionError, iw.set_cmd, str_inp, "a.py", (1, 2))
# restore sys.argv
sys.argv = original_sys_argv
def test_disable_matplotlib_gui(self):
str_inp = "\nimport matplotlib as mp\nmp.use('PS')\n"
str_exp = ("\nimport matplotlib as _mpl;_mpl.use('Agg');"
"import matplotlib as mp\n\n")
str_out = iw.disable_matplotlib_gui(str_inp)
self.assertEqual(str_out, str_exp)
str_inp = "\nimport matplotlib.pyplot as plt\nplt.ion()\n"
str_exp = ("\nimport matplotlib as _mpl;_mpl.use('Agg');"
"import matplotlib.pyplot as plt\nplt.ioff()\n")
str_out = iw.disable_matplotlib_gui(str_inp)
self.assertEqual(str_out, str_exp)
def test_set_random_seeds(self):
# ESPResSo seed
str_es_sys = "system = espressomd.System(box_l=[box_l] * 3)\n"
str_inp = str_es_sys + "system.set_random_state_PRNG()"
str_exp = str_es_sys + "system.set_random_state_PRNG()"
str_out = iw.set_random_seeds(str_inp)
self.assertEqual(str_out, str_exp)
str_inp = str_es_sys + "system.random_number_generator_state = 7 * [0]"
str_exp = str_es_sys + "system.set_random_state_PRNG();" + \
" _random_seed_es__original = 7 * [0]"
str_out = iw.set_random_seeds(str_inp)
self.assertEqual(str_out, str_exp)
str_inp = str_es_sys + "system.seed = 42"
str_exp = str_es_sys + "system.set_random_state_PRNG();" + \
" _random_seed_es__original = 42"
str_out = iw.set_random_seeds(str_inp)
self.assertEqual(str_out, str_exp)
# NumPy seed
str_lambda = "(lambda *args, **kwargs: None)"
str_inp = "\nnp.random.seed(seed=system.seed)"
str_exp = "\n_random_seed_np = " + str_lambda + "(seed=system.seed)"
str_out = iw.set_random_seeds(str_inp)
self.assertEqual(str_out, str_exp)
str_inp = "\nnumpy.random.seed(42)"
str_exp = "\n_random_seed_np = " + str_lambda + "(42)"
str_out = iw.set_random_seeds(str_inp)
self.assertEqual(str_out, str_exp)
def test_mock_es_visualization(self):
statement = "import espressomd.visualization"
expected = """
try:
import espressomd.visualization
if hasattr(espressomd.visualization.mayaviLive, 'deferred_ImportError') or \\
hasattr(espressomd.visualization.openGLLive, 'deferred_ImportError'):
raise ImportError()
except ImportError:
from unittest.mock import MagicMock
import espressomd
espressomd.visualization = MagicMock()
"""
self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
statement = "import espressomd.visualization as test"
expected = """
try:
import espressomd.visualization as test
if hasattr(test.mayaviLive, 'deferred_ImportError') or \\
hasattr(test.openGLLive, 'deferred_ImportError'):
raise ImportError()
except ImportError:
from unittest.mock import MagicMock
import espressomd
test = MagicMock()
"""
self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
statement = "from espressomd import visualization"
expected = """
try:
from espressomd import visualization
if hasattr(visualization.mayaviLive, 'deferred_ImportError') or \\
hasattr(visualization.openGLLive, 'deferred_ImportError'):
raise ImportError()
except ImportError:
from unittest.mock import MagicMock
import espressomd
visualization = MagicMock()
"""
self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
statement = "from espressomd import visualization as test"
expected = """
try:
from espressomd import visualization as test
if hasattr(test.mayaviLive, 'deferred_ImportError') or \\
hasattr(test.openGLLive, 'deferred_ImportError'):
raise ImportError()
except ImportError:
from unittest.mock import MagicMock
import espressomd
test = MagicMock()
"""
self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
statement = "from espressomd import visualization_mayavi"
expected = """
try:
from espressomd import visualization_mayavi
except ImportError:
from unittest.mock import MagicMock
import espressomd
visualization_mayavi = MagicMock()
"""
self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
statement = "from espressomd import visualization_mayavi as test"
expected = """
try:
from espressomd import visualization_mayavi as test
except ImportError:
from unittest.mock import MagicMock
import espressomd
test = MagicMock()
"""
self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
statement = "from espressomd.visualization_mayavi import mayaviLive"
expected = """
try:
from espressomd.visualization_mayavi import mayaviLive
except ImportError:
from unittest.mock import MagicMock
import espressomd
mayaviLive = MagicMock()
"""
self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
statement = "from espressomd.visualization_mayavi import mayaviLive as test"
expected = """
try:
from espressomd.visualization_mayavi import mayaviLive as test
except ImportError:
from unittest.mock import MagicMock
import espressomd
test = MagicMock()
"""
self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
statement = "from espressomd.visualization_mayavi import a as b, c"
expected = """
try:
from espressomd.visualization_mayavi import a as b
except ImportError:
from unittest.mock import MagicMock
import espressomd
b = MagicMock()
try:
from espressomd.visualization_mayavi import c
except ImportError:
from unittest.mock import MagicMock
import espressomd
c = MagicMock()
"""
self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
statement = "from espressomd.visualization import openGLLive"
expected = """
try:
from espressomd.visualization import openGLLive
if hasattr(openGLLive, 'deferred_ImportError'):
raise openGLLive.deferred_ImportError
except ImportError:
from unittest.mock import MagicMock
import espressomd
openGLLive = MagicMock()
"""
self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
statement = "from espressomd.visualization import openGLLive as test"
expected = """
try:
from espressomd.visualization import openGLLive as test
if hasattr(test, 'deferred_ImportError'):
raise test.deferred_ImportError
except ImportError:
from unittest.mock import MagicMock
import espressomd
test = MagicMock()
"""
self.assertEqual(iw.mock_es_visualization(statement), expected[1:])
# test exceptions
statements_without_namespace = [
"from espressomd.visualization import *",
"from espressomd.visualization_opengl import *",
"from espressomd.visualization_mayavi import *"
]
for s in statements_without_namespace:
self.assertRaises(AssertionError, iw.mock_es_visualization, s)
if __name__ == "__main__":
ut.main()
| gpl-3.0 |
cybernet14/scikit-learn | sklearn/naive_bayes.py | 70 | 28476 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_classes, n_samples].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
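# Posterior: log P(c|x) = log P(c, x) - log P(x); log P(x) is obtained
# with logsumexp over classes for numerical stability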
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
Variances for Gaussians in original set.
X : array-like, shape (n_samples, n_features)
New data points with which to update the running mean and variance.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
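# The cross term below corrects for the shift between the two means;
# this is the pairwise combination of Chan, Golub & LeVeque cited above:
# ssd_total = ssd_old + ssd_new + (n_old * n_new / n_total) * (mu_old - mu_new)**2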
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit : bool
If True, act as though this were the first time we called
_partial_fit (i.e., throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
epsilon = 1e-9
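# epsilon acts as a small variance floor added to sigma_ so that
# features with (near-)zero variance within a class do not cause
# numerical problems in the likelihood computation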
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
self.class_count_ = np.zeros(n_classes)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Take out the epsilon added at the end of the previous call;
# it is re-added after the update below
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
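# Class-conditional term: sum over features of the Gaussian
# log-density log N(x_j | theta_ij, sigma_ij); adding the log prior
# gives the joint log-probability for class i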
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
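# label_binarize returns a single column for binary problems; expand it
# to explicit negative/positive class columns so the counting below
# handles the binary and multiclass cases uniformly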
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior to any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
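# Lidstone-smoothed estimate, stored in log space:
# P(x_i | y) = (N_yi + alpha) / (N_y + alpha * n_features)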
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
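# Each Bernoulli feature has two outcomes (present / absent), so alpha
# is added once per outcome to the class count denominator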
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/scipy/interpolate/interpolate.py | 14 | 104866 | """ Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'ppform', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
'RegularGridInterpolator', 'interpn']
import itertools
from numpy import (shape, sometrue, array, transpose, searchsorted,
ones, logical_or, atleast_1d, atleast_2d, ravel,
dot, poly1d, asarray, intp)
import numpy as np
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
import math
import warnings
import functools
import operator
from scipy._lib.six import xrange, integer_types, string_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
def reduce_sometrue(a):
all = a
while len(shape(all)) > 1:
all = sometrue(all, axis=0)
return all
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
def lagrange(x, w):
"""
Return a Lagrange interpolating polynomial.
Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
Returns
-------
lagrange : numpy.poly1d instance
The Lagrange interpolating polynomial.
"""
M = len(x)
p = poly1d(0.0)
for j in xrange(M):
pt = poly1d(w[j])
for k in xrange(M):
if k == j:
continue
fac = x[j]-x[k]
pt *= poly1d([1.0, -x[k]])/fac
p += pt
return p
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=nan)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)``. This class returns a function whose call method uses
spline interpolation to find the value of new points.
If `x` and `y` represent a regular grid, consider using
RectBivariateSpline.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multi-dimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multi-dimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated.
See Also
--------
RectBivariateSpline :
Much faster 2D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : one dimension version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> import matplotlib.pyplot as plt
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
raise ValueError(
"x and y must have equal lengths for non rectangular grid")
if len(z) != len(x):
raise ValueError(
"Invalid length for input z for non rectangular grid")
try:
kx = ky = {'linear': 1,
'cubic': 3,
'quintic': 5}[kind]
except KeyError:
raise ValueError("Unsupported interpolation type.")
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x)
y = np.sort(y)
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
def _check_broadcast_up_to(arr_from, shape_to, name):
"""Helper to check that arr_from broadcasts up to shape_to"""
shape_from = arr_from.shape
if len(shape_to) >= len(shape_from):
for t, f in zip(shape_to[::-1], shape_from[::-1]):
if f != 1 and f != t:
break
else: # all checks pass, do the upcasting that we need later
if arr_from.size != 1 and arr_from.shape != shape_to:
arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
return arr_from.ravel()
# at least one check failed
raise ValueError('%s argument must be able to broadcast up '
'to shape %s but had shape %s'
% (name, shape_to, shape_from))
def _do_extrapolate(fill_value):
"""Helper to check if fill_value == "extrapolate" without warnings"""
return (isinstance(fill_value, string_types) and
fill_value == 'extrapolate')
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
where 'slinear', 'quadratic' and 'cubic' refer to a spline
interpolation of first, second or third order) or as an integer
specifying the order of the spline interpolator to use.
Default is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised unless `fill_value="extrapolate"`.
fill_value : array-like or (array-like, array_like) or "extrapolate", optional
- if a ndarray (or float), this value will be used to fill in for
requested points outside of the data range. If not provided, then
the default is NaN. The array-like must broadcast properly to the
dimensions of the non-interpolation axes.
- If a two-element tuple, then the first element is used as a
fill value for ``x_new < x[0]`` and the second element is used for
``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
list or ndarray, regardless of shape) is taken to be a single
array-like argument meant to be used for both bounds as
``below, above = fill_value, fill_value``.
.. versionadded:: 0.17.0
- If "extrapolate", then points outside the data range will be
extrapolated. ("nearest" and "linear" kinds only.)
.. versionadded:: 0.17.0
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=None, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.bounds_error = bounds_error # used by fill_value setter
self.copy = copy
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'nearest': 0, 'zero': 0, 'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x)
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
self._y = self._reshape_yi(self.y)
self.x = x
del y, x # clean up namespace to prevent misuse; use attributes
self._kind = kind
self.fill_value = fill_value # calls the setter, can modify bounds_err
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: http://docs.python.org/2/reference/datamodel.html
if kind in ('linear', 'nearest'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
# Do division before addition to prevent possible integer overflow
self.x_bds = self.x / 2.0
self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
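# x_bds now holds the midpoints between consecutive breakpoints;
# _call_nearest bins x_new against these midpoints with searchsorted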
self._call = self.__class__._call_nearest
else:
# Check if we can delegate to numpy.interp (2x-10x faster).
cond = self.x.dtype == np.float_ and self.y.dtype == np.float_
cond = cond and self.y.ndim == 1
cond = cond and not _do_extrapolate(fill_value)
if cond:
self._call = self.__class__._call_linear_np
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
self._spline = splmake(self.x, self._y, order=order)
self._call = self.__class__._call_spline
if len(self.x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
@property
def fill_value(self):
# backwards compat: mimic a public attribute
return self._fill_value_orig
@fill_value.setter
def fill_value(self, fill_value):
# extrapolation only works for nearest neighbor and linear methods
if _do_extrapolate(fill_value):
if self._kind not in ('nearest', 'linear'):
raise ValueError("Extrapolation does not work with "
"kind=%s" % self._kind)
if self.bounds_error:
raise ValueError("Cannot extrapolate and raise "
"at the same time.")
self.bounds_error = False
self._extrapolate = True
else:
broadcast_shape = (self.y.shape[:self.axis] +
self.y.shape[self.axis + 1:])
if len(broadcast_shape) == 0:
broadcast_shape = (1,)
# it's either a pair (_below_range, _above_range) or a single value
# for both above and below range
if isinstance(fill_value, tuple) and len(fill_value) == 2:
below_above = [np.asarray(fill_value[0]),
np.asarray(fill_value[1])]
names = ('fill_value (below)', 'fill_value (above)')
for ii in range(2):
below_above[ii] = _check_broadcast_up_to(
below_above[ii], broadcast_shape, names[ii])
else:
fill_value = np.asarray(fill_value)
below_above = [_check_broadcast_up_to(
fill_value, broadcast_shape, 'fill_value')] * 2
self._fill_value_below, self._fill_value_above = below_above
self._extrapolate = False
if self.bounds_error is None:
self.bounds_error = True
# backwards compat: fill_value was a public attr; make it writeable
self._fill_value_orig = fill_value
def _call_linear_np(self, x_new):
# Note that out-of-bounds values are taken care of in self._evaluate
return np.interp(x_new, self.x, self.y)
def _call_linear(self, x_new):
# 2. Find where in the original data the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new - x_lo)[:, None] + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
def _call_spline(self, x_new):
return spleval(self._spline, x_new)
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
# or return a list of mask array indicating the outofbounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
y_new = self._call(self, x_new)
if not self._extrapolate:
below_bounds, above_bounds = self._check_bounds(x_new)
if len(y_new) > 0:
# Note fill_value must be broadcast up to the proper size
# and flattened to work here
y_new[below_bounds] = self._fill_value_below
y_new[above_bounds] = self._fill_value_above
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
return below_bounds, above_bounds
class _PPolyBase(object):
"""Base class for piecewise polynomials."""
__slots__ = ('c', 'x', 'extrapolate', 'axis')
def __init__(self, c, x, extrapolate=None, axis=0):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
elif extrapolate != 'periodic':
extrapolate = bool(extrapolate)
self.extrapolate = extrapolate
if not (0 <= axis < self.c.ndim - 1):
raise ValueError("%s must be between 0 and %s" % (axis, c.ndim-1))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (k, m, ...),
# and axis !=0 means that we have c.shape (..., k, m, ...)
# ^
# axis
# So we roll two of them.
self.c = np.rollaxis(self.c, axis+1)
self.c = np.rollaxis(self.c, axis+1)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size-1:
raise ValueError("number of coefficients != len(x)-1")
if np.any(self.x[1:] - self.x[:-1] < 0):
raise ValueError("x-coordinates are not in increasing order")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
self.axis = axis
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=True):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals
``self.x[-1] <= x < x_right[0]``, ``x_right[0] <= x < x_right[1]``,
..., ``x_right[m-2] <= x < x_right[m-1]``
x : ndarray, size (m,)
Additional breakpoints. Must be sorted and either to
the right or to the left of the current breakpoints.
right : bool, optional
Whether the new intervals are to the right or to the left
of the current intervals.
"""
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("x and c have incompatible sizes")
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("c and self.c have incompatible shapes")
if right:
if x[0] < self.x[-1]:
raise ValueError("new x are not to the right of current ones")
else:
if x[-1] > self.x[0]:
raise ValueError("new x are not to the left of current ones")
if c.size == 0:
return
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
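# Coefficient rows are aligned at the constant (lowest-order) term, so
# any missing high-order rows of the lower-degree piece stay zero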
if right:
c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2-c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
else:
c2[k2-self.c.shape[0]:, :c.shape[1]] = c
c2[k2-c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative.
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
# With periodic extrapolation we map x to the segment
# [self.x[0], self.x[-1]].
if extrapolate == 'periodic':
x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
extrapolate = False
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[2:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial in the ith interval is ``x[i] <= xp < x[i+1]``::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial. This representation
is the local power basis.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. These must be sorted in
increasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k - n representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu,:].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
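# (row m holds the coefficient of (x - x[i])**(k-m); differentiating nu
# times scales it by (k-m)(k-m-1)...(k-m-nu+1) = poch(k-m-nu+1, nu))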
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu - 1)
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
# construct a compatible polynomial
return self.construct_fast(c, self.x, extrapolate, self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
# Compute the integral.
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
if n_periods > 0:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xe, False, out=range_int)
range_int *= n_periods
else:
range_int.fill(0)
# Map a to [xs, xe], b is always a + left.
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs to what is remained.
remainder_int = np.empty_like(range_int)
if b <= xe:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, xe, False, out=remainder_int)
range_int += remainder_int
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xs + left + a - xe, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate), out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
def solve(self, y=0., discontinuity=True, extrapolate=None):
"""
Find real solutions of the the equation ``pp(x) == y``.
Parameters
----------
y : float, optional
Right-hand side. Default is zero.
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
if the `discontinuity` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.roots()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
y = float(y)
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, y, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in numpy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
See Also
--------
PPoly.solve
"""
return self.solve(0, discontinuity, extrapolate)
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep`
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in xrange(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp.c)
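# Expand each Bernstein term binom(k, a) * t**a * (1 - t)**(k - a), with
# t = (x - x[i]) / dx, via the binomial theorem; the coefficient of t**s
# is accumulated into power-basis row k - s, and the dx**s factor
# converts the local variable t back to x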
for a in range(k+1):
factor = (-1)**(a) * comb(k, a) * bp.c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
"""Piecewise polynomial in terms of coefficients and breakpoints.
The polynomial in the ``i``-th interval ``x[i] <= xp < x[i+1]`` is written
in the Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
where ``k`` is the degree of the polynomial, and::
b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is a binomial
coefficient.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. These must be sorted in
increasing order.
extrapolate : bool, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature.
Here's a non-exhaustive list:
.. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829546, doi:10.1155/2011/829543
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k - nu representing the derivative of
this polynomial.
"""
if nu < 0:
return self.antiderivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,)*(self.c.ndim-2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None))+rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k + nu representing the
antiderivative of this polynomial.
Notes
-----
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.antiderivative()
return bp
# Construct the indefinite integrals on individual intervals
c, x = self.c, self.x
k = c.shape[0]
c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
c2[1:, ...] = np.cumsum(c, axis=0) / k
delta = x[1:] - x[:-1]
c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
# Now fix continuity: on the very first interval, take the integration
# constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
# the integration constant is then equal to the jump of the `bp` at x_j.
# The latter is given by the coefficient of B_{n+1, n+1}
# *on the previous interval* (other B. polynomials are zero at the breakpoint)
# Finally, use the fact that BPs form a partition of unity.
c2[:,1:] += np.cumsum(c2[k,:], axis=0)[:-1]
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
return self.construct_fast(c2, x, extrapolate, axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. If 'periodic', periodic
extrapolation is used. If None (default), use `self.extrapolate`.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
"""
# XXX: can probably use instead the fact that
# \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is None:
extrapolate = self.extrapolate
# ib.extrapolate shouldn't be 'periodic'; it is converted to
# False for 'periodic' in the antiderivative() call above.
if extrapolate != 'periodic':
ib.extrapolate = extrapolate
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
# For simplicity and clarity convert to a <= b case.
if a <= b:
sign = 1
else:
a, b = b, a
sign = -1
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
res = n_periods * (ib(xe) - ib(xs))
# Map a and b to [xs, xe].
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs to what is remained.
if b <= xe:
res += ib(b) - ib(a)
else:
res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)
return sign * res
else:
return ib(b) - ib(a)
def extend(self, c, x, right=True):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,)*(pp.c.ndim-2)
c = np.zeros_like(pp.c)
for a in range(k+1):
factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
for j in range(k-a, k+1):
c[j] += factor * comb(j, k-a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
`x = 1` and `x = 2`.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
if np.any(xi[1:] - xi[:-1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, (integer_types, np.integer)):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
n1 = min(n - n2, len(y1))
if n1+n2 != n:
raise ValueError("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" %
(xi[i], len(y1), xi[i+1], len(y2), orders[i]))
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1], y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on `[xa, xb]` and having the values and derivatives at the
endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
yb : array_like
Derivatives at ``xb``.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
This uses several facts from life of Bernstein basis functions.
First of all,
.. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
If B(x) is a linear combination of the form
.. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
Iterating the latter one, one finds for the q-th derivative
.. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
with
.. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}
This way, only `a=0` contributes to :math:`B^{q}(x = xa)`, and
`c_q` are found one by one by iterating `q = 0, ..., na`.
At `x = xb` it's the same with `a = n - q`.
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('ya and yb have incompatible dimensions.')
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating) or
np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
for j in range(0, q):
c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
for j in range(0, q):
c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
return c
@staticmethod
def _raise_degree(c, d):
"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
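# --- Illustrative usage sketch, added for exposition; not part of the original
# scipy source. It checks BPoly.from_derivatives on a single interval with the
# data from the docstring example: f(0)=1, f'(0)=2, f(1)=3, f'(1)=4. The helper
# name `_example_bpoly_from_derivatives` is hypothetical.
def _example_bpoly_from_derivatives():
    import numpy as np
    bp = BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
    dbp = bp.derivative()
    assert np.allclose(bp([0.0, 1.0]), [1.0, 3.0])    # endpoint values
    assert np.allclose(dbp([0.0, 1.0]), [2.0, 4.0])   # endpoint slopes
    return bp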
class NdPPoly(object):
"""
Piecewise tensor product polynomial
The value at point `xp = (x', y', z', ...)` is evaluated by first
computing the interval indices `i` such that::
x[0][i[0]] <= x' < x[0][i[0]+1]
x[1][i[1]] <= y' < x[1][i[1]+1]
...
and then computing::
S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
* (xp[0] - x[0][i[0]])**m0
* ...
* (xp[n] - x[n][i[n]])**mn
for m0 in range(k[0]+1)
...
for mn in range(k[n]+1))
where ``k[j]`` is the degree of the polynomial in dimension j. This
representation is the piecewise multivariate power basis.
Parameters
----------
c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
Polynomial coefficients, with polynomial order `kj` and
`mj+1` intervals for each dimension `j`.
x : ndim-tuple of ndarrays, shapes (mj+1,)
Polynomial breakpoints for each dimension. These must be
sorted in increasing order.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Attributes
----------
x : tuple of ndarrays
Breakpoints.
c : ndarray
Coefficients of the polynomials.
Methods
-------
__call__
construct_fast
See also
--------
PPoly : piecewise polynomials in 1D
Notes
-----
High-order polynomials in the power basis can be numerically
unstable.
"""
def __init__(self, c, x, extrapolate=None):
self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
self.c = np.asarray(c)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
ndim = len(self.x)
if any(v.ndim != 1 for v in self.x):
raise ValueError("x arrays must all be 1-dimensional")
if any(v.size < 2 for v in self.x):
raise ValueError("x arrays must all contain at least 2 points")
if c.ndim < 2*ndim:
raise ValueError("c must have at least 2*len(x) dimensions")
if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
raise ValueError("x-coordinates are not in increasing order")
if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)):
raise ValueError("x and c do not agree on the number of intervals")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
@classmethod
def construct_fast(cls, c, x, extrapolate=None):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
def _ensure_c_contiguous(self):
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
if not isinstance(self.x, tuple):
self.x = tuple(self.x)
def __call__(self, x, nu=None, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
nu : tuple, optional
Orders of derivatives to evaluate. Each must be non-negative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
x = _ndim_coords_from_arrays(x)
x_shape = x.shape
x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_)
if nu is None:
nu = np.zeros((ndim,), dtype=np.intc)
else:
nu = np.asarray(nu, dtype=np.intc)
if nu.ndim != 1 or nu.shape[0] != ndim:
raise ValueError("invalid number of derivative orders nu")
dim1 = prod(self.c.shape[:ndim])
dim2 = prod(self.c.shape[ndim:2*ndim])
dim3 = prod(self.c.shape[2*ndim:])
ks = np.array(self.c.shape[:ndim], dtype=np.intc)
out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
self.x,
ks,
x,
nu,
bool(extrapolate),
out)
return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])
def _derivative_inplace(self, nu, axis):
"""
Compute 1D derivative along a selected dimension in-place
May result in a non-contiguous c array.
"""
if nu < 0:
return self._antiderivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
# reduce order
if nu == 0:
# noop
return
else:
sl = [slice(None)]*ndim
sl[axis] = slice(None, -nu, None)
c2 = self.c[sl]
if c2.shape[axis] == 0:
# derivative of order 0 is zero
shp = list(c2.shape)
shp[axis] = 1
c2 = np.zeros(shp, dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
sl = [None]*c2.ndim
sl[axis] = slice(None)
c2 *= factor[sl]
self.c = c2
def _antiderivative_inplace(self, nu, axis):
"""
Compute 1D antiderivative along a selected dimension
May result in a non-contiguous c array.
"""
if nu <= 0:
return self._derivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
perm = list(range(ndim))
perm[0], perm[axis] = perm[axis], perm[0]
perm = perm + list(range(ndim, self.c.ndim))
c = self.c.transpose(perm)
c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
dtype=c.dtype)
c2[:-nu] = c
# divide by the correct rising factorials
factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
perm2 = list(range(c2.ndim))
perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]
c2 = c2.transpose(perm2)
c2 = c2.copy()
_ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
self.x[axis], nu-1)
c2 = c2.transpose(perm2)
c2 = c2.transpose(perm)
# Done
self.c = c2
def derivative(self, nu):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the antiderivative is returned.
Returns
-------
pp : NdPPoly
Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
representing the derivative of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals in each dimension are
considered half-open, ``[a, b)``, except for the last interval
which is closed ``[a, b]``.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._derivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def antiderivative(self, nu):
"""
Construct a new piecewise polynomial representing the antiderivative.
The antiderivative is also the indefinite integral of the function, and
differentiation is its inverse operation.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._antiderivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def integrate_1d(self, a, b, axis, extrapolate=None):
r"""
Compute NdPPoly representation for one dimensional definite integral
The result is a piecewise polynomial representing the integral:
.. math::
p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)
where the dimension integrated over is specified with the
`axis` parameter.
Parameters
----------
a, b : float
Lower and upper bound for integration.
axis : int
Dimension over which to compute the 1D integrals
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : NdPPoly or array-like
Definite integral of the piecewise polynomial over [a, b].
If the polynomial was 1-dimensional, an array is returned,
otherwise, an NdPPoly object.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
axis = int(axis) % ndim
# Reuse 1D integration routines
c = self.c
swap = list(range(c.ndim))
swap.insert(0, swap[axis])
del swap[axis + 1]
swap.insert(1, swap[ndim + axis])
del swap[ndim + axis + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
self.x[axis],
extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
# Construct result
if ndim == 1:
return out.reshape(c.shape[2:])
else:
c = out.reshape(c.shape[2:])
x = self.x[:axis] + self.x[axis+1:]
return self.construct_fast(c, x, extrapolate=extrapolate)
def integrate(self, ranges, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
ranges : ndim-tuple of 2-tuples float
Sequence of lower and upper bounds for each dimension,
``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over
[a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]
"""
ndim = len(self.x)
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
if not hasattr(ranges, '__len__') or len(ranges) != ndim:
raise ValueError("Range not a sequence of correct length")
self._ensure_c_contiguous()
# Reuse 1D integration routine
c = self.c
for n, (a, b) in enumerate(ranges):
swap = list(range(c.ndim))
swap.insert(1, swap[ndim - n])
del swap[ndim - n + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
c = out.reshape(c.shape[2:])
return c
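# --- Illustrative usage sketch, added for exposition; not part of the original
# scipy source. It builds the simplest possible NdPPoly, a piecewise-constant
# 2-D polynomial equal to 1 on a 3x3 cell grid, then checks evaluation and
# integration over the full domain. The helper name `_example_ndppoly_constant`
# is hypothetical.
def _example_ndppoly_constant():
    import numpy as np
    x = (np.arange(4.0), np.arange(4.0))   # breakpoints 0..3 in each dimension
    c = np.ones((1, 1, 3, 3))              # degree 0 in both dimensions
    p = NdPPoly(c, x)
    val = p(np.array([[0.5, 1.5]]))        # constant polynomial -> 1.0
    total = p.integrate(((0, 3), (0, 3)))  # area of [0, 3] x [0, 3] -> 9.0
    assert np.allclose(val, 1.0) and np.allclose(total, 9.0)
    return p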
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x,y,z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
Encyclopedia. Retrieved 27 Feb 2013 01:28.
http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype') and not
np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices, norm_distances, out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices, norm_distances, out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
# each i and i+1 represents a edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = []
for i, yi in zip(indices, norm_distances):
idx_res.append(np.where(yi <= .5, i, i + 1))
return self.values[idx_res]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
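# --- Illustrative usage sketch, added for exposition; not part of the original
# scipy source. It contrasts the "linear" and "nearest" methods of
# RegularGridInterpolator and shows the fill_value path for out-of-bounds
# points. The grid values follow f(x, y) = 10*x + y, and the helper name
# `_example_rgi_methods` is hypothetical.
def _example_rgi_methods():
    import numpy as np
    x = np.array([0.0, 1.0, 2.0])
    y = np.array([0.0, 1.0])
    values = np.array([[0.0, 1.0],
                       [10.0, 11.0],
                       [20.0, 21.0]])
    interp = RegularGridInterpolator((x, y), values,
                                     bounds_error=False, fill_value=np.nan)
    inside = interp([[0.5, 0.5]])                     # linear -> 5.5 (exact here)
    nearest = interp([[0.6, 0.4]], method="nearest")  # snaps to node (1, 0) -> 10.0
    outside = interp([[3.0, 0.0]])                    # beyond the grid -> nan
    assert np.allclose(inside, 5.5) and np.allclose(nearest, 10.0)
    assert np.isnan(outside).all()
    return inside, nearest, outside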
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method spline2fd can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method spline2fd does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method spline2fd can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], len(grid)))
for i, p in enumerate(xi.T):
if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
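# --- Illustrative usage sketch, added for exposition; not part of the original
# scipy source. interpn is the functional counterpart of constructing a
# RegularGridInterpolator by hand; the same 10*x + y grid is reused, and the
# helper name `_example_interpn` is hypothetical.
def _example_interpn():
    import numpy as np
    points = (np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]))
    values = np.array([[0.0, 1.0],
                       [10.0, 11.0],
                       [20.0, 21.0]])
    xi = np.array([[0.5, 0.5], [1.5, 0.25]])
    out = interpn(points, values, xi, method="linear")
    assert np.allclose(out, [5.5, 15.25])   # linear interpolation is exact for 10*x + y
    return out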
# backward compatibility wrapper
class ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk)-1
sivals = np.empty((order+1, N), dtype=float)
for m in xrange(order, -1, -1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m, :] = res
return cls(sivals, xk, fill=fill)
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
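# --- Illustrative sketch, added for exposition; not part of the original scipy
# source. _dot0 contracts the last axis of `a` with the first axis of `b`,
# which is the same contraction as np.tensordot(a, b, axes=(-1, 0)); the helper
# name `_demo_dot0` is hypothetical.
def _demo_dot0():
    import numpy as np
    a = np.arange(6.0).reshape(2, 3)
    b = np.arange(24.0).reshape(3, 4, 2)
    assert np.allclose(_dot0(a, b), np.tensordot(a, b, axes=(-1, 0)))
    return _dot0(a, b).shape   # (2, 4, 2)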
def _find_smoothest(xk, yk, order, conds=None, B=None):
# construct Bmatrix, and Jmatrix
# e = J*c
# minimize norm(e,2) given B*c=yk
# if desired B can be given
# conds is ignored
N = len(xk)-1
K = order
if B is None:
B = _fitpack._bsplmat(order, xk)
J = _fitpack._bspldismat(order, xk)
u, s, vh = scipy.linalg.svd(B)
ind = K-1
V2 = vh[-ind:,:].T
V1 = vh[:-ind,:].T
A = dot(J.T,J)
tmp = dot(V2.T,A)
Q = dot(tmp,V2)
p = scipy.linalg.solve(Q, tmp)
tmp = dot(V2,p)
tmp = np.eye(N+K) - tmp
tmp = dot(tmp,V1)
tmp = dot(tmp,np.diag(1.0/s))
tmp = dot(tmp,u.T)
return _dot0(tmp, yk)
def _setdiag(a, k, v):
if not a.ndim == 2:
raise ValueError("Input array should be 2-D.")
M,N = a.shape
if k > 0:
start = k
num = N - k
else:
num = M + k
start = abs(k)*N
end = start + num*(N+1)-1
a.flat[start:end:(N+1)] = v
# Return the spline that minimizes the discontinuity of the
# "order-th" derivative; for order >= 2.
def _find_smoothest2(xk, yk):
N = len(xk) - 1
Np1 = N + 1
# find pseudo-inverse of B directly.
Bd = np.empty((Np1, N))
for k in range(-N,N):
if (k < 0):
l = np.arange(-k, Np1)
v = (l+k+1)
if ((k+1) % 2):
v = -v
else:
l = np.arange(k,N)
v = N - l
if ((k % 2)):
v = -v
_setdiag(Bd, k, v)
Bd /= (Np1)
V2 = np.ones((Np1,))
V2[1::2] = -1
V2 /= math.sqrt(Np1)
dk = np.diff(xk)
b = 2*np.diff(yk, axis=0)/dk
J = np.zeros((N-1,N+1))
idk = 1.0/dk
_setdiag(J,0,idk[:-1])
_setdiag(J,1,-idk[1:]-idk[:-1])
_setdiag(J,2,idk[1:])
A = dot(J.T,J)
val = dot(V2,dot(A,V2))
res1 = dot(np.outer(V2,V2)/val,A)
mk = dot(np.eye(Np1)-res1, _dot0(Bd,b))
return mk
def _get_spline2_Bb(xk, yk, kind, conds):
Np1 = len(xk)
dk = xk[1:]-xk[:-1]
if kind == 'not-a-knot':
# use banded-solver
nlu = (1,1)
B = ones((3,Np1))
alpha = 2*(yk[1:]-yk[:-1])/dk
zrs = np.zeros((1,)+yk.shape[1:])
row = (Np1-1)//2
b = np.concatenate((alpha[:row],zrs,alpha[row:]),axis=0)
B[0,row+2:] = 0
B[2,:(row-1)] = 0
B[0,row+1] = dk[row-1]
B[1,row] = -dk[row]-dk[row-1]
B[2,row-1] = dk[row]
return B, b, None, nlu
else:
raise NotImplementedError("quadratic %s is not available" % kind)
def _get_spline3_Bb(xk, yk, kind, conds):
# internal function to compute different tri-diagonal system
# depending on the kind of spline requested.
# conds is only used for 'second' and 'first'
Np1 = len(xk)
if kind in ['natural', 'second']:
if kind == 'natural':
m0, mN = 0.0, 0.0
else:
m0, mN = conds
# the matrix to invert is (N-1,N-1)
# use banded solver
beta = 2*(xk[2:]-xk[:-2])
alpha = xk[1:]-xk[:-1]
nlu = (1,1)
B = np.empty((3,Np1-2))
B[0,1:] = alpha[2:]
B[1,:] = beta
B[2,:-1] = alpha[1:-1]
dyk = yk[1:]-yk[:-1]
b = (dyk[1:]/alpha[1:] - dyk[:-1]/alpha[:-1])
b *= 6
b[0] -= m0
b[-1] -= mN
def append_func(mk):
# put m0 and mN into the correct shape for
# concatenation
ma = array(m0,copy=0,ndmin=yk.ndim)
mb = array(mN,copy=0,ndmin=yk.ndim)
if ma.shape[1:] != yk.shape[1:]:
ma = ma*(ones(yk.shape[1:])[np.newaxis,...])
if mb.shape[1:] != yk.shape[1:]:
mb = mb*(ones(yk.shape[1:])[np.newaxis,...])
mk = np.concatenate((ma,mk),axis=0)
mk = np.concatenate((mk,mb),axis=0)
return mk
return B, b, append_func, nlu
elif kind in ['clamped', 'endslope', 'first', 'not-a-knot', 'runout',
'parabolic']:
if kind == 'endslope':
# match slope of lagrange interpolating polynomial of
# order 3 at end-points.
x0,x1,x2,x3 = xk[:4]
sl_0 = (1./(x0-x1)+1./(x0-x2)+1./(x0-x3))*yk[0]
sl_0 += (x0-x2)*(x0-x3)/((x1-x0)*(x1-x2)*(x1-x3))*yk[1]
sl_0 += (x0-x1)*(x0-x3)/((x2-x0)*(x2-x1)*(x2-x3))*yk[2]
sl_0 += (x0-x1)*(x0-x2)/((x3-x0)*(x3-x1)*(x3-x2))*yk[3]
xN3,xN2,xN1,xN0 = xk[-4:]
sl_N = (1./(xN0-xN1)+1./(xN0-xN2)+1./(xN0-xN3))*yk[-1]
sl_N += (xN0-xN2)*(xN0-xN3)/((xN1-xN0)*(xN1-xN2)*(xN1-xN3))*yk[-2]
sl_N += (xN0-xN1)*(xN0-xN3)/((xN2-xN0)*(xN2-xN1)*(xN2-xN3))*yk[-3]
sl_N += (xN0-xN1)*(xN0-xN2)/((xN3-xN0)*(xN3-xN1)*(xN3-xN2))*yk[-4]
elif kind == 'clamped':
sl_0, sl_N = 0.0, 0.0
elif kind == 'first':
sl_0, sl_N = conds
# Now set up the (N+1)x(N+1) system of equations
beta = np.r_[0,2*(xk[2:]-xk[:-2]),0]
alpha = xk[1:]-xk[:-1]
gamma = np.r_[0,alpha[1:]]
B = np.diag(alpha,k=-1) + np.diag(beta) + np.diag(gamma,k=1)
d1 = alpha[0]
dN = alpha[-1]
if kind == 'not-a-knot':
d2 = alpha[1]
dN1 = alpha[-2]
B[0,:3] = [d2,-d1-d2,d1]
B[-1,-3:] = [dN,-dN1-dN,dN1]
elif kind == 'runout':
B[0,:3] = [1,-2,1]
B[-1,-3:] = [1,-2,1]
elif kind == 'parabolic':
B[0,:2] = [1,-1]
B[-1,-2:] = [-1,1]
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
B[0,:2] = [2*d1,d1]
B[-1,-2:] = [dN,2*dN]
# Set up RHS (b)
b = np.empty((Np1,)+yk.shape[1:])
dyk = (yk[1:]-yk[:-1])*1.0
if kind in ['not-a-knot', 'runout', 'parabolic']:
b[0] = b[-1] = 0.0
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
b[0] = (dyk[0]/d1 - sl_0)
b[-1] = -(dyk[-1]/dN - sl_N)
b[1:-1,...] = (dyk[1:]/alpha[1:]-dyk[:-1]/alpha[:-1])
b *= 6.0
return B, b, None, None
else:
raise ValueError("%s not supported" % kind)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# If conds is None, then use the not_a_knot condition
# at K-1 farthest separated points in the interval
def _find_not_a_knot(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued second
# derivative at K-1 farthest separated points
def _find_natural(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued first
# derivative at K-1 farthest separated points
def _find_clamped(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def _find_fixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then use coefficient periodicity
# If conds is 'function' then use function periodicity
def _find_periodic(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# Doesn't use conds
def _find_symmetric(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# conds is a dictionary with multiple values
def _find_mixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
"""
Return a representation of a spline given data-points at internal knots
Parameters
----------
xk : array_like
The input array of x values of rank 1
yk : array_like
The input array of y values of rank N. `yk` can be an N-d array to
represent more than one curve, through the same `xk` points. The first
dimension is assumed to be the interpolating dimension and is the same
length of `xk`.
order : int, optional
Order of the spline
kind : str, optional
Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
conds : optional
Extra conditions for the chosen `kind`; ignored by the default 'smoothest' solver.
Returns
-------
splmake : tuple
Return a (`xk`, `cvals`, `k`) representation of a spline given
data-points where the (internal) knots are at the data-points.
"""
yk = np.asanyarray(yk)
order = int(order)
if order < 0:
raise ValueError("order must not be negative")
if order == 0:
return xk, yk[:-1], order
elif order == 1:
return xk, yk, order
try:
func = eval('_find_%s' % kind)
except:
raise NotImplementedError
# the constraint matrix
B = _fitpack._bsplmat(order, xk)
coefs = func(xk, yk, order, conds, B)
return xk, coefs, order
def spleval(xck, xnew, deriv=0):
"""
Evaluate a fixed spline represented by the given tuple at the new x-values
The `xj` values are the interior knot points. The approximation
region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`
should have length N+k where `k` is the order of the spline.
Parameters
----------
(xj, cvals, k) : tuple
Parameters that define the fixed spline
xj : array_like
Interior knot points
cvals : array_like
Spline coefficients, as returned by `splmake`
k : int
Order of the spline
xnew : array_like
Locations to calculate spline
deriv : int
Order of the derivative to evaluate. Default is 0.
Returns
-------
spleval : ndarray
If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
`xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
providing the interpolation of multiple curves.
Notes
-----
Internally, an additional `k`-1 knot points are added on either side of
the spline.
"""
(xj,cvals,k) = xck
oldshape = np.shape(xnew)
xx = np.ravel(xnew)
sh = cvals.shape[1:]
res = np.empty(xx.shape + sh, dtype=cvals.dtype)
for index in np.ndindex(*sh):
sl = (slice(None),)+index
if issubclass(cvals.dtype.type, np.complexfloating):
res[sl].real = _fitpack._bspleval(xx,xj,cvals.real[sl],k,deriv)
res[sl].imag = _fitpack._bspleval(xx,xj,cvals.imag[sl],k,deriv)
else:
res[sl] = _fitpack._bspleval(xx,xj,cvals[sl],k,deriv)
res.shape = oldshape + sh
return res
def spltopp(xk, cvals, k):
"""Return a piece-wise polynomial object from a fixed-spline tuple.
"""
return ppform.fromspline(xk, cvals, k)
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
"""
Interpolate a curve at new points using a spline fit
Parameters
----------
xk, yk : array_like
The x and y values that define the curve.
xnew : array_like
The x values where spline should estimate the y values.
order : int
Default is 3.
kind : string
One of {'smoothest'}
conds : optional
Extra conditions; passed through to `splmake`.
Returns
-------
spline : ndarray
An array of y values; the spline evaluated at the positions `xnew`.
"""
return spleval(splmake(xk,yk,order=order,kind=kind,conds=conds),xnew)
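# --- Illustrative usage sketch, added for exposition; not part of the original
# scipy source. It exercises the legacy splmake/spleval/spline helpers above on
# a smooth function; no numerical values are asserted, the return value is just
# the maximum deviation from sin on a finer grid. The helper name
# `_example_legacy_spline` is hypothetical.
def _example_legacy_spline():
    import numpy as np
    xk = np.linspace(0, 2 * np.pi, 10)
    yk = np.sin(xk)
    xnew = np.linspace(0, 2 * np.pi, 50)
    spl = splmake(xk, yk, order=3)    # (knots, coefficients, order) tuple
    ynew = spleval(spl, xnew)         # same result as spline(xk, yk, xnew)
    return np.max(np.abs(ynew - np.sin(xnew)))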
| mit |
datapythonista/pandas | pandas/tests/frame/methods/test_describe.py | 2 | 13700 | import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameDescribe:
def test_describe_bool_in_mixed_frame(self):
df = DataFrame(
{
"string_data": ["a", "b", "c", "d", "e"],
"bool_data": [True, True, False, False, False],
"int_data": [10, 20, 30, 40, 50],
}
)
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame(
{"int_data": [5, 30, df.int_data.std(), 10, 20, 30, 40, 50]},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=["bool"])
expected = DataFrame(
{"bool_data": [5, 2, False, 3]}, index=["count", "unique", "top", "freq"]
)
tm.assert_frame_equal(result, expected)
def test_describe_empty_object(self):
# GH#27183
df = DataFrame({"A": [None, None]}, dtype=object)
result = df.describe()
expected = DataFrame(
{"A": [0, 0, np.nan, np.nan]},
dtype=object,
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
result = df.iloc[:0].describe()
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH#13891
df = DataFrame(
{
"bool_data_1": [False, False, True, True],
"bool_data_2": [False, True, True, True],
}
)
result = df.describe()
expected = DataFrame(
{"bool_data_1": [4, 2, False, 2], "bool_data_2": [4, 2, True, 3]},
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
df = DataFrame(
{
"bool_data": [False, False, True, True, False],
"int_data": [0, 1, 2, 3, 4],
}
)
result = df.describe()
expected = DataFrame(
{"int_data": [5, 2, df.int_data.std(), 0, 1, 2, 3, 4]},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_frame_equal(result, expected)
df = DataFrame(
{"bool_data": [False, False, True, True], "str_data": ["a", "b", "c", "a"]}
)
result = df.describe()
expected = DataFrame(
{"bool_data": [4, 2, False, 2], "str_data": [4, 3, "a", 2]},
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(
["a", "b", "b", "b"], categories=["a", "b", "c"], ordered=True
)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3], index=["count", "unique", "top", "freq"])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_empty_categorical_column(self):
# GH#26397
# Ensure the index of an empty categorical DataFrame column
# also contains (count, unique, top, freq)
df = DataFrame({"empty_col": Categorical([])})
result = df.describe()
expected = DataFrame(
{"empty_col": [0, 0, np.nan, np.nan]},
index=["count", "unique", "top", "freq"],
dtype="object",
)
tm.assert_frame_equal(result, expected)
# ensure NaN, not None
assert np.isnan(result.iloc[2, 0])
assert np.isnan(result.iloc[3, 0])
def test_describe_categorical_columns(self):
# GH#11558
columns = pd.CategoricalIndex(["int1", "int2", "obj"], ordered=True, name="XXX")
df = DataFrame(
{
"int1": [10, 20, 30, 40, 50],
"int2": [10, 20, 30, 40, 50],
"obj": ["A", 0, None, "X", 1],
},
columns=columns,
)
result = df.describe()
exp_columns = pd.CategoricalIndex(
["int1", "int2"],
categories=["int1", "int2", "obj"],
ordered=True,
name="XXX",
)
expected = DataFrame(
{
"int1": [5, 30, df.int1.std(), 10, 20, 30, 40, 50],
"int2": [5, 30, df.int2.std(), 10, 20, 30, 40, 50],
},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
columns=exp_columns,
)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values, expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(
["2011-01-01", "2011-02-01", "2011-03-01"],
freq="MS",
tz="US/Eastern",
name="XXX",
)
df = DataFrame(
{
0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ["A", 0, None, "X", 1],
}
)
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(
["2011-01-01", "2011-02-01"], freq="MS", tz="US/Eastern", name="XXX"
)
expected = DataFrame(
{
0: [5, 30, df.iloc[:, 0].std(), 10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(), 10, 20, 30, 40, 50],
},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == "MS"
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH#6145
t1 = pd.timedelta_range("1 days", freq="D", periods=5)
t2 = pd.timedelta_range("1 hours", freq="H", periods=5)
df = DataFrame({"t1": t1, "t2": t2})
expected = DataFrame(
{
"t1": [
5,
pd.Timedelta("3 days"),
df.iloc[:, 0].std(),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
pd.Timedelta("4 days"),
pd.Timedelta("5 days"),
],
"t2": [
5,
pd.Timedelta("3 hours"),
df.iloc[:, 1].std(),
pd.Timedelta("1 hours"),
pd.Timedelta("2 hours"),
pd.Timedelta("3 hours"),
pd.Timedelta("4 hours"),
pd.Timedelta("5 hours"),
],
},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (
" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919273 0 days 01:34:52.099788303\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00"
)
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH#21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = DataFrame({"s1": s1, "s2": s2})
expected = DataFrame(
{
"s1": [5, 2, 0, 1, 2, 3, 4, 1.581139],
"s2": [
5,
Timestamp(2018, 1, 3).tz_localize(tz),
start.tz_localize(tz),
s2[1],
s2[2],
s2[3],
end.tz_localize(tz),
np.nan,
],
},
index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"],
)
result = df.describe(include="all", datetime_is_numeric=True)
tm.assert_frame_equal(result, expected)
def test_datetime_is_numeric_includes_datetime(self):
df = DataFrame({"a": date_range("2012", periods=3), "b": [1, 2, 3]})
result = df.describe(datetime_is_numeric=True)
expected = DataFrame(
{
"a": [
3,
Timestamp("2012-01-02"),
Timestamp("2012-01-01"),
Timestamp("2012-01-01T12:00:00"),
Timestamp("2012-01-02"),
Timestamp("2012-01-02T12:00:00"),
Timestamp("2012-01-03"),
np.nan,
],
"b": [3, 2, 1, 1.5, 2, 2.5, 3, 1],
},
index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"],
)
tm.assert_frame_equal(result, expected)
def test_describe_tz_values2(self):
tz = "CET"
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = DataFrame({"s1": s1, "s2": s2})
s1_ = s1.describe()
s2_ = Series(
[
5,
5,
s2.value_counts().index[0],
1,
start.tz_localize(tz),
end.tz_localize(tz),
],
index=["count", "unique", "top", "freq", "first", "last"],
)
idx = [
"count",
"unique",
"top",
"freq",
"first",
"last",
"mean",
"std",
"min",
"25%",
"50%",
"75%",
"max",
]
expected = pd.concat([s1_, s2_], axis=1, keys=["s1", "s2"]).loc[idx]
with tm.assert_produces_warning(FutureWarning):
result = df.describe(include="all")
tm.assert_frame_equal(result, expected)
def test_describe_percentiles_integer_idx(self):
# GH#26660
df = DataFrame({"x": [1]})
pct = np.linspace(0, 1, 10 + 1)
result = df.describe(percentiles=pct)
expected = DataFrame(
{"x": [1.0, 1.0, np.NaN, 1.0, *[1.0 for _ in pct], 1.0]},
index=[
"count",
"mean",
"std",
"min",
"0%",
"10%",
"20%",
"30%",
"40%",
"50%",
"60%",
"70%",
"80%",
"90%",
"100%",
"max",
],
)
tm.assert_frame_equal(result, expected)
def test_describe_does_not_raise_error_for_dictlike_elements(self):
# GH#32409
df = DataFrame([{"test": {"a": "1"}}, {"test": {"a": "2"}}])
expected = DataFrame(
{"test": [2, 2, {"a": "1"}, 1]}, index=["count", "unique", "top", "freq"]
)
result = df.describe()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("exclude", ["x", "y", ["x", "y"], ["x", "z"]])
def test_describe_when_include_all_exclude_not_allowed(self, exclude):
"""
When include is 'all', then setting exclude != None is not allowed.
"""
df = DataFrame({"x": [1], "y": [2], "z": [3]})
msg = "exclude must be None when include is 'all'"
with pytest.raises(ValueError, match=msg):
df.describe(include="all", exclude=exclude)
def test_describe_with_duplicate_columns(self):
df = DataFrame(
[[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=["bar", "a", "a"],
dtype="float64",
)
result = df.describe()
ser = df.iloc[:, 0].describe()
expected = pd.concat([ser, ser, ser], keys=df.columns, axis=1)
tm.assert_frame_equal(result, expected)
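# --- Illustrative sketch, added for exposition; not part of the original test
# module. It is a stand-alone reminder of the describe() layout the tests above
# rely on for a purely numeric column. The helper name `_demo_numeric_describe`
# is hypothetical and is deliberately not named test_* so pytest does not
# collect it.
def _demo_numeric_describe():
    df = DataFrame({"x": [1.0, 2.0, 3.0]})
    result = df.describe()
    assert list(result.index) == [
        "count", "mean", "std", "min", "25%", "50%", "75%", "max"
    ]
    assert result.loc["mean", "x"] == 2.0
    return result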
| bsd-3-clause |
chemlab/chemlab | chemlab/md/potential.py | 1 | 11381 | """Topology handling in gromacs"""
from ..db import ChemlabDB
from ..core import System, Molecule, Atom
from ..table import atomic_no, atomic_weight
from .energy import lorentz_berthelot as combine_lorentz_berthelot
import itertools
import time
import datetime
from itertools import combinations, combinations_with_replacement
from collections import OrderedDict
import numpy as np
def line(*args, **kwargs):
just = kwargs.get("just", "left")
if just == "right":
return ' '.join(str(a).rjust(10) for a in args) + '\n'
if just == "left":
return ' '.join(str(a).ljust(10) for a in args) + '\n'
else:
raise ValueError('just must be right or left')
def comment(*args):
return ';' + line(*args)
class ChargedLJ(object):
def __init__(self, name, q, type, sigma, eps):
self.name = name
self.q = q
self.type = type
self.sigma = sigma
self.eps = eps
@property
def c6(self):
return 4.0 * self.eps * self.sigma ** 6
@property
def c12(self):
return 4.0 * self.eps * self.sigma ** 12
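# Illustrative sketch (not part of the original module): c6 and c12 above are
# the standard 12-6 Lennard-Jones coefficients, so the pair potential of a
# single particle type can be reconstructed as V(r) = c12/r**12 - c6/r**6.
def _example_lj_potential(particle, r):
    """Evaluate the 12-6 Lennard-Jones potential from c6/c12 (sketch)."""
    return particle.c12 / r ** 12 - particle.c6 / r ** 6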
class InterMolecular(object):
def __init__(self, type='lj'):
self.particles = {}
self.special_pairs = {}
self.type = type
@classmethod
def from_dict(cls, data):
self = cls()
for name, atomspec in data.items():
particle = ChargedLJ(name, atomspec['q'], atomspec[
'type'], atomspec['sigma'], atomspec['eps'])
self.particles[name] = particle
return self
def pair_interaction(self, a, b):
i, j = self.particles[a], self.particles[b]
if (a, b) in self.special_pairs:
params = self.special_pairs[a, b]
else:
params = {}
if self.type == 'lj':
sigma, eps = combine_lorentz_berthelot(
i.sigma, j.sigma, i.eps, j.eps)
return PairInteraction((i, j), sigma, eps)
elif self.type == 'custom':
# We expect coulomb, dispersion, repulsion
coulomb = params['coulomb']
dispersion = params['dispersion']
repulsion = params['repulsion']
return CustomPairInteraction((i, j), coulomb, dispersion, repulsion)
else:
raise ValueError("Type not recognized")
class PairInteraction:
def __init__(self, pair, sigma=None, eps=None):
self.pair = pair
self.sigma, self.eps = combine_lorentz_berthelot(pair[0].sigma, pair[1].sigma, pair[0].eps, pair[1].eps)
@property
def c6(self):
return 4.0 * self.eps * self.sigma ** 6
@property
def c12(self):
return 4.0 * self.eps * self.sigma ** 12
def f(self, x):
return 1.0/x
def g(self, x):
return - self.c6 * (1/x**6)
def h(self, x):
return self.c12 * (1/x**12)
class CustomParticle:
def __init__(self, name, type, q, params):
self.name = name
self.type = type
self.params = params
self.q = q
from scipy.misc import derivative
class CustomPairInteraction:
def __init__(self, pair, coulomb, dispersion, repulsion):
        '''Define a custom pair interaction. coulomb, dispersion and
        repulsion are Python functions that take an array of x values and
        return an array of potential values.'''
self.pair = pair
self.coulomb = coulomb
self.dispersion = dispersion
self.repulsion = repulsion
def f(self, x):
return self.coulomb(x, self.pair[0].params, self.pair[1].params)
def df(self, x):
return derivative(self.f, x, dx=1e-10)
def g(self, x):
return self.dispersion(x, self.pair[0].params, self.pair[1].params)
def dg(self, x):
return derivative(self.g, x, dx=1e-10)
def h(self, x):
return self.repulsion(x, self.pair[0].params, self.pair[1].params)
def dh(self, x):
return derivative(self.h, x, dx=1e-10)
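# Illustrative sketch (not part of the original module): building a
# CustomPairInteraction from plain Python callables. The particle names and
# parameter values below are made up for demonstration only; each callable
# receives the distances plus the two particles' parameter dicts.
def _example_custom_pair():
    na = CustomParticle('Na', 'Na', 1.0, {'A': 0.1, 'B': 1.0e-5})
    cl = CustomParticle('Cl', 'Cl', -1.0, {'A': 0.2, 'B': 2.0e-5})
    coulomb = lambda x, pi, pj: 1.0 / x
    dispersion = lambda x, pi, pj: -np.sqrt(pi['A'] * pj['A']) / x ** 6
    repulsion = lambda x, pi, pj: np.sqrt(pi['B'] * pj['B']) / x ** 12
    return CustomPairInteraction((na, cl), coulomb, dispersion, repulsion)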
class MolecularConstraints:
def __init__(self, name, atoms, bonds, angles, dihedrals):
self.atoms = atoms
self.name = name
self.bonds = bonds
self.angles = angles
self.dihedrals = dihedrals
class HarmonicConstraint:
def __init__(self, between, r, k):
self.between = between
self.r = r
self.k = k
class HarmonicAngleConstraint:
def __init__(self, between, theta, k):
self.between = between
self.theta = theta
self.k = k
class IntraMolecular(object):
def __init__(self):
self.molecules = {}
@classmethod
def from_dict(cls, data):
self = cls()
for name, molspec in data.items():
if 'bonds' in molspec:
bonds = [HarmonicConstraint(b['between'], b['r'], b['k'])
for b in molspec['bonds']]
else:
bonds = []
if 'angles' in molspec:
angles = [HarmonicAngleConstraint(b['between'], b['theta'], b['k'])
for b in molspec['angles']]
else:
angles = []
cst = MolecularConstraints(
name, molspec['atoms'], bonds, angles, [])
self.molecules[name] = cst
return self
class Potential(object):
def __init__(self, nonbonded, bonded):
self.intermolecular = nonbonded
self.intramolecular = bonded
class ForceGenerator(object):
def __init__(self, spec):
self.intermolecular = InterMolecular.from_dict(spec['nonbonded'])
self.intramolecular = IntraMolecular.from_dict(spec['bonded'])
def to_table(custom_interaction, cutoff, precision='double'):
if precision == 'single':
step = 0.002
    elif precision == 'double':
step = 0.0005
else:
raise ValueError("Precision can be either single or double")
r = np.arange(0.0, 1 + cutoff + 2*step, step)
f = custom_interaction.f(r)
df = custom_interaction.df(r)
g = custom_interaction.g(r)
dg = custom_interaction.dg(r)
h = custom_interaction.h(r)
dh = custom_interaction.dh(r)
columns = [r, f, -df, g, -dg, h, -dh]
rows = np.array(columns).T
rows[0] = 0.0
return '\n'.join(' '.join("%.8E" % n for n in row) for row in rows)
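# Illustrative sketch (assumption, not original code): dumping the tabulated
# potential of a custom pair interaction to a text file in the format
# produced by to_table above. The output file name is only an example.
def _example_write_table(pair_interaction, cutoff=1.2):
    table_text = to_table(pair_interaction, cutoff, precision='double')
    with open('table_example.xvg', 'w') as fh:
        fh.write(table_text)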
def to_top(system, potential):
molecules = [system.subentity(Molecule, i)
for i in range(system.dimensions['molecule'])]
unique_molecules = OrderedDict()
    for m in molecules:
        unique_molecules[m.molecule_name] = m
unique_atoms = OrderedDict()
for m in unique_molecules.values():
for a in [m.subentity(Atom, i) for i in range(m.dimensions['atom'])]:
unique_atoms[a.atom_name] = a
# Defaults section
r = ''
r += comment('Generated by chemlab ' +
datetime.datetime
.fromtimestamp(time.time())
.strftime('%Y-%m-%d %H:%M:%S'))
r += line('[ defaults ]')
    r += comment('nbfunc', 'comb-rule', 'gen-pairs', 'fudgeLJ', 'fudgeQQ')
r += line(1, 1, "yes", 0.5, 0.5)
r += line()
# Non bonded interactions
r += line('[ atomtypes ]')
r += comment('name', 'atom_type', 'mass', 'charge', 'ptype', 'C', 'A')
name_to_type = {}
for atom in unique_atoms:
# potential.intermolecular.particles
particle = potential.intermolecular.particles[atom]
if isinstance(particle, ChargedLJ):
r += line(particle.name, particle.type, atomic_no(particle.type), atomic_weight(particle.type),
particle.q, 'A', particle.c6, particle.c12)
elif isinstance(particle, CustomParticle):
r += line(particle.name, particle.type, atomic_no(particle.type), atomic_weight(particle.type),
particle.q, 'A', 1.0, 1.0)
else:
raise ValueError("unknown particle type {}".format(particle))
name_to_type[particle.name] = particle.type
r += line()
r += line('[ nonbondparams ]')
r += comment('i', 'j', 'func', 'V', 'W')
# We override gromacs with our own rules
for atom1, atom2 in combinations_with_replacement(unique_atoms, 2):
# potential.intermolecular.pairs:
pair = potential.intermolecular.pair_interaction(atom1, atom2)
if isinstance(pair, PairInteraction):
r += line(pair.pair[0].name,
pair.pair[1].name,
1, # Combination rule 1 = lorentz-berthelot
pair.c6,
pair.c12)
elif isinstance(pair, CustomPairInteraction):
r += line(pair.pair[0].name,
pair.pair[1].name, 1, 1.0, 1.0)
else:
raise ValueError("Wrong pair interaction {}".format(pair))
r += line()
for molecule_name in unique_molecules:
# print potential.intramolecular.molecules
molecule = potential.intramolecular.molecules[molecule_name]
r += line('[ moleculetype ]')
r += comment('name', 'nbexcl')
r += line(molecule.name, 2)
r += line()
# Atoms directive...
r += line('[ atoms ]', just="left")
r += comment('nr', 'type', 'resnr', 'residue',
'atom', 'cgnr', 'charge', 'mass')
for i, t in enumerate(molecule.atoms):
p = potential.intermolecular.particles[t]
r += line(i + 1, t, 1, molecule.name, t, 1, p.q)
# 1 O 1 SOL OW 1 -0.8476
r += line()
# Bonds directive...
if molecule.bonds:
r += line('[ bonds ]', just="left")
r += comment('i', 'j', 'funct', 'length', 'force.c.')
for b in molecule.bonds:
r += line(b.between[0] + 1, b.between[1] + 1, 1, b.r, b.k)
r += line()
# Angle directive...
if molecule.angles:
r += line('[ angles ]', just="left")
r += comment('i', 'j', 'k', 'funct', 'angle', 'force.c.')
for ang in molecule.angles:
r += line(ang.between[0] + 1,
ang.between[1] + 1,
ang.between[2] + 1, 1, ang.theta, ang.k)
r += line()
# Create dihedrals
for ang in molecule.dihedrals:
r += line(ang.between[0] + 1,
ang.between[1] + 1,
ang.between[2] + 1, 1, ang.theta, ang.k)
r += line()
# System
r += line('[ system ]')
r += line('flying pandas')
r += line()
r += line('[ molecules ]')
counter = 0
current = -1
mollist = []
for t in system.molecule_name:
if t != current:
mollist.append((current, counter))
current = t
counter = 0
counter += 1
mollist.append((current, counter))
mollist.pop(0)
for mol, counter in mollist:
r += line(mol, counter)
return r
def from_top(topfile):
topfile.read()
# atom_types
# pair_interactions -> system-wide (they are combined for all molecules)
# bond_interactions -> relative to each molecule
# angle_interactions -> relative to each molecule
# number of molecules -> relative only to the system, but this is a flaw of
# the top format, we don't read that
| gpl-3.0 |
apache/arrow | python/pyarrow/tests/parquet/test_datetime.py | 4 | 12939 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import io
import numpy as np
import pytest
import pyarrow as pa
from pyarrow.tests.parquet.common import (
_check_roundtrip, parametrize_legacy_dataset)
try:
import pyarrow.parquet as pq
from pyarrow.tests.parquet.common import _read_table, _write_table
except ImportError:
pq = None
try:
import pandas as pd
import pandas.testing as tm
from pyarrow.tests.parquet.common import _roundtrip_pandas_dataframe
except ImportError:
pd = tm = None
pytestmark = pytest.mark.parquet
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_datetime_tz(use_legacy_dataset):
s = pd.Series([datetime.datetime(2017, 9, 6)])
s = s.dt.tz_localize('utc')
s.index = s
# Both a column and an index to hit both use cases
df = pd.DataFrame({'tz_aware': s,
'tz_eastern': s.dt.tz_convert('US/Eastern')},
index=s)
f = io.BytesIO()
arrow_table = pa.Table.from_pandas(df)
_write_table(arrow_table, f, coerce_timestamps='ms')
f.seek(0)
table_read = pq.read_pandas(f, use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_datetime_timezone_tzinfo(use_legacy_dataset):
value = datetime.datetime(2018, 1, 1, 1, 23, 45,
tzinfo=datetime.timezone.utc)
df = pd.DataFrame({'foo': [value]})
_roundtrip_pandas_dataframe(
df, write_kwargs={}, use_legacy_dataset=use_legacy_dataset)
@pytest.mark.pandas
def test_coerce_timestamps(tempdir):
from collections import OrderedDict
# ARROW-622
arrays = OrderedDict()
fields = [pa.field('datetime64',
pa.list_(pa.timestamp('ms')))]
arrays['datetime64'] = [
np.array(['2007-07-13T01:23:34.123456789',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ms]'),
None,
None,
np.array(['2007-07-13T02',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ms]'),
]
df = pd.DataFrame(arrays)
schema = pa.schema(fields)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df, schema=schema)
_write_table(arrow_table, filename, version="2.0", coerce_timestamps='us')
table_read = _read_table(filename)
df_read = table_read.to_pandas()
df_expected = df.copy()
for i, x in enumerate(df_expected['datetime64']):
if isinstance(x, np.ndarray):
df_expected['datetime64'][i] = x.astype('M8[us]')
tm.assert_frame_equal(df_expected, df_read)
with pytest.raises(ValueError):
_write_table(arrow_table, filename, version='2.0',
coerce_timestamps='unknown')
@pytest.mark.pandas
def test_coerce_timestamps_truncated(tempdir):
"""
ARROW-2555: Test that we can truncate timestamps when coercing if
explicitly allowed.
"""
dt_us = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,
second=1, microsecond=1)
dt_ms = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,
second=1)
fields_us = [pa.field('datetime64', pa.timestamp('us'))]
arrays_us = {'datetime64': [dt_us, dt_ms]}
df_us = pd.DataFrame(arrays_us)
schema_us = pa.schema(fields_us)
filename = tempdir / 'pandas_truncated.parquet'
table_us = pa.Table.from_pandas(df_us, schema=schema_us)
_write_table(table_us, filename, version="2.0", coerce_timestamps='ms',
allow_truncated_timestamps=True)
table_ms = _read_table(filename)
df_ms = table_ms.to_pandas()
arrays_expected = {'datetime64': [dt_ms, dt_ms]}
df_expected = pd.DataFrame(arrays_expected)
tm.assert_frame_equal(df_expected, df_ms)
@pytest.mark.pandas
def test_date_time_types(tempdir):
t1 = pa.date32()
data1 = np.array([17259, 17260, 17261], dtype='int32')
a1 = pa.array(data1, type=t1)
t2 = pa.date64()
data2 = data1.astype('int64') * 86400000
a2 = pa.array(data2, type=t2)
t3 = pa.timestamp('us')
start = pd.Timestamp('2001-01-01').value / 1000
data3 = np.array([start, start + 1, start + 2], dtype='int64')
a3 = pa.array(data3, type=t3)
t4 = pa.time32('ms')
data4 = np.arange(3, dtype='i4')
a4 = pa.array(data4, type=t4)
t5 = pa.time64('us')
a5 = pa.array(data4.astype('int64'), type=t5)
t6 = pa.time32('s')
a6 = pa.array(data4, type=t6)
ex_t6 = pa.time32('ms')
ex_a6 = pa.array(data4 * 1000, type=ex_t6)
t7 = pa.timestamp('ns')
start = pd.Timestamp('2001-01-01').value
data7 = np.array([start, start + 1000, start + 2000],
dtype='int64')
a7 = pa.array(data7, type=t7)
table = pa.Table.from_arrays([a1, a2, a3, a4, a5, a6, a7],
['date32', 'date64', 'timestamp[us]',
'time32[s]', 'time64[us]',
'time32_from64[s]',
'timestamp[ns]'])
# date64 as date32
# time32[s] to time32[ms]
expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7],
['date32', 'date64', 'timestamp[us]',
'time32[s]', 'time64[us]',
'time32_from64[s]',
'timestamp[ns]'])
_check_roundtrip(table, expected=expected, version='2.0')
t0 = pa.timestamp('ms')
data0 = np.arange(4, dtype='int64')
a0 = pa.array(data0, type=t0)
t1 = pa.timestamp('us')
data1 = np.arange(4, dtype='int64')
a1 = pa.array(data1, type=t1)
t2 = pa.timestamp('ns')
data2 = np.arange(4, dtype='int64')
a2 = pa.array(data2, type=t2)
table = pa.Table.from_arrays([a0, a1, a2],
['ts[ms]', 'ts[us]', 'ts[ns]'])
expected = pa.Table.from_arrays([a0, a1, a2],
['ts[ms]', 'ts[us]', 'ts[ns]'])
# int64 for all timestamps supported by default
filename = tempdir / 'int64_timestamps.parquet'
_write_table(table, filename, version='2.0')
parquet_schema = pq.ParquetFile(filename).schema
for i in range(3):
assert parquet_schema.column(i).physical_type == 'INT64'
read_table = _read_table(filename)
assert read_table.equals(expected)
t0_ns = pa.timestamp('ns')
data0_ns = np.array(data0 * 1000000, dtype='int64')
a0_ns = pa.array(data0_ns, type=t0_ns)
t1_ns = pa.timestamp('ns')
data1_ns = np.array(data1 * 1000, dtype='int64')
a1_ns = pa.array(data1_ns, type=t1_ns)
expected = pa.Table.from_arrays([a0_ns, a1_ns, a2],
['ts[ms]', 'ts[us]', 'ts[ns]'])
# int96 nanosecond timestamps produced upon request
filename = tempdir / 'explicit_int96_timestamps.parquet'
_write_table(table, filename, version='2.0',
use_deprecated_int96_timestamps=True)
parquet_schema = pq.ParquetFile(filename).schema
for i in range(3):
assert parquet_schema.column(i).physical_type == 'INT96'
read_table = _read_table(filename)
assert read_table.equals(expected)
# int96 nanosecond timestamps implied by flavor 'spark'
filename = tempdir / 'spark_int96_timestamps.parquet'
_write_table(table, filename, version='2.0',
flavor='spark')
parquet_schema = pq.ParquetFile(filename).schema
for i in range(3):
assert parquet_schema.column(i).physical_type == 'INT96'
read_table = _read_table(filename)
assert read_table.equals(expected)
def test_timestamp_restore_timezone():
# ARROW-5888, restore timezone from serialized metadata
ty = pa.timestamp('ms', tz='America/New_York')
arr = pa.array([1, 2, 3], type=ty)
t = pa.table([arr], names=['f0'])
_check_roundtrip(t)
def test_timestamp_restore_timezone_nanosecond():
# ARROW-9634, also restore timezone for nanosecond data that get stored
# as microseconds in the parquet file
ty = pa.timestamp('ns', tz='America/New_York')
arr = pa.array([1000, 2000, 3000], type=ty)
table = pa.table([arr], names=['f0'])
ty_us = pa.timestamp('us', tz='America/New_York')
expected = pa.table([arr.cast(ty_us)], names=['f0'])
_check_roundtrip(table, expected=expected)
@pytest.mark.pandas
def test_list_of_datetime_time_roundtrip():
# ARROW-4135
times = pd.to_datetime(['09:00', '09:30', '10:00', '10:30', '11:00',
'11:30', '12:00'])
df = pd.DataFrame({'time': [times.time]})
_roundtrip_pandas_dataframe(df, write_kwargs={})
@pytest.mark.pandas
def test_parquet_version_timestamp_differences():
i_s = pd.Timestamp('2010-01-01').value / 1000000000 # := 1262304000
d_s = np.arange(i_s, i_s + 10, 1, dtype='int64')
d_ms = d_s * 1000
d_us = d_ms * 1000
d_ns = d_us * 1000
a_s = pa.array(d_s, type=pa.timestamp('s'))
a_ms = pa.array(d_ms, type=pa.timestamp('ms'))
a_us = pa.array(d_us, type=pa.timestamp('us'))
a_ns = pa.array(d_ns, type=pa.timestamp('ns'))
names = ['ts:s', 'ts:ms', 'ts:us', 'ts:ns']
table = pa.Table.from_arrays([a_s, a_ms, a_us, a_ns], names)
# Using Parquet version 1.0, seconds should be coerced to milliseconds
# and nanoseconds should be coerced to microseconds by default
expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_us], names)
_check_roundtrip(table, expected)
# Using Parquet version 2.0, seconds should be coerced to milliseconds
# and nanoseconds should be retained by default
expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_ns], names)
_check_roundtrip(table, expected, version='2.0')
# Using Parquet version 1.0, coercing to milliseconds or microseconds
# is allowed
expected = pa.Table.from_arrays([a_ms, a_ms, a_ms, a_ms], names)
_check_roundtrip(table, expected, coerce_timestamps='ms')
# Using Parquet version 2.0, coercing to milliseconds or microseconds
# is allowed
expected = pa.Table.from_arrays([a_us, a_us, a_us, a_us], names)
_check_roundtrip(table, expected, version='2.0', coerce_timestamps='us')
# TODO: after pyarrow allows coerce_timestamps='ns', tests like the
# following should pass ...
# Using Parquet version 1.0, coercing to nanoseconds is not allowed
# expected = None
# with pytest.raises(NotImplementedError):
# _roundtrip_table(table, coerce_timestamps='ns')
# Using Parquet version 2.0, coercing to nanoseconds is allowed
# expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)
# _check_roundtrip(table, expected, version='2.0', coerce_timestamps='ns')
# For either Parquet version, coercing to nanoseconds is allowed
# if Int96 storage is used
expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)
_check_roundtrip(table, expected,
use_deprecated_int96_timestamps=True)
_check_roundtrip(table, expected, version='2.0',
use_deprecated_int96_timestamps=True)
@pytest.mark.pandas
def test_noncoerced_nanoseconds_written_without_exception(tempdir):
# ARROW-1957: the Parquet version 2.0 writer preserves Arrow
# nanosecond timestamps by default
n = 9
df = pd.DataFrame({'x': range(n)},
index=pd.date_range('2017-01-01', freq='1n', periods=n))
tb = pa.Table.from_pandas(df)
filename = tempdir / 'written.parquet'
try:
pq.write_table(tb, filename, version='2.0')
except Exception:
pass
assert filename.exists()
recovered_table = pq.read_table(filename)
assert tb.equals(recovered_table)
# Loss of data through coercion (without explicit override) still an error
filename = tempdir / 'not_written.parquet'
with pytest.raises(ValueError):
pq.write_table(tb, filename, coerce_timestamps='ms', version='2.0')
| apache-2.0 |
adrinjalali/Network-Classifier | read_tcga.py | 1 | 18166 | '''
license: GPLv3.
Adrin Jalali.
March 2014, Saarbruecken, Germany.
read TCGA data.
you probably want to use only the load_data function!
'''
import numpy as np
import os
import pickle
import subprocess
import glob
import graph_tool as gt;
from itertools import chain
from collections import defaultdict
from sklearn import cross_validation as cv;
from misc import *
from constants import *
def read_methylation_annotation():
tmp = read_csv(Globals.met_annot_file, skip_header=True, delimiter=',')
tmp = [[row[i] for i in [1, 4, 9, 16, 17, 11]] for row in tmp]
tmp = np.array(tmp)
promoter_meta = set(['TSS200', 'TSS1500', "5'UTR", '1stExon'])
promoter = [len(promoter_meta.intersection(row[5].split(';'))) > 0
for row in tmp]
boz = np.hstack((tmp, np.array(promoter).astype(int).reshape(-1,1)))
tmp2 = boz.view(dtype=[('TargetID', 'U367'),
('CHR', 'U367'),
('GeneNames', 'U367'),
('snp_hit', 'U367'),
('bwa_multi_hit', 'U367'),
('UCSC_REFGENE_REGION', 'U367'),
('is_promoter', 'U367')])
tmp2['CHR'][(tmp2['CHR'] == 'X') | (tmp2['CHR'] == 'Y')] = '23'
tmp2['CHR'][tmp2['CHR'] == 'NA'] = '24'
tmp3 = tmp2.astype([('TargetID', 'U367'),
('CHR', 'int32'),
('GeneNames', 'U367'),
('snp_hit', 'U367'),
('bwa_multi_hit', 'U367'),
('UCSC_REFGENE_REGION', 'U367'),
('is_promoter', 'bool')]).view(np.recarray)
return tmp3
def networkize_illumina450k(X, probe_names):
# read PPI network.
print('reading the network...')
table = read_csv(Globals.ppi_file, True);
refseq_ids = get_column(table, 0)
refseq_ids.extend(get_column(table, 3));
refseq_ids = list(set(refseq_ids));
interactions = np.array(table)[:,[0,3]]
del table
print('reading methylation annotation data...')
met_annot = read_methylation_annotation()
print('coordinate met_annot rows with probe_names')
tmp_list = list(met_annot.TargetID)
tmp_indices = list()
last_index = 0
for i in range(len(probe_names)):
#for i in range(100):
try:
index = next((j for j in range(last_index, len(tmp_list))
if tmp_list[j] == probe_names[i]))
except StopIteration:
index = next((j for j in range(0, len(tmp_list))
if tmp_list[j] == probe_names[i]))
tmp_indices.append(index)
last_index = index
met_annot = met_annot[np.array(tmp_indices),]
# should I filter probes according to ... ?
'''
site_idx = ((met_annot.CHR > 0) & (met_annot.CHR < 23) &
(met_annot.snp_hit == 'FALSE') &
(met_annot.bwa_multi_hit == 'FALSE')).reshape(-1)
'''
#site_idx = met_annot.is_promoter.reshape(-1)
#met_annot = met_annot[site_idx,]
#X = X[:,site_idx]
probe_genes = set(chain(*chain(*met_annot.GeneNames.strip().split(';')))) - {''}
# gene2met contains indices of met_annot for each gene.
gene2met = defaultdict(list)
met2gene = defaultdict(set)
for i in range(met_annot.shape[0]):
genes = set(chain(*met_annot[i].GeneNames.strip().split(';'))) - {''}
if (len(genes) > 0):
met2gene[met_annot[i].TargetID[0]] = genes
for gene in genes:
gene2met[gene].append(i)
print("refactoring betas into genename format...")
print("\tAlso calculating expression median for multiple mapped probes")
genes = refseq_ids
expressions_colgenes = list()
tmpX = np.empty([X.shape[0],0])
for gene in genes:
indices = gene2met[gene]
if (len(indices) == 0):
continue;
expressions_colgenes.append(gene)
new_col = np.median(X[:,indices], axis=1)
tmpX = np.append(tmpX, new_col.reshape([-1,1]), 1)
del indices
print("extracting common genes between expressions and network...");
usable_interaction_indices = [i for i in range(interactions.shape[0])
if interactions[i,0] in expressions_colgenes
and interactions[i,1] in expressions_colgenes]
interactions = interactions[usable_interaction_indices,:]
del usable_interaction_indices
print("creating graph from network data...");
g = gt.Graph(directed=False);
vlist = g.add_vertex(len(expressions_colgenes))
for i in range(interactions.shape[0]):
tmp_e = g.add_edge(expressions_colgenes.index(interactions[i,0]),
expressions_colgenes.index(interactions[i,1]))
del tmp_e, vlist
return(tmpX, g, np.array(expressions_colgenes))
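# Illustrative sketch (assumption): mapping a beta-value matrix onto the PPI
# network. ``betas`` is samples x probes and ``probe_names`` holds the
# matching Illumina 450k probe identifiers, as produced by
# load_450k_methylation below.
def _example_networkize(betas, probe_names):
    X_genes, graph, gene_names = networkize_illumina450k(betas, probe_names)
    return X_genes.shape, graph.num_vertices(), len(gene_names)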
def load_450k_methylation(data_dir, patient_codes, sample_type):
files = os.listdir(data_dir)
suffix = sample_type
col_names = np.empty(0)
used_samples = np.empty(0)
unused_samples = np.empty(0)
multiple_data_samples = np.empty(0)
i = 0
for name in patient_codes:
i += 1
print('processing %3d/%3d ' %(i, len(patient_codes)) + name)
matched = [f for f in files if f.find(name+'-'+suffix) > -1]
if (len(matched) > 1):
multiple_data_samples = np.append(multiple_data_samples, name)
continue
elif len(matched) == 0:
print('no files found.')
unused_samples = np.append(unused_samples, name)
continue
used_samples = np.append(used_samples, name)
matched = matched[0]
sample_data = np.array(read_csv(data_dir +
matched, skip_header = False))
data_skipped_lines = 2
sample_col_names = sample_data[data_skipped_lines:,0]
if col_names.shape[0] == 0:
col_names = sample_col_names
betas = np.empty((0,sample_col_names.shape[0]), dtype=float)
else:
if all(col_names == sample_col_names) == False:
raise RuntimeError("column names don't match")
v = sample_data[data_skipped_lines:, 1]
v[v == 'NA'] = -1
v = np.array(v, dtype=float)
v[v == -1] = np.nan
betas = np.vstack((betas, v.reshape(1,-1)))
indices = np.array([i for i in range(betas.shape[1])
if not any(np.isnan(betas[:,i]))])
betas = betas[:,indices]
col_names = col_names[indices]
sample_indices = np.array([list(patient_codes).index(used_samples[i])
for i in range(len(used_samples))])
return (sample_indices, col_names, betas,
{'unused_samples': unused_samples,
'multiple_data_samples': multiple_data_samples})
'''
X is the main matrix.
target_labels: {'vital_status': {-1: 'Dead', 1: 'Alive'},...}
patient_annot and patient_annot_colnames are patient annotation file data
and its column names
sample_indices are the indices of the patient_annot that make the X matrix
dump_dir is the output dir, in which a folder for each key in target_labels
will be created.
The function returns a dictionary having the same set of keys as
target_labels, and on each member of the dictionary we have:
(X, y, patient_annot, original_sample_indices)
it also saves the cross validations in two files, one for random 100 x 80% vs 20%
and one for batch based cross validation
'''
def dump_by_target_label(X, target_labels, patient_annot,
patient_annot_colnames, sample_indices, L,
dump_dir):
result = dict()
for key, value in target_labels.items():
print (key)
print(value)
target_index = list(patient_annot_colnames).index(key)
tmp_annot = patient_annot[sample_indices,:]
labels = tmp_annot[:,target_index]
y = np.zeros(len(sample_indices), dtype=int)
'''
        if no label is a prefix of another label, then I'll look them up
        in labels with "startswith"; otherwise an exact match is used.
'''
vague = False
for jkey in value.keys():
if isinstance(value[jkey], list):
jvalue = value[jkey]
else:
jvalue = [value[jkey]]
for kkey in value.keys():
if jkey <= kkey:
continue
print(jkey, kkey)
if isinstance(value[kkey], list):
kvalue = value[kkey]
else:
kvalue = [value[kkey]]
print(jvalue, kvalue)
for jl in jvalue:
for kl in kvalue:
if (jl.startswith(kl) or kl.startswith(jl)):
vague = True
        print('labels are vague:', vague)
for jkey, jvalue in value.items():
if (jkey == 0):
print("class label 0 is not allowed, maybe you meant 1,-1?")
continue
if isinstance(jvalue, list):
for t_label in jvalue:
print(jkey, t_label)
if vague:
y[labels == t_label] = jkey
else:
y[np.array([l.startswith(t_label) for l in labels])] = jkey
else:
print(jkey, jvalue)
if vague:
y[labels == jvalue] = jkey
else:
y[np.array([l.startswith(jvalue) for l in labels])] = jkey
final_sample_indices = (y != 0)
tmp_y = y[final_sample_indices]
tmp_X = X[final_sample_indices,]
tmp_annot = tmp_annot[final_sample_indices,:]
tmp_sample_indices = sample_indices[final_sample_indices]
if L is not None:
X_prime = tmp_X.dot(L)
else:
X_prime = None
tmp_dump_dir = dump_dir + '/' + key
if (not os.path.exists(tmp_dump_dir)):
os.mkdir(tmp_dump_dir)
np.savez(open(tmp_dump_dir + '/data.npz', 'wb'),
X = tmp_X, X_prime = X_prime, y = tmp_y,
patient_annot = tmp_annot,
original_sample_indices = tmp_sample_indices)
result[key] = (tmp_X, tmp_y, tmp_annot, tmp_sample_indices)
'''
save cross validation sets, both batch based and random.
'''
cvs = list(cv.StratifiedShuffleSplit(tmp_y, n_iter = 100, test_size = 0.2))
pickle.dump(cvs, open(tmp_dump_dir + '/normal_cvs.dmp', 'wb'))
sample_batches = tmp_annot[:,
list(patient_annot_colnames).index('batch_number')]
batches = np.unique(sample_batches)
cvs = list()
for i in range(len(batches)):
print('batch size:', sum(sample_batches == batches[i]))
cvs.append((np.arange(len(sample_batches))[sample_batches != batches[i]],
np.arange(len(sample_batches))[sample_batches == batches[i]]))
pickle.dump(cvs, open(tmp_dump_dir + '/batch_cvs.dmp', 'wb'))
return result
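# Illustrative sketch (assumption): the expected shape of ``target_labels``
# and a call to dump_by_target_label. The label strings are the examples
# given in the docstring above, not values read from real TCGA annotation
# files.
def _example_dump_by_target_label(X, patient_annot, patient_annot_colnames,
                                  sample_indices, L, dump_dir):
    target_labels = {'vital_status': {-1: 'Dead', 1: 'Alive'}}
    return dump_by_target_label(X, target_labels, patient_annot,
                                patient_annot_colnames, sample_indices, L,
                                dump_dir)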
'''
This function loads data from the input_dir. This input_dir is supposed to have
a folder for each data type, for example DNA_Methylation, and a folder for
clinical data, which is named Clinical. This is the standard folder structure
of the TCGA data. The function will find the patient information and the data.
In the Clinical section, both xml and biotab data types are required. XMLs are mostly
used for extracting the batch information in this code.
The function returns the output of dump_by_target_label, as well as the PPI graph
and the list of gene names of the graph.
target_labels is of the shape {'vital_status': {-1: 'Dead', 1: 'Alive'},...}
sample_type shows the type of the sample, for example main tumor, and is a suffix
to the patient code, for example 01A.
patient_annot_file can easily be found from the Clinical folder, no need to be given.
'''
def load_data(input_dir,
target_labels, sample_type=None, patient_annot_file=None,
final_dump_folder = None, networkize_data = False):
if (sample_type == None):
print("sample type must be given. For example 01A (as suffix to patient codes.)")
return
dump_dir = input_dir + '/processed'
if (not os.path.exists(dump_dir)):
os.mkdir(dump_dir)
if (patient_annot_file == None):
patient_file_candidates = glob.glob(input_dir + '/Clinical/Biotab/nationwidechildrens.org_clinical_patient*.txt')
if (len(patient_file_candidates) != 1):
print('ERROR: patient_file_candidates: ', patient_file_candidates)
return(None)
patient_annot_file = patient_file_candidates[0]
patient_annot_processed_file = dump_dir + '/patient_annot.npz'
betas_file = dump_dir + '/betas.npz'
processed_betas_file = dump_dir + '/betas-processed.npz'
gene_annot_file = dump_dir + '/genes.npz'
graph_dump_file = dump_dir + '/graph.xml.gz'
calculated_L_matrix = dump_dir + '/L.npz'
'''
here we load the annotation and batch information of the samples
'''
if (os.path.isfile(patient_annot_processed_file)):
data_file = np.load(patient_annot_processed_file)
patient_annot = data_file['patient_annot']
patient_annot_colnames = data_file['patient_annot_colnames']
patient_codes = data_file['patient_codes']
else:
patient_skipped_lines = 3
patient_data = np.array(read_csv(patient_annot_file, skip_header = False))
patient_annot_colnames = patient_data[0,:]
patient_annot = patient_data[patient_skipped_lines:,]
patient_codes = patient_data[patient_skipped_lines:,0]
xml_dir = input_dir + '/Clinical/XML'
'''
here I look for the admin:batch_number key in xml files of the patients,
extract that line, remove extra stuff with sed, and get a two column text
with patient ids and batch numbers.
'''
output = subprocess.check_output("grep \"admin:batch_number xsd_ver=\" %s/*_clinical*.xml | awk '{print $1 \"\t\" $3}' | sed \"s/.*clinical\.//g\" | sed \"s/\.xml:\t.*\\\">/\t/g\" | sed \"s/\..*//g\"" % (xml_dir),
shell=True,
universal_newlines=True).splitlines()
patient_batches_dict = {output[i].split('\t')[0]:output[i].split('\t')[1]
for i in range(len(output))}
patient_batches = np.zeros(len(patient_codes), dtype=int)
for i in range(len(patient_codes)):
patient_batches[i] = patient_batches_dict[patient_codes[i]]
patient_annot = np.hstack((patient_annot, patient_batches.reshape(-1,1)))
patient_annot_colnames = np.append(patient_annot_colnames, 'batch_number')
np.savez(open(patient_annot_processed_file, 'wb'),
patient_annot = patient_annot,
patient_annot_colnames = patient_annot_colnames,
patient_codes = patient_codes)
'''
in this section the methylation beta values are extracted and put into
a matrix loaded from 450k illumina chip.
'''
if (os.path.isfile(betas_file)):
data_file = np.load(betas_file)
betas = data_file['betas']
col_names = data_file['col_names']
sample_indices = data_file['methylation_45k_sample_indices']
        print('found betas_file, shape: %s' % (betas.shape.__str__()))
else:
data_dir = input_dir + '/DNA_Methylation/JHU_USC__HumanMethylation450/Level_3/'
if (os.path.exists(data_dir)):
sample_indices, col_names, betas, debug_info = \
load_450k_methylation(data_dir, patient_codes, sample_type)
print(debug_info)
np.savez(open(betas_file, 'wb'),
betas = betas, col_names = col_names,
methylation_45k_sample_indices = sample_indices)
"""
Don't use the PPI network if no network is needed, and return raw
beta values.
"""
if not networkize_data:
processed_data = dump_by_target_label(betas, target_labels, patient_annot,
patient_annot_colnames, sample_indices, None,
dump_dir)
return (processed_data, None, col_names)
'''
use the graph to map nodes to genes and get the graph itself.
'''
if (os.path.isfile(processed_betas_file)
and os.path.isfile(graph_dump_file)
and os.path.isfile(gene_annot_file)):
g = gt.load_graph(graph_dump_file)
data_file = np.load(processed_betas_file)
X = data_file['X']
data_file = np.load(gene_annot_file)
genes = data_file['genes']
print('processed data found, X: %s' % (X.shape.__str__()))
else:
X, g, genes = networkize_illumina450k(betas, col_names)
print (X.__class__)
print (genes.__class__)
print (g.__class__)
np.savez(open(processed_betas_file, 'wb'), X = X)
np.savez(open(gene_annot_file, 'wb'), genes=genes)
g.save(graph_dump_file)
if (os.path.isfile(calculated_L_matrix)):
data_file = np.load(calculated_L_matrix)
L = data_file['L']
        print('found L matrix, shape: %s' % (L.shape.__str__()))
else:
print("calculating L and transformation of the data...")
B = gt.spectral.laplacian(g)
M = np.identity(B.shape[0]) + Globals.beta * B
M_inv = np.linalg.inv(M)
L = np.linalg.cholesky(M_inv)
np.savez(open(calculated_L_matrix, 'wb'),
L = L)
if (final_dump_folder != None):
dump_dir = final_dump_folder
processed_data = dump_by_target_label(X, target_labels, patient_annot,
patient_annot_colnames, sample_indices, L,
dump_dir)
return (processed_data, g, genes)
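# Illustrative sketch (assumption): a typical call to load_data on a TCGA
# cohort laid out as described above. The input directory, label mapping and
# sample type below are placeholders, not values from a real dataset.
def _example_load_data():
    return load_data('/path/to/TCGA/cohort',
                     target_labels={'vital_status': {-1: 'Dead', 1: 'Alive'}},
                     sample_type='01A',
                     networkize_data=True)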
| gpl-3.0 |
mihaic/brainiak | brainiak/eventseg/event.py | 2 | 26617 | # Copyright 2020 Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Event segmentation using a Hidden Markov Model
Given an ROI timeseries, this class uses an annealed fitting procedure to
segment the timeseries into events with stable activity patterns. After
learning the signature activity pattern of each event, the model can then be
applied to other datasets to identify a corresponding sequence of events.
Full details are available in:
Christopher Baldassano, Janice Chen, Asieh Zadbood,
Jonathan W Pillow, Uri Hasson, Kenneth A Norman
Discovering event structure in continuous narrative perception and memory
Neuron, Volume 95, Issue 3, 709 - 721.e5
https://doi.org/10.1016/j.neuron.2017.06.041
This class also extends the model described in the Neuron paper:
1) It allows transition matrices that are composed of multiple separate
chains of events rather than a single linear path. This allows a model to
contain patterns for multiple event sequences (e.g. narratives), and
fit probabilities along each of these chains on a new, unlabeled timeseries.
To use this option, pass in an event_chain vector labeling which events
belong to each chain, define event patterns using set_event_patterns(),
then fit to a new dataset with find_events.
2) To obtain better fits when the underlying event structure contains
events that vary substantially in length, the split_merge option allows
the fit() function to re-distribute events during fitting. The number of
merge/split proposals is controlled by split_merge_proposals, which
controls how thorough versus fast the fitting process is.
"""
# Authors: Chris Baldassano and Cătălin Iordan (Princeton University)
import numpy as np
from scipy import stats
import logging
import copy
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_is_fitted, check_array
from sklearn.exceptions import NotFittedError
import itertools
from . import _utils as utils # type: ignore
logger = logging.getLogger(__name__)
__all__ = [
"EventSegment",
]
class EventSegment(BaseEstimator):
"""Class for event segmentation of continuous fMRI data
Parameters
----------
n_events: int
Number of segments to learn
step_var: Callable[[int], float] : default 4 * (0.98 ** (step - 1))
The Gaussian variance to use during fitting, as a function of the
number of steps. Should decrease slowly over time.
n_iter: int, default: 500
Maximum number of steps to run during fitting
event_chains: ndarray with length = n_events
Array with unique value for each separate chain of events, each linked
in the order they appear in the array
split_merge: bool, default: False
Determines whether merge/split proposals are used during fitting with
fit(). This can improve fitting performance when events are highly
uneven in size, but requires additional time
split_merge_proposals: int, default: 1
Number of merges and splits to consider at each step. Computation time
scales as O(proposals^2) so this should usually be a small value
Attributes
----------
p_start, p_end: length n_events+1 ndarray
initial and final prior distributions over events
P: n_events+1 by n_events+1 ndarray
HMM transition matrix
ll_ : ndarray with length = number of training datasets
Log-likelihood for training datasets over the course of training
segments_: list of (time by event) ndarrays
Learned (soft) segmentation for training datasets
event_var_ : float
Gaussian variance at the end of learning
event_pat_ : voxel by event ndarray
Learned mean patterns for each event
"""
def _default_var_schedule(step):
return 4 * (0.98 ** (step - 1))
def __init__(self, n_events=2,
step_var=_default_var_schedule,
n_iter=500, event_chains=None,
split_merge=False, split_merge_proposals=1):
self.n_events = n_events
self.step_var = step_var
self.n_iter = n_iter
self.split_merge = split_merge
self.split_merge_proposals = split_merge_proposals
if event_chains is None:
self.event_chains = np.zeros(n_events)
else:
self.event_chains = event_chains
def _fit_validate(self, X):
"""Validate input to fit()
Validate data passed to fit(). Includes a transpose operation to
change the row/column order of X and z-scoring in time.
Parameters
----------
X: time by voxel ndarray, or a list of such ndarrays
fMRI data to be segmented
Returns
-------
X: list of voxel by time ndarrays
"""
if len(np.unique(self.event_chains)) > 1:
raise RuntimeError("Cannot fit chains, use set_event_patterns")
# Copy X into a list and transpose
X = copy.deepcopy(X)
if type(X) is not list:
X = [X]
for i in range(len(X)):
X[i] = check_array(X[i])
X[i] = X[i].T
# Check that number of voxels is consistent across datasets
n_dim = X[0].shape[0]
for i in range(len(X)):
assert (X[i].shape[0] == n_dim)
# Double-check that data is z-scored in time
for i in range(len(X)):
X[i] = stats.zscore(X[i], axis=1, ddof=1)
return X
def fit(self, X, y=None):
"""Learn a segmentation on training data
Fits event patterns and a segmentation to training data. After
running this function, the learned event patterns can be used to
segment other datasets using find_events
Parameters
----------
X: time by voxel ndarray, or a list of such ndarrays
fMRI data to be segmented. If a list is given, then all datasets
are segmented simultaneously with the same event patterns
y: not used (added to comply with BaseEstimator definition)
Returns
-------
self: the EventSegment object
"""
X = self._fit_validate(X)
n_train = len(X)
n_dim = X[0].shape[0]
self.classes_ = np.arange(self.n_events)
# Initialize variables for fitting
log_gamma = []
for i in range(n_train):
log_gamma.append(np.zeros((X[i].shape[1], self.n_events)))
step = 1
best_ll = float("-inf")
self.ll_ = np.empty((0, n_train))
while step <= self.n_iter:
iteration_var = self.step_var(step)
# Based on the current segmentation, compute the mean pattern
# for each event
seg_prob = [np.exp(lg) / np.sum(np.exp(lg), axis=0)
for lg in log_gamma]
mean_pat = np.empty((n_train, n_dim, self.n_events))
for i in range(n_train):
mean_pat[i, :, :] = X[i].dot(seg_prob[i])
mean_pat = np.mean(mean_pat, axis=0)
# Based on the current mean patterns, compute the event
# segmentation
self.ll_ = np.append(self.ll_, np.empty((1, n_train)), axis=0)
for i in range(n_train):
logprob = self._logprob_obs(X[i], mean_pat, iteration_var)
log_gamma[i], self.ll_[-1, i] = self._forward_backward(logprob)
if step > 1 and self.split_merge:
curr_ll = np.mean(self.ll_[-1, :])
self.ll_[-1, :], log_gamma, mean_pat = \
self._split_merge(X, log_gamma, iteration_var, curr_ll)
# If log-likelihood has started decreasing, undo last step and stop
if np.mean(self.ll_[-1, :]) < best_ll:
self.ll_ = self.ll_[:-1, :]
break
self.segments_ = [np.exp(lg) for lg in log_gamma]
self.event_var_ = iteration_var
self.event_pat_ = mean_pat
best_ll = np.mean(self.ll_[-1, :])
logger.debug("Fitting step %d, LL=%f", step, best_ll)
step += 1
return self
def _logprob_obs(self, data, mean_pat, var):
"""Log probability of observing each timepoint under each event model
Computes the log probability of each observed timepoint being
generated by the Gaussian distribution for each event pattern
Parameters
----------
data: voxel by time ndarray
fMRI data on which to compute log probabilities
mean_pat: voxel by event ndarray
Centers of the Gaussians for each event
var: float or 1D array of length equal to the number of events
Variance of the event Gaussians. If scalar, all events are
assumed to have the same variance
Returns
-------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian
"""
n_vox = data.shape[0]
t = data.shape[1]
# z-score both data and mean patterns in space, so that Gaussians
# are measuring Pearson correlations and are insensitive to overall
# activity changes
data_z = stats.zscore(data, axis=0, ddof=1)
mean_pat_z = stats.zscore(mean_pat, axis=0, ddof=1)
logprob = np.empty((t, self.n_events))
if type(var) is not np.ndarray:
var = var * np.ones(self.n_events)
for k in range(self.n_events):
logprob[:, k] = -0.5 * n_vox * np.log(
2 * np.pi * var[k]) - 0.5 * np.sum(
(data_z.T - mean_pat_z[:, k]).T ** 2, axis=0) / var[k]
logprob /= n_vox
return logprob
def _forward_backward(self, logprob):
"""Runs forward-backward algorithm on observation log probs
Given the log probability of each timepoint being generated by
each event, run the HMM forward-backward algorithm to find the
probability that each timepoint belongs to each event (based on the
transition priors in p_start, p_end, and P)
See https://en.wikipedia.org/wiki/Forward-backward_algorithm for
mathematical details
Parameters
----------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian
Returns
-------
log_gamma : time by event ndarray
Log probability of each timepoint belonging to each event
ll : float
Log-likelihood of fit
"""
logprob = copy.copy(logprob)
t = logprob.shape[0]
logprob = np.hstack((logprob, float("-inf") * np.ones((t, 1))))
# Initialize variables
log_scale = np.zeros(t)
log_alpha = np.zeros((t, self.n_events + 1))
log_beta = np.zeros((t, self.n_events + 1))
# Set up transition matrix, with final sink state
self.p_start = np.zeros(self.n_events + 1)
self.p_end = np.zeros(self.n_events + 1)
self.P = np.zeros((self.n_events + 1, self.n_events + 1))
label_ind = np.unique(self.event_chains, return_inverse=True)[1]
n_chains = np.max(label_ind) + 1
# For each chain of events, link them together and then to sink state
for c in range(n_chains):
chain_ind = np.nonzero(label_ind == c)[0]
self.p_start[chain_ind[0]] = 1 / n_chains
self.p_end[chain_ind[-1]] = 1 / n_chains
p_trans = (len(chain_ind) - 1) / t
if p_trans >= 1:
raise ValueError('Too few timepoints')
for i in range(len(chain_ind)):
self.P[chain_ind[i], chain_ind[i]] = 1 - p_trans
if i < len(chain_ind) - 1:
self.P[chain_ind[i], chain_ind[i+1]] = p_trans
else:
self.P[chain_ind[i], -1] = p_trans
self.P[-1, -1] = 1
# Forward pass
for i in range(t):
if i == 0:
log_alpha[0, :] = self._log(self.p_start) + logprob[0, :]
else:
log_alpha[i, :] = self._log(np.exp(log_alpha[i - 1, :])
.dot(self.P)) + logprob[i, :]
log_scale[i] = np.logaddexp.reduce(log_alpha[i, :])
log_alpha[i] -= log_scale[i]
# Backward pass
log_beta[-1, :] = self._log(self.p_end) - log_scale[-1]
for i in reversed(range(t - 1)):
obs_weighted = log_beta[i + 1, :] + logprob[i + 1, :]
offset = np.max(obs_weighted)
log_beta[i, :] = offset + self._log(
np.exp(obs_weighted - offset).dot(self.P.T)) - log_scale[i]
# Combine and normalize
log_gamma = log_alpha + log_beta
log_gamma -= np.logaddexp.reduce(log_gamma, axis=1, keepdims=True)
ll = np.sum(log_scale[:(t - 1)]) + np.logaddexp.reduce(
log_alpha[-1, :] + log_scale[-1] + self._log(self.p_end))
log_gamma = log_gamma[:, :-1]
return log_gamma, ll
def _log(self, x):
"""Modified version of np.log that manually sets values <=0 to -inf
Parameters
----------
x: ndarray of floats
Input to the log function
Returns
-------
log_ma: ndarray of floats
log of x, with x<=0 values replaced with -inf
"""
xshape = x.shape
_x = x.flatten()
y = utils.masked_log(_x)
return y.reshape(xshape)
def set_event_patterns(self, event_pat):
"""Set HMM event patterns manually
Rather than fitting the event patterns automatically using fit(), this
function allows them to be set explicitly. They can then be used to
find corresponding events in a new dataset, using find_events().
Parameters
----------
event_pat: voxel by event ndarray
"""
if event_pat.shape[1] != self.n_events:
raise ValueError(("Number of columns of event_pat must match "
"number of events"))
self.event_pat_ = event_pat.copy()
def find_events(self, testing_data, var=None, scramble=False):
"""Applies learned event segmentation to new testing dataset
After fitting an event segmentation using fit() or setting event
patterns directly using set_event_patterns(), this function finds the
same sequence of event patterns in a new testing dataset.
Parameters
----------
testing_data: timepoint by voxel ndarray
fMRI data to segment based on previously-learned event patterns
var: float or 1D ndarray of length equal to the number of events
default: uses variance that maximized training log-likelihood
Variance of the event Gaussians. If scalar, all events are
assumed to have the same variance. If fit() has not previously
been run, this must be specifed (cannot be None).
scramble: bool : default False
If true, the order of the learned events are shuffled before
fitting, to give a null distribution
Returns
-------
segments : time by event ndarray
The resulting soft segmentation. segments[t,e] = probability
that timepoint t is in event e
test_ll : float
Log-likelihood of model fit
"""
if var is None:
if not hasattr(self, 'event_var_'):
raise NotFittedError(("Event variance must be provided, if "
"not previously set by fit()"))
else:
var = self.event_var_
if not hasattr(self, 'event_pat_'):
raise NotFittedError(("The event patterns must first be set "
"by fit() or set_event_patterns()"))
if scramble:
mean_pat = self.event_pat_[:, np.random.permutation(self.n_events)]
else:
mean_pat = self.event_pat_
logprob = self._logprob_obs(testing_data.T, mean_pat, var)
lg, test_ll = self._forward_backward(logprob)
segments = np.exp(lg)
return segments, test_ll
def predict(self, X):
"""Applies learned event segmentation to new testing dataset
Alternative function for segmenting a new dataset after using
fit() to learn a sequence of events, to comply with the sklearn
Classifier interface
Parameters
----------
X: timepoint by voxel ndarray
fMRI data to segment based on previously-learned event patterns
Returns
-------
Event label for each timepoint
"""
check_is_fitted(self, ["event_pat_", "event_var_"])
X = check_array(X)
segments, test_ll = self.find_events(X)
return np.argmax(segments, axis=1)
def calc_weighted_event_var(self, D, weights, event_pat):
"""Computes normalized weighted variance around event pattern
Utility function for computing variance in a training set of weighted
event examples. For each event, the sum of squared differences for all
timepoints from the event pattern is computed, and then the weights
specify how much each of these differences contributes to the
variance (normalized by the number of voxels).
Parameters
----------
D : timepoint by voxel ndarray
fMRI data for which to compute event variances
weights : timepoint by event ndarray
specifies relative weights of timepoints for each event
event_pat : voxel by event ndarray
mean event patterns to compute variance around
Returns
-------
ev_var : ndarray of variances for each event
"""
Dz = stats.zscore(D, axis=1, ddof=1)
ev_var = np.empty(event_pat.shape[1])
for e in range(event_pat.shape[1]):
# Only compute variances for weights > 0.1% of max weight
nz = weights[:, e] > np.max(weights[:, e])/1000
sumsq = np.dot(weights[nz, e],
np.sum(np.square(Dz[nz, :] -
event_pat[:, e]), axis=1))
ev_var[e] = sumsq/(np.sum(weights[nz, e]) -
np.sum(np.square(weights[nz, e])) /
np.sum(weights[nz, e]))
ev_var = ev_var / D.shape[1]
return ev_var
def model_prior(self, t):
"""Returns the prior probability of the HMM
Runs forward-backward without any data, showing the prior distribution
of the model (for comparison with a posterior).
Parameters
----------
t: int
Number of timepoints
Returns
-------
segments : time by event ndarray
segments[t,e] = prior probability that timepoint t is in event e
test_ll : float
Log-likelihood of model (data-independent term)"""
lg, test_ll = self._forward_backward(np.zeros((t, self.n_events)))
segments = np.exp(lg)
return segments, test_ll
def _split_merge(self, X, log_gamma, iteration_var, curr_ll):
"""Attempt to improve log-likelihood with a merge/split
The simulated annealing used in fit() is susceptible to getting
stuck in a local minimum if there are some very short events. This
function attempts to find
a) pairs of neighboring events that are highly similar, to merge
b) events that can be split into two dissimilar events
It then tests to see whether simultaneously merging one of the
pairs from (a) and splitting one of the events from (b) can improve
the log-likelihood. The number of (a)/(b) pairs tested is determined
by the split_merge_proposals class attribute.
Parameters
----------
X: list of voxel by time ndarrays
fMRI datasets being fit
log_gamma : list of time by event ndarrays
Log probability of each timepoint belonging to each event,
for each dataset
iteration_var : float
Current variance in simulated annealing
curr_ll: float
Log-likelihood of current model
Returns
-------
return_ll : ndarray with length equal to length of X
Log-likelihood after merge/split (same as curr_ll if no
merge/split improved curr_ll)
return_lg : list of time by event ndarrays
Log probability of each timepoint belonging to each event,
for each dataset (same as log_gamma if no merge/split
improved curr_ll)
return_mp : voxel by event ndarray
Mean patterns of events (after possible merge/split)
"""
# Compute current probabilities and mean patterns
n_train = len(X)
n_dim = X[0].shape[0]
seg_prob = [np.exp(lg) / np.sum(np.exp(lg), axis=0)
for lg in log_gamma]
mean_pat = np.empty((n_train, n_dim, self.n_events))
for i in range(n_train):
mean_pat[i, :, :] = X[i].dot(seg_prob[i])
mean_pat = np.mean(mean_pat, axis=0)
# For each event, merge its probability distribution
# with the next event, and also split its probability
# distribution at its median into two separate events.
# Use these new event probability distributions to compute
# merged and split event patterns.
merge_pat = np.empty((n_train, n_dim, self.n_events))
split_pat = np.empty((n_train, n_dim, 2 * self.n_events))
for i, sp in enumerate(seg_prob): # Iterate over datasets
m_evprob = np.zeros((sp.shape[0], sp.shape[1]))
s_evprob = np.zeros((sp.shape[0], 2 * sp.shape[1]))
cs = np.cumsum(sp, axis=0)
for e in range(sp.shape[1]):
# Split distribution at midpoint and normalize each half
mid = np.where(cs[:, e] >= 0.5)[0][0]
cs_first = cs[mid, e] - sp[mid, e]
cs_second = 1 - cs_first
s_evprob[:mid, 2 * e] = sp[:mid, e] / cs_first
s_evprob[mid:, 2 * e + 1] = sp[mid:, e] / cs_second
# Merge distribution with next event distribution
m_evprob[:, e] = sp[:, e:(e + 2)].mean(1)
# Weight data by distribution to get event patterns
merge_pat[i, :, :] = X[i].dot(m_evprob)
split_pat[i, :, :] = X[i].dot(s_evprob)
# Average across datasets
merge_pat = np.mean(merge_pat, axis=0)
split_pat = np.mean(split_pat, axis=0)
# Correlate the current event patterns with the split and
# merged patterns
merge_corr = np.zeros(self.n_events)
split_corr = np.zeros(self.n_events)
for e in range(self.n_events):
split_corr[e] = np.corrcoef(mean_pat[:, e],
split_pat[:, (2 * e):(2 * e + 2)],
rowvar=False)[0, 1:3].max()
merge_corr[e] = np.corrcoef(merge_pat[:, e],
mean_pat[:, e:(e + 2)],
rowvar=False)[0, 1:3].min()
merge_corr = merge_corr[:-1]
# Find best merge/split candidates
# A high value of merge_corr indicates that a pair of events are
# very similar to their merged pattern, and are good candidates for
# being merged.
# A low value of split_corr indicates that an event's pattern is
# very dissimilar from the patterns in its first and second half,
# and is a good candidate for being split.
best_merge = np.flipud(np.argsort(merge_corr))
best_merge = best_merge[:self.split_merge_proposals]
best_split = np.argsort(split_corr)
best_split = best_split[:self.split_merge_proposals]
# For every pair of merge/split candidates, attempt the merge/split
# and measure the log-likelihood. If any are better than curr_ll,
# accept this best merge/split
mean_pat_last = mean_pat.copy()
return_ll = curr_ll
return_lg = copy.deepcopy(log_gamma)
return_mp = mean_pat.copy()
for m_e, s_e in itertools.product(best_merge, best_split):
if m_e == s_e or m_e+1 == s_e:
# Don't attempt to merge/split same event
continue
# Construct new set of patterns with merge/split
mean_pat_ms = np.delete(mean_pat_last, s_e, axis=1)
mean_pat_ms = np.insert(mean_pat_ms, [s_e, s_e],
split_pat[:, (2 * s_e):(2 * s_e + 2)],
axis=1)
mean_pat_ms = np.delete(mean_pat_ms,
[m_e + (s_e < m_e), m_e + (s_e < m_e) + 1],
axis=1)
mean_pat_ms = np.insert(mean_pat_ms, m_e + (s_e < m_e),
merge_pat[:, m_e], axis=1)
# Measure log-likelihood with these new patterns
ll_ms = np.zeros(n_train)
log_gamma_ms = list()
for i in range(n_train):
logprob = self._logprob_obs(X[i],
mean_pat_ms, iteration_var)
lg, ll_ms[i] = self._forward_backward(logprob)
log_gamma_ms.append(lg)
# If better than best ll so far, save to return to fit()
if ll_ms.mean() > return_ll:
return_mp = mean_pat_ms.copy()
return_ll = ll_ms
for i in range(n_train):
return_lg[i] = log_gamma_ms[i].copy()
logger.debug("Identified merge %d,%d and split %d",
m_e, m_e+1, s_e)
return return_ll, return_lg, return_mp
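# Illustrative usage sketch (not part of the original module): fitting an
# EventSegment model on one dataset and transferring the learned event
# patterns to a second dataset with find_events(). The data here are random
# and only meant to show the call sequence described in the docstrings above.
def _example_usage():
    rng = np.random.RandomState(0)
    train = rng.randn(200, 50)   # timepoints by voxels
    test = rng.randn(180, 50)
    es = EventSegment(n_events=5)
    es.fit(train)
    segments, log_likelihood = es.find_events(test)
    return segments.argmax(axis=1), log_likelihood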
| apache-2.0 |
astrofrog/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/ext/_bundled/mplutils.py | 17 | 11507 | """
Utility Routines for Working with Matplotlib Objects
====================================================
"""
import itertools
import io
import base64
import numpy as np
import warnings
import matplotlib
from matplotlib.colors import colorConverter
from matplotlib.path import Path
from matplotlib.markers import MarkerStyle
from matplotlib.transforms import Affine2D
from matplotlib import ticker
def color_to_hex(color):
"""Convert matplotlib color code to hex color code"""
if color is None or colorConverter.to_rgba(color)[3] == 0:
return 'none'
else:
rgb = colorConverter.to_rgb(color)
return '#{0:02X}{1:02X}{2:02X}'.format(*(int(255 * c) for c in rgb))
def _many_to_one(input_dict):
"""Convert a many-to-one mapping to a one-to-one mapping"""
return dict((key, val)
for keys, val in input_dict.items()
for key in keys)
LINESTYLES = _many_to_one({('solid', '-', (None, None)): 'none',
('dashed', '--'): "6,6",
('dotted', ':'): "2,2",
('dashdot', '-.'): "4,4,2,4",
('', ' ', 'None', 'none'): None})
def get_dasharray(obj):
"""Get an SVG dash array for the given matplotlib linestyle
Parameters
----------
obj : matplotlib object
The matplotlib line or path object, which must have a get_linestyle()
method which returns a valid matplotlib line code
Returns
-------
dasharray : string
The HTML/SVG dasharray code associated with the object.
"""
if obj.__dict__.get('_dashSeq', None) is not None:
return ','.join(map(str, obj._dashSeq))
else:
ls = obj.get_linestyle()
dasharray = LINESTYLES.get(ls, 'not found')
if dasharray == 'not found':
warnings.warn("line style '{0}' not understood: "
"defaulting to solid line.".format(ls))
dasharray = LINESTYLES['solid']
return dasharray
PATH_DICT = {Path.LINETO: 'L',
Path.MOVETO: 'M',
Path.CURVE3: 'S',
Path.CURVE4: 'C',
Path.CLOSEPOLY: 'Z'}
def SVG_path(path, transform=None, simplify=False):
"""Construct the vertices and SVG codes for the path
Parameters
----------
path : matplotlib.Path object
transform : matplotlib transform (optional)
if specified, the path will be transformed before computing the output.
Returns
-------
vertices : array
The shape (M, 2) array of vertices of the Path. Note that some Path
codes require multiple vertices, so the length of these vertices may
be longer than the list of path codes.
path_codes : list
A length N list of single-character path codes, N <= M. Each code is
a single character, in ['L','M','S','C','Z']. See the standard SVG
path specification for a description of these.
"""
if transform is not None:
path = path.transformed(transform)
vc_tuples = [(vertices if path_code != Path.CLOSEPOLY else [],
PATH_DICT[path_code])
for (vertices, path_code)
in path.iter_segments(simplify=simplify)]
if not vc_tuples:
# empty path is a special case
return np.zeros((0, 2)), []
else:
vertices, codes = zip(*vc_tuples)
vertices = np.array(list(itertools.chain(*vertices))).reshape(-1, 2)
return vertices, list(codes)
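# Illustrative usage sketch, not part of the original module; the 'o' marker
# is an arbitrary example of a path to convert.
def _example_svg_path():
    style = MarkerStyle('o')
    vertices, codes = SVG_path(style.get_path(), style.get_transform())
    # vertices is an (M, 2) array; codes is a list drawn from 'L','M','S','C','Z'
    return vertices.shape, codes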
def get_path_style(path, fill=True):
"""Get the style dictionary for matplotlib path objects"""
style = {}
style['alpha'] = path.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['edgecolor'] = color_to_hex(path.get_edgecolor())
if fill:
style['facecolor'] = color_to_hex(path.get_facecolor())
else:
style['facecolor'] = 'none'
style['edgewidth'] = path.get_linewidth()
style['dasharray'] = get_dasharray(path)
style['zorder'] = path.get_zorder()
return style
def get_line_style(line):
"""Get the style dictionary for matplotlib line objects"""
style = {}
style['alpha'] = line.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['color'] = color_to_hex(line.get_color())
style['linewidth'] = line.get_linewidth()
style['dasharray'] = get_dasharray(line)
style['zorder'] = line.get_zorder()
return style
def get_marker_style(line):
"""Get the style dictionary for matplotlib marker objects"""
style = {}
style['alpha'] = line.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['facecolor'] = color_to_hex(line.get_markerfacecolor())
style['edgecolor'] = color_to_hex(line.get_markeredgecolor())
style['edgewidth'] = line.get_markeredgewidth()
style['marker'] = line.get_marker()
markerstyle = MarkerStyle(line.get_marker())
markersize = line.get_markersize()
markertransform = (markerstyle.get_transform() +
Affine2D().scale(markersize, -markersize))
style['markerpath'] = SVG_path(markerstyle.get_path(),
markertransform)
style['markersize'] = markersize
style['zorder'] = line.get_zorder()
return style
def get_text_style(text):
"""Return the text style dict for a text instance"""
style = {}
style['alpha'] = text.get_alpha()
if style['alpha'] is None:
style['alpha'] = 1
style['fontsize'] = text.get_size()
style['color'] = color_to_hex(text.get_color())
style['halign'] = text.get_horizontalalignment() # left, center, right
style['valign'] = text.get_verticalalignment() # baseline, center, top
style['malign'] = text._multialignment # text alignment when '\n' in text
style['rotation'] = text.get_rotation()
style['zorder'] = text.get_zorder()
return style
def get_axis_properties(axis):
"""Return the property dictionary for a matplotlib.Axis instance"""
props = {}
label1On = axis._major_tick_kw.get('label1On', True)
if isinstance(axis, matplotlib.axis.XAxis):
if label1On:
props['position'] = "bottom"
else:
props['position'] = "top"
elif isinstance(axis, matplotlib.axis.YAxis):
if label1On:
props['position'] = "left"
else:
props['position'] = "right"
else:
raise ValueError("{0} should be an Axis instance".format(axis))
# Use tick values if appropriate
locator = axis.get_major_locator()
props['nticks'] = len(locator())
if isinstance(locator, ticker.FixedLocator):
props['tickvalues'] = list(locator())
else:
props['tickvalues'] = None
# Find tick formats
formatter = axis.get_major_formatter()
if isinstance(formatter, ticker.NullFormatter):
props['tickformat'] = ""
elif isinstance(formatter, ticker.FixedFormatter):
props['tickformat'] = list(formatter.seq)
elif not any(label.get_visible() for label in axis.get_ticklabels()):
props['tickformat'] = ""
else:
props['tickformat'] = None
# Get axis scale
props['scale'] = axis.get_scale()
# Get major tick label size (assumes that's all we really care about!)
labels = axis.get_ticklabels()
if labels:
props['fontsize'] = labels[0].get_fontsize()
else:
props['fontsize'] = None
# Get associated grid
props['grid'] = get_grid_style(axis)
return props
def get_grid_style(axis):
gridlines = axis.get_gridlines()
if axis._gridOnMajor and len(gridlines) > 0:
color = color_to_hex(gridlines[0].get_color())
alpha = gridlines[0].get_alpha()
dasharray = get_dasharray(gridlines[0])
return dict(gridOn=True,
color=color,
dasharray=dasharray,
alpha=alpha)
else:
return {"gridOn": False}
def get_figure_properties(fig):
return {'figwidth': fig.get_figwidth(),
'figheight': fig.get_figheight(),
'dpi': fig.dpi}
def get_axes_properties(ax):
props = {'axesbg': color_to_hex(ax.patch.get_facecolor()),
'axesbgalpha': ax.patch.get_alpha(),
'bounds': ax.get_position().bounds,
'dynamic': ax.get_navigate(),
'axison': ax.axison,
'frame_on': ax.get_frame_on(),
'axes': [get_axis_properties(ax.xaxis),
get_axis_properties(ax.yaxis)]}
for axname in ['x', 'y']:
axis = getattr(ax, axname + 'axis')
domain = getattr(ax, 'get_{0}lim'.format(axname))()
lim = domain
if isinstance(axis.converter, matplotlib.dates.DateConverter):
scale = 'date'
try:
import pandas as pd
from pandas.tseries.converter import PeriodConverter
except ImportError:
pd = None
if (pd is not None and isinstance(axis.converter,
PeriodConverter)):
_dates = [pd.Period(ordinal=int(d), freq=axis.freq)
for d in domain]
domain = [(d.year, d.month - 1, d.day,
d.hour, d.minute, d.second, 0)
for d in _dates]
else:
domain = [(d.year, d.month - 1, d.day,
d.hour, d.minute, d.second,
d.microsecond * 1E-3)
for d in matplotlib.dates.num2date(domain)]
else:
scale = axis.get_scale()
if scale not in ['date', 'linear', 'log']:
raise ValueError("Unknown axis scale: "
"{0}".format(axis[axname].get_scale()))
props[axname + 'scale'] = scale
props[axname + 'lim'] = lim
props[axname + 'domain'] = domain
return props
def iter_all_children(obj, skipContainers=False):
"""
    Returns an iterator over all children and nested children using
    obj's get_children() method
    if skipContainers is True, only childless objects are returned.
"""
if hasattr(obj, 'get_children') and len(obj.get_children()) > 0:
for child in obj.get_children():
if not skipContainers:
yield child
# could use `yield from` in python 3...
for grandchild in iter_all_children(child, skipContainers):
yield grandchild
else:
yield obj
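# Illustrative usage sketch, not part of the original module; it builds a
# throwaway figure purely to show how iter_all_children walks the artist tree.
def _example_iter_all_children():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    leaves = list(iter_all_children(fig, skipContainers=True))
    plt.close(fig)
    return len(leaves)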
def get_legend_properties(ax, legend):
handles, labels = ax.get_legend_handles_labels()
visible = legend.get_visible()
return {'handles': handles, 'labels': labels, 'visible': visible}
def image_to_base64(image):
"""
Convert a matplotlib image to a base64 png representation
Parameters
----------
image : matplotlib image object
The image to be converted.
Returns
-------
image_base64 : string
The UTF8-encoded base64 string representation of the png image.
"""
ax = image.axes
binary_buffer = io.BytesIO()
# image is saved in axes coordinates: we need to temporarily
# set the correct limits to get the correct image
lim = ax.axis()
ax.axis(image.get_extent())
image.write_png(binary_buffer)
ax.axis(lim)
binary_buffer.seek(0)
return base64.b64encode(binary_buffer.read()).decode('utf-8')
| bsd-2-clause |
dinos66/termAnalysis | forTateDataset/oldScripts/termAnalysisDynamicTmp.py | 1 | 25190 | # -*- coding: utf-8 -*-
'''
Create adjacency matrices and analyse terms dynamically
'''
print('Create dynamic adjacency matrices and ESOMs')
#--------------------------------------------
#run create_Info_Files.py before running this
#--------------------------------------------
import pickle, time, igraph, glob, os, somoclu, collections
import itertools, codecs, seaborn, math, pprint, random, re
from matplotlib import rc
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import interactive
from scipy.spatial import distance
from matplotlib.pyplot import cm
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.colors as colors
import seaborn as sns
import sklearn.cluster as clusterAlgs
#--------------------------------------------
print(time.asctime( time.localtime(time.time()) ))
t = time.time()
edgeReadPath = './data/artworks_edges/dynamic'
adjMatWritePath = './data/artworks_adjacencyMats/dynamic'
distMatWritePath = './data/artworks_distanceMats/dynamic'
potentMatWritePath = './data/artworks_potentialMats/dynamic'
gravMatWritePath = './data/artworks_gravityMats/dynamic'
umatrixWritePath = './data/artworks_UMX/dynamic'
figWritePath = './data/artworks_figs/dynamic'
greenwichFigWritePath = figWritePath+'/greenwich'
greenwichUmatrixWritePath = umatrixWritePath+'/greenwich'
gephiWritePath = './data/artworks_gephi/dynamic'
statsWritePath = './data/artworks_stats'
if not os.path.exists('./data/artworks_tmp'):
os.makedirs('./data/artworks_tmp')
if not os.path.exists(adjMatWritePath):
os.makedirs(adjMatWritePath)
if not os.path.exists(distMatWritePath):
os.makedirs(distMatWritePath)
if not os.path.exists(potentMatWritePath):
    os.makedirs(potentMatWritePath)
if not os.path.exists(gravMatWritePath):
    os.makedirs(gravMatWritePath)
if not os.path.exists(umatrixWritePath):
os.makedirs(umatrixWritePath)
if not os.path.exists(figWritePath):
os.makedirs(figWritePath)
if not os.path.exists(gephiWritePath):
os.makedirs(gephiWritePath)
if not os.path.exists(greenwichFigWritePath):
os.makedirs(greenwichFigWritePath)
if not os.path.exists(greenwichUmatrixWritePath):
os.makedirs(greenwichUmatrixWritePath)
LVLs = ['lvlA']#['lvl1','lvl2','lvl3','lvlA'] #'lvl1','lvl2','lvl3',
heatmapFonts = [4]#[12,7,6,4]#12,7,6,
yearPeriods = ['2000s'] #['1800s','2000s']
trueYearsIni = [1964]#[1800,1964]
termLabelDict = pickle.load(open('./data/artworks_verification_labels/WmatrixLabelDict.pck','rb'))
def recRank(mylist):#Perform the Reciprocal Rank Fusion for a list of rank values
finscore = []
mylist=[x+1 for x in mylist]
for rank in mylist:
finscore.append(1/(20+rank))
return sum(finscore)
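# Illustrative check of the reciprocal-rank fusion above, not part of the
# original script; the rank list is made up.
def exampleRecRank():
    # a term ranked 1st, 3rd and 2nd across three timeslots:
    # ranks shift to 2, 4, 3 and the fused score is 1/22 + 1/24 + 1/23
    return recRank([1, 3, 2])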
def toroidDistance(myarray,width,height):
somDim2 = []
for idx,x in enumerate(myarray[:-1]):
newxa = myarray[idx+1:]
for nx in newxa:
somDim2.append(np.sqrt(min(abs(x[0] - nx[0]), width - abs(x[0] - nx[0]))**2 + min(abs(x[1] - nx[1]), height - abs(x[1]-nx[1]))**2))
SD = np.array(somDim2)
return distance.squareform(SD)
def toroidDistanceSingle(coords1,coords2,width,height):
return (np.sqrt(min(abs(coords1[0] - coords2[0]), width - abs(coords1[0] - coords2[0]))**2 + min(abs(coords1[1] - coords2[1]), height - abs(coords1[1]-coords2[1]))**2))
def toroidCoordinateFinder(coorx,distx,coory,disty,w,h):
if coorx+distx>=w:
ncx = coorx+distx-w
elif coorx+distx<0:
ncx = w+coorx+distx
else:
ncx = coorx+distx
if coory+disty>=h:
ncy = coory+disty-h
elif coory+disty<0:
ncy = h+coory+disty
else:
ncy = coory+disty
return (ncx,ncy)
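# Illustrative sketch of the toroidal helpers above, not part of the original
# script; the 4x3 grid size and the points are arbitrary.
def exampleToroidDistance():
    pts = np.array([[0, 0], [3, 2], [1, 1]])
    dmat = toroidDistance(pts, 4, 3)                     # pairwise wrap-around distances
    single = toroidDistanceSingle((0, 0), (3, 2), 4, 3)  # equals dmat[0, 1]
    return dmat, single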
for lIdx,lvl in enumerate(LVLs):
heatMapFont = heatmapFonts[lIdx]
for idy,years in enumerate(yearPeriods):
files = glob.glob(edgeReadPath+'/'+years+lvl+'_*.csv')
files.sort(key=lambda x: os.path.getmtime(x))
try:
edgeDict = pickle.load(open('./data/artworks_tmp/edgeDictDynamic'+years+lvl+'.pck','rb'))
except:
edgeDict = {'uniquePersistentTerms':[]}
termsYears = []
for filename in files:
periodIdx = filename[filename.index(lvl)+5:-4]
tmpTerms = []
edgeDict[periodIdx] = {}
with codecs.open(filename, 'r','utf8') as f:
# print(filename)
adjList = []
next(f)
for line in f:
line = line.split(',')
tripletuple = line[:2]
tmpTerms.extend(tripletuple)
tripletuple.append(int(line[2].strip()))
adjList.append(tuple(tripletuple))
edgeDict[periodIdx]['adjList'] = adjList
termsYears.append(list(set(tmpTerms)))
print('There are %s unique nodes for period %s' %(len(termsYears[-1]),periodIdx))
repetitiveTerms = collections.Counter(list(itertools.chain.from_iterable(termsYears)))
edgeDict['allTerms'] = list(repetitiveTerms.keys())
edgeDict['uniquePersistentTerms'] = [x for x,v in repetitiveTerms.items() if v == len(files)]
edgeDict['uniquePersistentTerms'].sort()
pass
with open(statsWritePath+'/'+years+lvl+'_unique_persistent_terms.txt','w') as f:
for word in edgeDict['uniquePersistentTerms']:
f.write(word+'\n')
statement = ('For %s in the %s there are %s unique persistent terms globally out of %s unique terms' %(lvl,years,len(edgeDict['uniquePersistentTerms']),len(edgeDict['allTerms'])))
time.sleep(5)
print(statement)
'''set up SOM'''#--------------------------------------------------------------------
## n_columns, n_rows = 200, 120
## lablshift = 1
if lvl == 'lvl1':
n_columns, n_rows = 20, 12
lablshift = .2
elif lvl == 'lvl2':
n_columns, n_rows = 40, 24
lablshift = .3
elif lvl == 'lvl3':
n_columns, n_rows = 50, 30
lablshift = .4
elif lvl == 'lvlA':
n_columns, n_rows = 60, 40
lablshift = .5
epochs2 = 3
som = somoclu.Somoclu(n_columns, n_rows, maptype="toroid", initialization="pca")
savefig = True
SOMdimensionsString = 'x'.join([str(x) for x in [n_columns,n_rows]])
#--------------------------------------------------------------------------------
yearList = []
count = 0
termPrRanks, termAuthRanks, termHubRanks, termBetweenRanks = {}, {}, {}, {}
for filename in files:
periodIdx = filename[filename.index(lvl)+5:-4]
## if periodIdx != '7':
## continue
yearList.append(periodIdx)
print(periodIdx)
# try:
# gUndirected = edgeDict[periodIdx]['graph']
# except:
gUndirected=igraph.Graph.Full(0, directed = False)
gUndirected.es['weight'] = 1
'''ReRanking the nodes based on their reciprocal rank between timeslots'''
try:
gUndirected.add_vertices(edgeDict['topTermsByPR'])
print('used top Terms By PageRank')
# print(edgeDict['topTermsByPR'][:5])
except:
gUndirected.add_vertices(edgeDict['uniquePersistentTerms'])
print('used alphabetically ranked terms')
pass
myEdges,myWeights = [], []
nodesWithEdges = []
for x in edgeDict[periodIdx]['adjList']:
if x[0] in edgeDict['uniquePersistentTerms'] and x[1] in edgeDict['uniquePersistentTerms']:
myEdges.append((x[0],x[1]))
myWeights.append(x[2])
nodesWithEdges.extend(x[:2])
print('Full No of edges: %s and pruned No of edges %s' %(len(edgeDict[periodIdx]['adjList']),len(myEdges)))
gUndirected.add_edges(myEdges)
gUndirected.es["weight"] = myWeights
edgeDict[periodIdx]['graph'] = gUndirected
gUndirected.vs['label'] = gUndirected.vs['name']
nodes = gUndirected.vs['name']
# print(nodes[:5])
#--------------------------------------------------------------------------------
'''Extract centrality measures'''#-----------------------------------------------
#--------------------------------------------------------------------------------
edgeDict[periodIdx]['term'] = {'degree':{},'pageRank':{},'maxnormPageRank':{}, 'minnormPageRank':{}, 'authority':{}, 'hub':{}, 'betweenness':{}}
pageRank = gUndirected.pagerank(weights = 'weight', directed=False)
authority = gUndirected.authority_score(weights = 'weight') #HITS authority score
hub = gUndirected.hub_score(weights = 'weight')#HITS hub score
betweenness = gUndirected.betweenness(weights = 'weight', directed = False)
# print('extracted pagerank')
maxPR = max(pageRank)
maxnormPageRank = [x/maxPR for x in pageRank]
minPR = min(pageRank)
minnormPageRank = [x/minPR for x in pageRank]
maxminPr = max(minnormPageRank)
minmaxPRdiff = maxPR-minPR
minmaxnormPageRank = [1+3*((x-minPR)/minmaxPRdiff) for x in pageRank]
for x in nodes:
edgeDict[periodIdx]['term']['pageRank'][x] = pageRank[nodes.index(x)]
edgeDict[periodIdx]['term']['maxnormPageRank'][x] = maxnormPageRank[nodes.index(x)]
edgeDict[periodIdx]['term']['minnormPageRank'][x] = minnormPageRank[nodes.index(x)]
edgeDict[periodIdx]['term']['degree'][x] = gUndirected.degree(x)
edgeDict[periodIdx]['term']['authority'][x] = authority[nodes.index(x)]
edgeDict[periodIdx]['term']['hub'][x] = hub[nodes.index(x)]
edgeDict[periodIdx]['term']['betweenness'][x] = betweenness[nodes.index(x)]
tmpPRrank = sorted(edgeDict[periodIdx]['term']['pageRank'], key=lambda k: [edgeDict[periodIdx]['term']['pageRank'][k],edgeDict[periodIdx]['term']['degree'][k],k],reverse =True)
for x in nodes:
if x not in termPrRanks:
termPrRanks[x] = [tmpPRrank.index(x)]
else:
termPrRanks[x].append(tmpPRrank.index(x))
tmpAuthrank = sorted(edgeDict[periodIdx]['term']['authority'], key=lambda k: [edgeDict[periodIdx]['term']['authority'][k],edgeDict[periodIdx]['term']['degree'][k],k],reverse =True)
for x in nodes:
if x not in termAuthRanks:
termAuthRanks[x] = [tmpAuthrank.index(x)]
else:
termAuthRanks[x].append(tmpAuthrank.index(x))
tmpHubrank = sorted(edgeDict[periodIdx]['term']['hub'], key=lambda k: [edgeDict[periodIdx]['term']['hub'][k],edgeDict[periodIdx]['term']['degree'][k],k],reverse =True)
for x in nodes:
if x not in termHubRanks:
termHubRanks[x] = [tmpHubrank.index(x)]
else:
termHubRanks[x].append(tmpHubrank.index(x))
tmpBetweenrank = sorted(edgeDict[periodIdx]['term']['betweenness'], key=lambda k: [edgeDict[periodIdx]['term']['betweenness'][k],edgeDict[periodIdx]['term']['degree'][k],k],reverse =True)
for x in nodes:
if x not in termBetweenRanks:
termBetweenRanks[x] = [tmpBetweenrank.index(x)]
else:
termBetweenRanks[x].append(tmpBetweenrank.index(x))
# -----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
'''creating undirected adjacency mat'''#--------------------------------------------------------
#-----------------------------------------------------------------------------------------------
if not os.path.exists(adjMatWritePath):
os.makedirs(adjMatWritePath)
print('creating adjacency matrix')
adjMat = gUndirected.get_adjacency(attribute='weight')
adjMat = np.array(adjMat.data)
print('writing undirected adjacency matrix to file')
with open(adjMatWritePath+'/AdjMat'+years+lvl+'_'+periodIdx+'.txt', 'w') as d:
d.write('Term\t'+'\t'.join(nodes)+'\n')
for s in nodes:
distLine = [str(x) for x in adjMat[nodes.index(s)].tolist()]
d.write(s+'\t'+'\t'.join(distLine)+'\n')
#-----------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------
'''SOM data extraction from here on------------------------------------------------------------------------------------'''
# ------------------------------------------------------------------------------------------------------------------------
'''Extract Self Organizing Maps of undirected weighted adj mats'''#change filename depending on labeled or numbered terms
nummedOrNot = ''#'nummed' are the labels numbers or text (leave blank)?
labelFormat = 'code' #switch terms by Wmatrix code or label?
df = pd.read_table(adjMatWritePath+'/'+nummedOrNot+'AdjMat'+years+lvl+'_'+periodIdx+'.txt', sep="\t", header=0,index_col=0)
dfmax = df.max()
dfmax[dfmax == 0] = 1
df = df / dfmax
originallabels = df.index.tolist()
# print(originallabels[:5])
labels = originallabels # labels = [termLabelDict[nodes[x]][labelFormat] for x in originallabels] #switch terms by Wmatrix code or label?
som.update_data(df.values)
U, s, V = np.linalg.svd(df.values, full_matrices=False)
if periodIdx == yearList[0]:
epochs = 10
radius0 = 0
scale0 = 0.1
else:
radius0 = n_rows//5
scale0 = 0.03
epochs = epochs2
#-------clustering params---------------
# algorithm = clusterAlgs.SpectralClustering()
clusterAlgLabel = 'KMeans8'# KMeans8 , SpectralClustering
#---------------------------------------
if savefig:
if not os.path.exists(figWritePath+'/Clusters/'+clusterAlgLabel+'/SOMs/'+SOMdimensionsString+'_epochs'+str(epochs2)):
os.makedirs(figWritePath+'/Clusters/'+clusterAlgLabel+'/SOMs/'+SOMdimensionsString+'_epochs'+str(epochs2))
SOMfilename = figWritePath+'/Clusters/'+clusterAlgLabel+'/SOMs/'+SOMdimensionsString+'_epochs'+str(epochs2)+'/SOM_'+nummedOrNot+'AdjMat'+years+lvl+'_'+periodIdx+'.png'
SOMfilenameNoLabels = figWritePath+'/Clusters/'+clusterAlgLabel+'/SOMs/'+SOMdimensionsString+'_epochs'+str(epochs2)+'/noLabelsSOM_AdjMat'+years+lvl+'_'+periodIdx+'.png'
# SOMfilenameNoBMUs = figWritePath+'/Clusters/'+clusterAlgLabel+'/SOMs/'+SOMdimensionsString+'_epochs'+str(epochs2)+'/noBMUsSOM_AdjMat'+years+lvl+'_'+periodIdx+'.png'
else:
SOMfilename = None
som.train(epochs=epochs, radius0=radius0, scale0=scale0)
#----------------------clustering-----------
try:
som.cluster(algorithm=algorithm)
print('Clustering algorithm employed: %s' %clusterAlgLabel)
except:
som.cluster()
print('Clustering algorithm employed: K-means with 8 centroids')
pass
#----------------------clustering-----------
rc('font', **{'size': 11}); figsize = (20, 20/float(n_columns/n_rows))
som.view_umatrix(figsize = figsize, colormap="Spectral_r", bestmatches=True, labels=labels,filename=SOMfilename)
plt.close()
som.view_umatrix(figsize = figsize, colormap="Spectral_r", bestmatches=True, filename=SOMfilenameNoLabels)
plt.close()
# som.view_umatrix(figsize = figsize, colormap="Spectral_r", filename=SOMfilenameNoBMUs)
# plt.close()
edgeDict[periodIdx]['somCoords'] = {SOMdimensionsString:som.bmus}
colors = []
for bm in som.bmus:
colors.append(som.clusters[bm[1], bm[0]])
# areas = [200]*len(som.bmus)
areas = [x*70 for x in minmaxnormPageRank]
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
'''write and show the umatrix (umx)'''#---------------------------------------------------------
#-----------------------------------------------------------------------------------------------
## somUmatrix = edgeDict[periodIdx]['somUmatrix'][SOMdimensionsString]
## print('writing umatrix to file')
## np.savetxt(umatrixWritePath+'/umx'+years+lvl+'_'+periodIdx+'.umx',somUmatrix,delimiter='\t', newline='\n',header='% '+ '%s %s'%(n_rows,n_columns))
##
## print('writing BMU coords to file')
## with open(umatrixWritePath+'/umx'+years+lvl+'_'+periodIdx+'.bm','w') as f:
## with open(umatrixWritePath+'/umx'+years+lvl+'_'+periodIdx+'.names','w') as fn:
## f.write('% '+'%s %s\n' %(n_rows,n_columns))
## fn.write('% '+str(len(nodes))+'\n')
## for idx,coos in enumerate(edgeDict[periodIdx]['somCoords'][SOMdimensionsString]):
## f.write('%s %s %s\n' %(idx,coos[1],coos[0]))
## fn.write('%s %s %s\n' %(idx,nodes[idx],nodes[idx]))
##
## print('plotting umatrix 3D surface')
## fig = plt.figure()
## ax = fig.gca(projection='3d')
## X = np.arange(0, n_columns, 1)
## Y = np.arange(0, n_rows, 1)
## X, Y = np.meshgrid(X, Y)
## N=somUmatrix/somUmatrix.max()
## surf = ax.plot_surface(X, Y, somUmatrix, facecolors=cm.jet(N),rstride=1, cstride=1)#,facecolors=cm.jet(somUmatrix) cmap=cm.coolwarm, linewidth=0, antialiased=False)
## m = cm.ScalarMappable(cmap=cm.jet)
## m.set_array(somUmatrix)
## plt.colorbar(m, shrink=0.5, aspect=5)
## plt.title('SOM umatrix 3D surface vizualization (Level '+lvl+' terms | 5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
## mng = plt.get_current_fig_manager()
## mng.window.state('zoomed')
## interactive(True)
## plt.show()
## fig.savefig(figWritePath+'/SOM Umatrices/umxSurf'+years+lvl+'_'+periodIdx+'.png',bbox_inches='tight')
## plt.close()
## interactive(False)
#-----------------------------------------------------------------------------------------------
'''Plotting BMU coordinates with labels'''#-----------------------------------------------------
#-----------------------------------------------------------------------------------------------
## labelFormat = 'code'
## fig, ax = plt.subplots()
## xDimension = [x[0] for x in edgeDict[periodIdx]['somCoords'][SOMdimensionsString]]#[:10]]
## yDimension = [x[1] for x in edgeDict[periodIdx]['somCoords'][SOMdimensionsString]]#[:10]]
## plt.scatter(xDimension,yDimension, c=colors, s = areas, alpha = 0.7)
## labels = [str(colors[x])+'_'+termLabelDict[" ".join(re.findall("[a-zA-Z]+", nodes[x]))][labelFormat] for x in range(len(xDimension))]
## doneLabs = set([''])
## for label, x, y in zip(labels, xDimension, yDimension):
## lblshiftRatio = 2
## labFinshift = ''
## while labFinshift in doneLabs:
## potentialPositions = [(x, y+lablshift), (x+lblshiftRatio*lablshift, y), (x-lblshiftRatio*lablshift, y), (x+lblshiftRatio*lablshift, y+lblshiftRatio*lablshift),
## (x-lblshiftRatio*lablshift, y+lblshiftRatio*lablshift), (x+lblshiftRatio*lablshift, y-lblshiftRatio*lablshift), (x+lblshiftRatio*lablshift, y+lblshiftRatio*lablshift),
## (x-lblshiftRatio*lablshift, y+lblshiftRatio*lablshift)]
## for pP in potentialPositions:
## labFinshift = pP
## if labFinshift not in doneLabs:
## break
## lblshiftRatio+=1
## doneLabs.add(labFinshift)
## plt.annotate(label, xy = (x, y), xytext = labFinshift, textcoords = 'data', ha = 'center', va = 'center',bbox = dict(boxstyle = 'round,pad=0.1', fc = 'white', alpha = 0.4))
## lIdx+=1
##
## xCc = [x[1] for x in som.centroidBMcoords]
## yCc = [x[0] for x in som.centroidBMcoords]
## plt.scatter(xCc,yCc, c= range(len(som.centroidBMcoords)), s= [1000]*len(som.centroidBMcoords), alpha = 0.4)
##
## plt.xlim(0,n_columns)
## plt.ylim(0,n_rows)
## ax.invert_yaxis()
## plt.title('Labeled SOM. Level '+lvl+' terms, timeslot '+periodIdx+' (5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
## mng = plt.get_current_fig_manager()
## mng.window.state('zoomed')
## interactive(True)
## plt.show()
## fig.savefig(figWritePath+'/Clusters/'+clusterAlgLabel+'/SOMs/'+SOMdimensionsString+'_epochs'+str(epochs2)+'/SOM_Wmatrix'+labelFormat+'LabeledAdjMat'+years+lvl+'_'+periodIdx+'.png',bbox_inches='tight')
## plt.close()
## interactive(False)
#-----------------------------------------------------------------------------------------------
'''pageRank and HITS term fluctuation'''
numOfPlots = [5, 10, 20]
marker, color = ['*', '+', 'o','d','h','p','s','v','^','d'], ['g','r','m','c','y','k']#line, ["-","--","-.",":"] #list(colors.cnames.keys())
marker.sort()
color.sort()
asmarker = itertools.cycle(marker)
ascolor = itertools.cycle(color)
# asline = itertools.cycle(line)
if not os.path.exists(figWritePath+'/centrality fluctuations over time/PageRank'):
os.makedirs(figWritePath+'/centrality fluctuations over time/PageRank')
os.makedirs(figWritePath+'/centrality fluctuations over time/HITS')
os.makedirs(figWritePath+'/centrality fluctuations over time/Betweenness')
allPeriods = list(edgeDict.keys())
allPeriods.remove('uniquePersistentTerms')
allPeriods.remove('allTerms')
try:
allPeriods.remove('topTermsByPR')
except:
pass
allPeriods.sort()
termPRRankDict = {}
termPRSequences = {}
termAuthRankDict = {}
termAuthSequences = {}
termHubRankDict = {}
termHubSequences = {}
termBetweenRankDict = {}
termBetweenSequences = {}
for x in nodes:
prSequence, authSequence, hubSequence, betweenSequence = [], [] ,[], []
for p in allPeriods:
prSequence.append(edgeDict[p]['term']['pageRank'][x])
authSequence.append(edgeDict[p]['term']['authority'][x])
hubSequence.append(edgeDict[p]['term']['hub'][x])
betweenSequence.append(edgeDict[p]['term']['betweenness'][x])
termPRSequences[x] = prSequence
termPRRankDict[x] = recRank(termPrRanks[x])
termAuthSequences[x] = authSequence
termAuthRankDict[x] = recRank(termAuthRanks[x])
termHubSequences[x] = hubSequence
termHubRankDict[x] = recRank(termHubRanks[x])
termBetweenSequences[x] = betweenSequence
termBetweenRankDict[x] = recRank(termBetweenRanks[x])
termPRRanked = sorted(termPRRankDict, key=termPRRankDict.get, reverse=True)
termAuthRanked = sorted(termAuthRankDict, key=termAuthRankDict.get, reverse=True)
termHubRanked = sorted(termHubRankDict, key=termHubRankDict.get, reverse=True)
termBetweenRanked = sorted(termBetweenRankDict, key=termBetweenRankDict.get, reverse=True)
edgeDict['topTermsByPR'] = termPRRanked
pickle.dump(edgeDict,open('./data/artworks_tmp/edgeDictDynamic'+years+lvl+'.pck','wb'), protocol = 2)
elapsed = time.time() - t
print('Total time Elapsed: %.2f seconds' % elapsed)
| apache-2.0 |
magnastrazh/NEUCOGAR | nest/serotonin/research/C/nest-2.10.0/topology/examples/test_3d_exp.py | 13 | 2642 | # -*- coding: utf-8 -*-
#
# test_3d_exp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module
EXPERIMENTAL example of 3d layer.
3d layers are currently not supported, use at your own risk!
Hans Ekkehard Plesser, UMB
'''
import nest
import pylab
import random
import nest.topology as topo
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
pylab.ion()
nest.ResetKernel()
# generate list of 1000 (x,y,z) triplets
pos = [[random.uniform(-0.5,0.5), random.uniform(-0.5,0.5), random.uniform(-0.5,0.5)]
for j in range(1000)]
l1 = topo.CreateLayer({'extent': [1.5, 1.5, 1.5], # must specify 3d extent AND center
'center': [0., 0., 0.],
'positions': pos,
'elements': 'iaf_neuron'})
# visualize
#xext, yext = nest.GetStatus(l1, 'topology')[0]['extent']
#xctr, yctr = nest.GetStatus(l1, 'topology')[0]['center']
# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*topo.GetPosition(nest.GetChildren(l1)[0]))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b', edgecolor='none')
# Gaussian connections in full volume [-0.75,0.75]**3
topo.ConnectLayers(l1, l1,
{'connection_type': 'divergent', 'allow_autapses': False,
'mask': {'volume': {'lower_left': [-0.75,-0.75,-0.75], 'upper_right': [0.75,0.75,0.75]}},
'kernel':{'exponential': {'c': 0., 'a': 1., 'tau': 0.25}}})
# show connections from center element
# sender shown in red, targets in green
ctr=topo.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*topo.GetTargetPositions(ctr,l1)[0])
xctr, yctr, zctr = topo.GetPosition(ctr)[0]
ax.scatter([xctr],[yctr],[zctr],s=40, facecolor='r', edgecolor='none')
ax.scatter(xtgt,ytgt,ztgt,s=40, facecolor='g', edgecolor='g')
tgts=topo.GetTargetNodes(ctr,l1)[0]
d=topo.Distance(ctr,tgts)
plt.figure()
plt.hist(d, 25)
#plt.show()
| gpl-2.0 |
MTgeophysics/mtpy | legacy/tftools.py | 1 | 63495 | # -*- coding: utf-8 -*-
"""
Created on Mon May 03 14:53:54 2010
@author: a1185872
"""
import numpy as np
import scipy.signal as sps
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
def padzeros(f, npad=None, padpattern=None):
"""
    padzeros(f) will return the array f padded with zeros to the next
    power of 2 for faster fft processing, or to length npad if given.
Inputs:
f = array to pad
npad = length to pad to defaults to next power of two
padpattern = pattern to pad with default is zero
Outputs:
fpad = array f padded to length npad with padpattern
"""
# make f an array
f = np.array(f)
# check dimensions of f
try:
n, m = f.shape
except ValueError:
n = f.shape[0]
m = 0
if npad is None:
power = np.log2(n)
fpow = np.floor(power)
if power != fpow:
npad = 2**(fpow + 1)
else:
npad = 2**power
else:
pass
if m != 0:
fpad = np.zeros((npad, m), dtype=type(f[0, 0]))
fpad[0:n, m - 1] = f[0:n, m - 1]
if padpattern is not None:
fpad[n:npad, m - 1] = padpattern
else:
fpad = np.zeros(npad, dtype=type(f[0]))
fpad[0:n] = f[0:n]
if padpattern is not None:
fpad[n:npad] = padpattern
return fpad
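# Illustrative usage sketch, not part of the original module; pads a length-6
# array out to 8 samples.
def _example_padzeros():
    f = np.arange(6)
    return padzeros(f, npad=8)  # last two entries are zero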
def sfilter(f, fcutoff=10., w=10.0, dt=.001):
"""
    Will apply a sinc filter of width w to the function f by multiplying in
the frequency domain. Returns filtered function
Inputs:
f = array to filter
        fcutoff = cutoff frequency
w = length of filter
dt = sampling time (s)
Outputs:
filtfunc = filtered function
"""
tshift = float(w) / 2.
fpad = padzeros(f)
Fpad = np.fft.fft(fpad)
fc = fcutoff
t = np.arange(start=-tshift, stop=tshift, step=dt)
filt = np.zeros(len(fpad))
fs = 2 * fc * np.sinc(2 * t * fc)
norm = sum(fs)
filt[0:len(t)] = fs / norm
Filt = np.fft.fft(filt)
Filtfunc = Fpad * Filt
filtfunc = np.fft.ifft(Filtfunc)
filtfunc = filtfunc[len(t) / 2:len(f) + len(t) / 2]
return filtfunc
def dctrend(f):
"""
dctrend(f) will remove a dc trend from the function f.
Inputs:
f = array to dctrend
Outputs:
fdc = array f with dc component removed
"""
fdc = sps.detrend(f)
return fdc
def normalizeL2(f):
"""
normalizeL2(f) will return the function f normalized by the L2 norm ->
f/(sqrt(sum(abs(x_i)^2))).
Inputs:
f = array to be normalized
Outputs:
fnorm = array f normalized in L2 sense
"""
f = np.array(f)
fsum = np.sum(np.abs(f))
if fsum == 0:
fnorm = f
else:
fnorm = f / np.sqrt(np.sum(np.abs(f)**2))
return fnorm
def decimatef(f, m):
"""
    Will decimate a function by the factor m. First an 8th order Chebyshev
    type I filter with a cutoff frequency of .8/m is applied in both
    directions to minimize any phase distortion and remove any aliasing. Note
    that decimation factors above 10 will typically result in bad coefficients,
    so if your decimation factor is more than 10, repeat the decimation until
    the desired factor is reached.
Inputs:
f = array to be decimated
m = decimation factor
Outputs:
fdec = array f decimated by factor m
"""
n = len(f)
fdec = sps.resample(f, n / m, window='hanning')
# n=len(f)
# nout=np.ceil(n/m)
# nfilt=8
# rip=.05
#
# #make a cheybeshev1 zero-phase filter with cuttoff frequency of .8/m
# b,a=sps.iirfilter(nfilt,.8/m,rp=rip,btype='low',ftype='cheby1',output='ba')
# ffilt=sps.filtfilt(b,a,f)
# nbeg=n-m*nout
# fdec=np.array([ffilt[ii] for ii in np.arange(start=nbeg,stop=int(n),step=m)])
return fdec
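# Illustrative usage sketch, not part of the original module; decimates a
# 1000-sample random series by a factor of 4 with the resample-based
# implementation above.
def _example_decimatef():
    fx = np.random.randn(1000)
    return decimatef(fx, 4)  # 250 samples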
def dwindow(window):
"""
Calculates the derivative of the given window
Input:
window = some sort of window function
Output:
dwin = derivative of window
"""
h = window
nh = len(h)
lh = (nh - 1) / 2
stepheight = (h[0] + h[-1]) / 2.
ramp = float((h[-1] - h[0])) / nh
h2 = np.zeros(nh + 2)
h2[1:nh + 1] = h - stepheight - ramp * \
np.arange(start=-lh, stop=lh + 1, step=1)
dwin = (h2[2:nh + 2] - h2[0:nh]) / 2. + ramp
dwin[0] = dwin[0] + stepheight
dwin[-1] = dwin[-1] - stepheight
return dwin
def gausswin(winlen, alpha=2.5):
"""
    gausswin will compute a Gaussian window of length winlen whose width is
    set by alpha
    Inputs:
        winlen = length of desired window
        alpha = reciprocal of the normalized standard deviation of the window;
                larger alpha gives a narrower window
Outputs:
gwin = gaussian window
"""
lh = (winlen - 1) / 2 + 1 - np.remainder(winlen, 2)
gt = np.arange(start=-lh, stop=lh + 1, step=1)
gwin = np.exp(-.5 * (alpha * gt / float(lh))**2)
return gwin
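# Illustrative usage sketch, not part of the original module; a 65-point
# Gaussian window that peaks at 1.0 in the centre.
def _example_gausswin():
    g = gausswin(65, alpha=2.5)
    return len(g), g.max()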
def wvdas(fx):
"""
    wvdas(fx) will compute the analytic signal for WVD as defined by \
J. M. O' Toole, M. Mesbah, and B. Boashash, (2008), "A New Discrete Analytic\
Signal for Reducing Aliasing in the Discrete Wigner-Ville Distribution", \
IEEE Trans. on Signal Processing,
Inputs:
fx = signal to compute anlytic signal for with length N
Outputs:
fxa = analytic signal of fx with length 2*N
"""
n = len(fx)
# pad the time series with zeros
fxp = padzeros(fx, npad=2 * n)
# compute the fourier transform
FX = np.fft.fft(fxp)
# apply analytic signal
FX[1:n - 1] = 2 * FX[1:n - 1]
FX[n:] = 0
# inverse fourier transform and set anything outside of length n to zero
fxa = np.fft.ifft(FX)
fxa[n:] = 0
return fxa
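# Illustrative usage sketch, not part of the original module; the analytic
# signal returned by wvdas is twice the input length with the second half
# forced to zero.
def _example_wvdas():
    fx = np.cos(2 * np.pi * 5 * np.arange(256) / 256.)
    fxa = wvdas(fx)
    return len(fxa)  # 512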
def stft(fx, nh=2**8, tstep=2**7, ng=1, df=1.0, nfbins=2**10):
"""stft(fx,nh=2**8,tstep=2**7,ng=1,df=1.0) will calculate the spectrogam of
the given function by calculating the fft of a window of length nh at each
time instance with an interval of tstep. The frequency resolution is nfbins
Can compute the cross STFT by inputting fx as [fx1,fx2]
Inputs:
fx = the function to have a spectrogram computed for can be two functions
input as [fx1,fx2]
nh = window length for each time step
tstep = time step between short windows
ng = smoothing window along frequency plane should be odd
df = sampling frequency
nfbins = number of frequency bins
Outputs:
tfarray = spectrogram in units of amplitude
tlst = time instance array where each window was calculated
flst = frequency array containing only positive frequencies
"""
# get length of input time series if there is two columns
if isinstance(fx, list):
fx = np.array(fx)
try:
fn, fm = fx.shape
if fm < fn:
fm, fn = fx.shape
except ValueError:
fn = fx.shape[0]
fm = 1
if fm > 1:
fx = fx.reshape(fn)
else:
fx = fx.reshape(fn)
# make a hanning window to minimize aliazing and Gibbs effect of short time
# windows
h = normalizeL2(np.hanning(nh))
# make a hanning window to smooth in frequency domain
if ng != 1:
if np.remainder(ng, 2) != 1:
ng = ng - 1
print 'ng forced to be odd as ng-1'
else:
pass
g = normalizeL2(np.hanning(ng))
else:
pass
# make time step list
tlst = np.arange(start=0, stop=fn - nh + 1, step=tstep)
# make a frequency list for plotting exporting only positive frequencies
df = float(df)
# get only positive frequencies
flst = np.fft.fftfreq(nfbins, 1 / df)[0:nfbins / 2]
# initialize the TFD array
tfarray = np.zeros((nfbins / 2, len(tlst)), dtype='complex128')
fa = sps.hilbert(dctrend(fx))
for place, ii in enumerate(tlst):
fxwin = fa[ii:ii + nh] * h
# get only positive frequencies
FXwin = np.fft.fft(padzeros(fxwin, npad=nfbins))[:nfbins / 2]
# smooth in frequency plane
if ng != 1:
FXwin = np.convolve(
padzeros(
FXwin,
npad=len(FXwin) +
ng -
1),
g,
'valid')
else:
pass
# pull out only positive quadrant, flip array for plotting
tfarray[:, place] = FXwin[::-1]
return tfarray, tlst, flst
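# Illustrative usage sketch, not part of the original module; the test signal
# and window sizes are arbitrary.
def _example_stft():
    df = 256.
    t = np.arange(0, 4, 1. / df)
    fx = np.sin(2 * np.pi * (10 + 5 * t) * t)    # slow chirp
    tfarray, tlst, flst = stft(fx, nh=2**6, tstep=2**4, df=df, nfbins=2**8)
    return tfarray.shape, len(tlst), len(flst)   # (128, 61), 61, 128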
def reassignedstft(fx, nh=2**6 - 1, tstep=2**5, nfbins=2**10, df=1.0, alpha=4,
threshold=None):
"""
reassignedstft(fx,nh=2**5-1,tstep=2**8,nfbins=2**10,df=1.0,alpha=20) will
compute the reassigned spectrogram by estimating the center of gravity of
the signal and condensing dispersed energy back to that location.
Inputs:
fx = time series to be analyzed
nh = length of gaussian window, should be odd
tstep = time step for each window calculation
nfbins = number of frequency bins to calculate, note result will be
length nfbins/2
df = sampling frequency (Hz)
alpha = reciprocal of full width half max of gaussian window
threshold = threshold value for reassignment
Outputs:
rtfarray = reassigned spectrogram in units of amplitude
        tlst = array of time instances where windows were calculated for plotting
flst = array of frequencies for plotting
stft = standard spectrogram in units of amplitude
"""
# make sure fx is type array
fx = np.array(fx)
# compute length of fx
nx = len(fx)
# make sure window length is odd
if np.remainder(nh, 2) == 0:
nh = nh + 1
# compute gaussian window
h = gausswin(nh, alpha=alpha)
# h=np.hanning(nh)
lh = (nh - 1) / 2
# compute ramp window
th = h * np.arange(start=-lh, stop=lh + 1, step=1)
# compute derivative of window
dh = dwindow(h)
# make a time list of indexes
tlst = np.arange(start=0, stop=nx, step=tstep)
nt = len(tlst)
# make a frequency list
flst = np.fft.fftfreq(nfbins, 1. / df)[nfbins / 2:]
# initialize some time-frequency arrays
tfr = np.zeros((nfbins, nt), dtype='complex128')
tf2 = np.zeros((nfbins, nt), dtype='complex128')
tf3 = np.zeros((nfbins, nt), dtype='complex128')
# compute components for reassignment
for ii, tt in enumerate(tlst):
# create a time shift list
tau = np.arange(start=-min([np.round(nx / 2.), lh, tt - 1]),
stop=min([np.round(nx / 2.), lh, nx - tt - 1]) + 1)
# compute the frequency spots to be calculated
ff = np.remainder(nfbins + tau, nfbins)
xlst = tt + tau
hlst = lh + tau
normh = np.sqrt(np.sum(abs(h[hlst])**2))
tfr[ff, ii] = fx[xlst] * h[hlst].conj() / normh
tf2[ff, ii] = fx[xlst] * th[hlst].conj() / normh
tf3[ff, ii] = fx[xlst] * dh[hlst].conj() / normh
# compute Fourier Transform
spec = np.fft.fft(tfr, axis=0)
spect = np.fft.fft(tf2, axis=0)
specd = np.fft.fft(tf3, axis=0)
# get only positive frequencies
spec = spec[nfbins / 2:, :]
spect = spect[nfbins / 2:, :]
specd = specd[nfbins / 2:, :]
# check to make sure no spurious zeros floating around
szf = np.where(abs(spec) < 1.E-6)
spec[szf] = 0.0
zerofind = np.nonzero(abs(spec))
twspec = np.zeros((nfbins / 2, nt), dtype='float')
dwspec = np.zeros((nfbins / 2, nt), dtype='float')
twspec[zerofind] = np.round(np.real(spect[zerofind] / spec[zerofind]) / 1)
dwspec[zerofind] = np.round(np.imag((nfbins / 2.) * specd[zerofind] / spec[zerofind]) /
(np.pi))
# compute reassignment
rtfarray = np.zeros_like(spec)
if threshold is None:
threshold = 1.E-4 * np.mean(fx[tlst])
for nn in range(nt):
for kk in range(nfbins / 2):
if abs(spec[kk, nn]) > threshold:
# get center of gravity index in time direction
nhat = int(nn + twspec[kk, nn])
nhat = int(min([max([nhat, 1]), nt - 1]))
# get center of gravity index in frequency direction
khat = int(kk - dwspec[kk, nn])
khat = int(np.remainder(np.remainder(khat - 1, nfbins / 2) + nfbins / 2,
nfbins / 2))
# reassign energy
rtfarray[khat, nhat] = rtfarray[khat, nhat] + spec[kk, nn]
# rtfarray[kk,nn]=spec[khat,nhat]
spect[kk, nn] = khat + 1j * nhat
else:
spect[kk, nn] = np.inf * (1 + 1j)
rtfarray[kk, nn] = rtfarray[kk, nn] + spec[kk, nn]
return rtfarray, tlst, flst, spec
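# Illustrative usage sketch, not part of the original module; the tone
# frequency and window parameters are arbitrary and kept small so the example
# runs quickly.
def _example_reassignedstft():
    df = 128.
    t = np.arange(0, 4, 1. / df)
    fx = np.sin(2 * np.pi * 15 * t)
    rtf, tlst, flst, spec = reassignedstft(fx, nh=2**5 - 1, tstep=2**4,
                                           nfbins=2**8, df=df)
    return rtf.shape, spec.shape  # both (128, 32)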
def wvd(fx, nh=2**8 - 1, tstep=2**5, nfbins=2**10, df=1.0):
"""
wvd(f,nh=2**8-1,tstep=2**5,nfbins=2**10,df=1.0) will calculate the
Wigner-Ville distribution for a function f. Can compute the cross spectra
by inputting fx as [fx1,fx2]
Inputs:
fx = array for which WVD will be calculated, input as [fx1,fx2] for
cross-spectra calculation
nh = window length, needs to be odd so centered on zero
tstep = time step between windows
nfbins = number of frequencies
df = sampling frequency (Hz)
Outputs:
tfarray = WVD estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
if isinstance(fx, list):
fx = np.array(fx)
try:
fn, fm = fx.shape
if fm > fn:
fm, fn = fx.shape
except ValueError:
fn = len(fx)
fm = 1
if fm > 1:
fn = fn[0]
print 'computing cross spectra'
# compute the analytic signal of function f and dctrend
fa = wvdas(fx[0])
fb = wvdas(fx[1])
else:
# compute the analytic signal of function f and dctrend
fa = wvdas(fx)
fa = sps.hilbert(dctrend(fx))
fb = fa.copy()
fn = len(fa)
# sampling period
df = float(df)
dt = 1. / df
tau = (nh - 1) / 2
# create a time array such that the first point is centered on time window
tlst = np.arange(start=0, stop=fn - 1, step=tstep, dtype='int')
# create an empty array to put the tf in
tfarray = np.zeros((nfbins, len(tlst)), dtype='complex128')
# create a frequency array with just positive frequencies
flst = np.fft.fftfreq(nfbins, dt)[0:nfbins / 2]
# calculate pseudo WV
for point, nn in enumerate(tlst):
# calculate the smallest timeshift possible
taun = min(nn, tau, fn - nn - 1)
# make a timeshift array
taulst = np.arange(start=-taun, stop=taun + 1, step=1, dtype='int')
# calculate rectangular windowed correlation function of analytic
# signal
Rnn = 4 * np.conjugate(fa[nn - taulst]) * fb[nn + taulst]
# calculate fft of windowed correlation function
# FTRnn=np.fft.fft(padzeros(Rnn,npad=nfbins))
# put into tfarray
tfarray[:, point] = padzeros(Rnn, npad=nfbins)[::-1]
# normalize
tfarray = np.fft.fft(tfarray, axis=0)
tfarray = tfarray / nh
return tfarray, tlst, flst
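# Illustrative usage sketch of the Wigner-Ville estimate above, not part of
# the original module; the tone frequency and window sizes are arbitrary.
def _example_wvd():
    df = 128.
    t = np.arange(0, 8, 1. / df)
    fx = np.sin(2 * np.pi * 20 * t)
    tfarray, tlst, flst = wvd(fx, nh=2**7 - 1, tstep=2**4, nfbins=2**9, df=df)
    return tfarray.shape  # (512, 64)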
def spwvd(fx, tstep=2**5, nfbins=2**10, df=1.0, nh=None, ng=None, sigmat=None,
sigmaf=None):
"""
spwvd(fx,tstep=2**5,nfbins=2**10,df=1.0,nh=2**8-1,ng=2**5-1,sigmat=None,
sigmaf=None)
    will calculate the smoothed pseudo Wigner-Ville distribution for a function
    fx, smoothed with Gaussian windows to get the best localization.
Inputs:
fx = array to estimate spwvd, input as [fx1,fx2] if computing cross
spectra
tstep = time step between windows
nfbins = number of frequencies
df = sampling frequency (Hz)
ng = length of time-domain smoothing window (needs to be odd)
nh = length of frequency-domain smoothing window (needs to be odd)
sigmat = std of window h, ie full width half max of gaussian
sigmaf = std of window g, ie full width half max of gaussian
Outputs:
tfarray = SPWVD estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
if isinstance(fx, list):
fx = np.array(fx)
try:
fn, fm = fx.shape
if fm > fn:
fm, fn = fx.shape
except ValueError:
fn = len(fx)
fm = 1
if fm > 1:
print 'computing cross spectra'
# compute the analytic signal of function f and dctrend
fa = wvdas(fx[0])
fb = wvdas(fx[1])
else:
# compute the analytic signal of function f and dctrend
fa = wvdas(fx)
fa = sps.hilbert(dctrend(fx))
fb = fa.copy()
print 'Computed Analytic signal'
# sampling period
df = float(df)
dt = 1 / df
# create normalize windows in time (g) and frequency (h)
# note window length should be odd so that h,g[0]=1,nh>ng
if nh is None:
nh = np.floor(fn / 2.)
# make sure the window length is odd
if np.remainder(nh, 2) == 0:
nh = nh + 1
# calculate length for time smoothing window
if ng is None:
ng = np.floor(fn / 5.)
if np.remainder(ng, 2) == 0:
ng = ng + 1
# calculate standard deviations for gaussian windows
if sigmat is None:
sigmah = nh / (6 * np.sqrt(2 * np.log(2)))
else:
sigmah = sigmat
if sigmaf is None:
sigmag = ng / (6 * np.sqrt(2 * np.log(2)))
else:
sigmag = sigmaf
nh = int(nh)
ng = int(ng)
print 'nh=' + str(nh) + '; ng=' + str(ng)
# calculate windows and normalize
h = sps.gaussian(nh, sigmah)
h = h / sum(h)
g = sps.gaussian(ng, sigmag)
g = g / sum(g)
Lh = (nh - 1) / 2 # midpoint index of window h
Lg = (ng - 1) / 2 # midpoint index of window g
# create a time array such that the first point is centered on time window
tlst = np.arange(start=0, stop=fn + 1, step=tstep, dtype='int')
# create an empty array to put the tf in
# make sure data type is complex
tfarray = np.zeros((nfbins, len(tlst)), dtype='complex128')
# create a frequency array with just positive frequencies
flst = np.fft.fftfreq(nfbins, dt)[0:nfbins / 2]
# calculate pseudo WV
for point, t in enumerate(tlst):
# find the smallest possible time shift
maxtau = min(t + Lg - 1, fn - t + Lg, round(nfbins / 2), Lh)
# create time lag list
taulst = np.arange(start=-min(Lg, fn - t), stop=min(Lg, t - 1) + 1, step=1,
dtype='int')
# calculate windowed correlation function of analytic function for
# zero frequency
tfarray[0, point] = sum(2 * (g[Lg + taulst] / sum(g[Lg + taulst])) * fa[t - taulst - 1] *
np.conjugate(fb[t - taulst - 1]))
# calculate tfd by calculating convolution of window and correlation
# function as sum of correlation function over the lag period times the
# window at that point. Calculate symmetrical segments for FFT later
for mm in range(maxtau):
taulst = np.arange(start=-min(Lg, fn - t - mm - 1), stop=min(Lg, t - mm - 1) + 1,
step=1, dtype='int')
# compute positive half
gm = 2 * (g[Lg + taulst] / sum(g[Lg + taulst]))
Rmm = sum(gm * fa[t + mm - taulst - 1] *
np.conjugate(fb[t - mm - taulst]))
tfarray[mm, point] = h[Lh + mm - 1] * Rmm
# compute negative half
Rmm = sum(gm * fa[t - mm - taulst] *
np.conjugate(fb[t + mm - taulst - 1]))
tfarray[nfbins - mm - 1, point] = h[Lh - mm] * Rmm
mm = round(nfbins / 2)
if t <= fn - mm and t >= mm and mm <= Lh:
print 'doing weird thing'
taulst = np.arange(start=-min(Lg, fn - t - mm), stop=min(Lg, fn - t, mm) + 1, step=1,
dtype='int')
gm = g[Lg + taulst] / sum(g[Lg + taulst])
tfarray[mm - 1, point] = .5 *\
(sum(h[Lh + mm] * (gm * fa[t + mm - taulst - 1] *
np.conjugate(fb[t - mm - taulst]))) +
sum(h[Lh - mm] * (gm * fa[t - mm - taulst] *
np.conjugate(fb[t + mm - taulst - 1]))))
tfarray = np.fft.fft(tfarray, axis=0)
# rotate for plotting purposes so that (t=0,f=0) is at the lower left
tfarray = np.rot90(tfarray.T, 1)
return tfarray, tlst, flst
def robustwvd(fx, nh=2**7 - 1, ng=2**4 - 1, tstep=2**4, nfbins=2**8, df=1.0,
sigmanh=None, sigmang=None):
"""
robustwvd(fx,tstep=2**5,nfbins=2**10,df=1.0,nh=2**8-1,ng=2**5-1,
sigmanh=None,sigmang=None)
    will calculate a robust smoothed pseudo Wigner-Ville distribution for a
    function fx, using Gaussian smoothing windows and a median estimate for
    robustness.
Inputs:
fx = array to estimate spwvd, input as [fx1,fx2] if computing cross
spectra
tstep = time step between windows
nfbins = number of frequencies
df = sampling frequency (Hz)
ng = length of time-domain smoothing window (needs to be odd)
nh = length of frequency-domain smoothing window (needs to be odd)
sigmanh = std of window h, ie full width half max of gaussian
sigmang = std of window g, ie full width half max of gaussian
Outputs:
tfarray = SPWVD estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
if isinstance(fx, list):
fx = np.array(fx)
try:
fn, fm = fx.shape
if fm > fn:
fm, fn = fx.shape
except ValueError:
fn = len(fx)
fm = 1
if fm > 1:
print 'computing cross spectra'
# compute the analytic signal of function f and dctrend
fa = wvdas(fx[0])
fb = wvdas(fx[1])
else:
# compute the analytic signal of function f and dctrend
fa = wvdas(fx)
fa = sps.hilbert(dctrend(fx))
fb = fa.copy()
print 'Computed Analytic signal'
# make sure window length is odd
if nh is None:
nh = np.floor(fn / 2.)
# make sure the window length is odd
if np.remainder(nh, 2) == 0:
nh = nh + 1
# calculate length for time smoothing window
if ng is None:
ng = np.floor(fn / 5.)
if np.remainder(ng, 2) == 0:
ng = ng + 1
nh = int(nh)
ng = int(ng)
print 'nh= ', nh
print 'ng= ', ng
dt = 1. / (df * 2.)
# get length of input time series
nfx = len(fa)
# make frequency smoothing window
if sigmanh is None:
sigmanh = nh / (5 * np.sqrt(2 * np.log(2)))
h = sps.gaussian(nh, sigmanh)
h = h / sum(h)
# make a time smoothing window
if sigmang is None:
sigmang = ng / (5 * np.sqrt(2 * np.log(2)))
g = sps.gaussian(ng, sigmang)
mlst = np.arange(start=-nh / 2 + 1, stop=nh / 2 + 1, step=1, dtype='int')
# mlst=np.arange(nh,dtype='int')
tlst = np.arange(start=nh / 2, stop=nfx - nh / 2, step=tstep)
# make a frequency list for plotting exporting only positive frequencies
# get only positive frequencies
flst = np.fft.fftfreq(nfbins, dt)[nfbins / 2:]
flst[-1] = 0
flstp = np.fft.fftfreq(nfbins, 2 * dt)[0:nfbins / 2]
# create an empty array to put the tf in
tfarray = np.zeros((nfbins / 2, len(tlst)), dtype='complex128')
for tpoint, nn in enumerate(tlst):
# calculate windowed correlation function of analytic function
fxwin = h * fa[nn + mlst] * fb[nn - mlst].conj()
for fpoint, mm in enumerate(flst):
fxmed = np.convolve(g, fxwin * np.exp(1j * 4 * np.pi * mlst * mm * dt),
mode='same') / (nh * ng)
fxmedpoint = np.median(fxmed.real)
if fxmedpoint == 0.0:
tfarray[fpoint, tpoint] = 1E-10
else:
tfarray[fpoint, tpoint] = fxmedpoint
tfarray = (4. * nh / dt) * tfarray
return tfarray, tlst, flstp
def specwv(fx, tstep=2**5, nfbins=2**10, nhs=2**8,
nhwv=2**9 - 1, ngwv=2**3 - 1, df=1.0):
"""
specwv(f,tstep=2**5,nfbins=2**10,nh=2**8-1,ng=1,df=1.0) will calculate
    the Wigner-Ville distribution multiplied by the STFT windowed by the common
gaussian window h for a function f.
Inputs:
fx = array to compute the specwv
tstep = time step between windows
nfbins = number of frequencies
nhs = length of time-domain smoothing window for STFT should be even
nhwv = length of time-domain smoothing window for WV (needs to be odd)
        ngwv = length of frequency-domain smoothing window (needs to be odd)
df = sampling frequency (Hz)
Outputs:
tfarray = SPECWV estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
# calculate stft
pst, tlst, flst = stft(fx, nh=nhs, tstep=tstep, nfbins=nfbins, df=df)
# calculate new time step so WVD and STFT will align
ntstep = len(fx) / (len(tlst) * 2.)
# calculate spwvd
pwv, twv, fwv = spwvd(fx, tstep=ntstep, nfbins=nfbins,
df=df, nh=nhwv, ng=ngwv)
# multiply the two together normalize
tfarray = pst / pst.max() * pwv / pwv.max()
return tfarray, tlst, flst
def modifiedb(fx, tstep=2**5, nfbins=2**10, df=1.0, nh=2**8 - 1, beta=.2):
"""modifiedb(fx,tstep=2**5,nfbins=2**10,df=1.0,nh=2**8-1,beta=.2)
will calculate the modified b distribution as defined by cosh(n)^-2 beta
for a function fx.
Inputs:
fx = array from which modifiedb will be calculated if computing cross
spectra input as [fx1,fx2]
tstep = time step between windows
nfbins = number of frequencies
df = sampling frequency (Hz)
nh = length of time-domain smoothing window (needs to be odd)
beta = smoothing coefficient
Outputs:
tfarray = modifiedB estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
if isinstance(fx, list):
fx = np.array(fx)
try:
fn, fm = fx.shape
if fm > fn:
fm, fn = fx.shape
except ValueError:
fn = len(fx)
fm = 1
if fm > 1:
fn = fn[0]
print 'computing cross spectra'
# compute the analytic signal of function f and dctrend
fa = wvdas(fx[0])
fb = wvdas(fx[1])
else:
# compute the analytic signal of function f and dctrend
fa = wvdas(fx)
fa = sps.hilbert(dctrend(fx))
fb = fa.copy()
# sampling period
df = float(df)
dt = 1. / df
tau = (nh - 1) / 2 # midpoint index of window h
# create a time array such that the first point is centered on time window
tlst = np.arange(start=0, stop=fn - 1, step=tstep, dtype='int')
# create an empty array to put the tf in
tfarray = np.zeros((nfbins, len(tlst)), dtype='complex')
# create a frequency array with just positive frequencies
flst = np.fft.fftfreq(nfbins, dt)[0:nfbins / 2]
# calculate pseudo WV
for point, nn in enumerate(tlst):
# calculate the smallest timeshift possible
taun = min(nn, tau, fn - nn - 1)
# make a timeshift array
taulst = np.arange(start=-taun, stop=taun + 1, step=1, dtype='int')
# create modified b window
mbwin = np.cosh(taulst)**(-2 * beta)
mbwin = mbwin / sum(mbwin)
MBwin = np.fft.fft(padzeros(mbwin, npad=nfbins))
# calculate windowed correlation function of analytic function
Rnn = np.conjugate(fa[nn - taulst]) * fb[nn + taulst]
# calculate fft of windowed correlation function
FTRnn = MBwin * np.fft.fft(padzeros(Rnn, npad=nfbins))
# put into tfarray
tfarray[:, point] = FTRnn[::-1]
# need to cut the time frequency array in half due to the WVD assuming
# time series sampled at twice nyquist.
tfarray = tfarray
return tfarray, tlst, flst
def robuststftMedian(fx, nh=2**8, tstep=2**5, df=1.0, nfbins=2**10):
"""
robuststftMedian(fx,nh=2**8,tstep=2**5,ng=1,df=1.0) will output an array
of the time-frequency robust spectrogram calculated using the vector median
simplification.
Inputs:
fx = the function to have a spectrogram computed for
nh = window length for each time step
tstep = time step between short windows
df = sampling frequency
nfbins = number of frequency bins
Outputs:
tfarray = WVD estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
# get length of input time series
nfx = len(fx)
# compute time shift list
mlst = np.arange(start=-nh / 2 + 1, stop=nh / 2 + 1, step=1, dtype='int')
# compute time locations to take STFT
tlst = np.arange(start=0, stop=nfx - nh + 1, step=tstep)
# make a frequency list for plotting exporting only positive frequencies
flst = np.fft.fftfreq(nfbins, 1 / df)
flstc = flst[nfbins / 2:]
# Note: these are actually the negative frequencies but works better for
# calculations
flstp = flst[0:nfbins / 2]
# make time window and normalize
sigmanh = nh / (6 * np.sqrt(2 * np.log(2)))
h = sps.gaussian(nh, sigmanh)
h = h / sum(h)
# create an empty array to put the tf in and initialize a complex value
tfarray = np.zeros((nfbins / 2, len(tlst)), dtype='complex128')
# take the hilbert transform of the signal to make complex and remove
# negative frequencies
fa = sps.hilbert(dctrend(fx))
fa = fa / fa.std()
# make a frequency list for plotting exporting only positive frequencies
# get only positive frequencies
flst = np.fft.fftfreq(nfbins, 1 / df)[nfbins / 2:]
for tpoint, nn in enumerate(tlst):
# calculate windowed correlation function of analytic function
fxwin = h * fa[nn:nn + nh]
for fpoint, mm in enumerate(flstc):
fxmed = fxwin * np.exp(1j * 2 * np.pi * mlst * mm / df)
fxmedreal = np.median(fxmed.real)
fxmedimag = np.median(fxmed.imag)
if fxmedreal + 1j * fxmedimag == 0.0:
tfarray[fpoint, tpoint] = 1E-10
else:
tfarray[fpoint, tpoint] = fxmedreal + 1j * fxmedimag
# normalize tfarray
tfarray = (4. * nh * df) * tfarray
return tfarray, tlst, flstp
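# A minimal usage sketch for robuststftMedian (illustrative only, not executed
# on import): the demo name, sampling rate and test chirp below are assumptions
# made for the example, while the call itself follows the signature defined
# above.
def _demo_robuststftMedian():
    df = 64.0
    t = np.arange(0, 32, 1. / df)
    # linear chirp whose instantaneous frequency sweeps from 2 Hz to 10 Hz
    sig = np.cos(2 * np.pi * (2 + .125 * t) * t)
    tfarray, tlst, flst = robuststftMedian(sig, nh=2**7, tstep=2**4, df=df,
                                           nfbins=2**9)
    return tfarray, tlst, flst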
def robuststftL(fx, alpha=.325, nh=2**8, tstep=2**5, df=1.0, nfbins=2**10):
"""
    robuststftL(fx,alpha=.325,nh=2**8,tstep=2**5,df=1.0,nfbins=2**10) will output an array of the
time-frequency robust spectrogram by estimating the vector median and
summing terms estimated by alpha coefficients.
Inputs:
fx = the function to have a spectrogram computed for
alpha = robust parameter [0,.5] -> 0 gives spectrogram, .5 gives median stft
nh = window length for each time step
tstep = time step between short windows
df = sampling frequency
nfbins = number of frequency bins
Outputs:
tfarray = robust L-estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
# get length of input time series
nfx = len(fx)
# compute time shift list
mlst = np.arange(start=-nh / 2 + 1, stop=nh / 2 + 1, step=1, dtype='int')
# compute time locations to take STFT
tlst = np.arange(start=0, stop=nfx - nh + 1, step=tstep)
# make a frequency list for plotting exporting only positive frequencies
flst = np.fft.fftfreq(nfbins, 1 / df)
flstc = flst[nfbins / 2:]
# Note: these are actually the negative frequencies but works better for
# calculations
flstp = flst[0:nfbins / 2]
# make time window and normalize
sigmanh = nh / (6 * np.sqrt(2 * np.log(2)))
h = sps.gaussian(nh, sigmanh)
h = h / sum(h)
# create an empty array to put the tf in and initialize a complex value
tfarray = np.zeros((nfbins / 2, len(tlst)), dtype='complex128')
# take the hilbert transform of the signal to make complex and remove
# negative frequencies
fa = sps.hilbert(dctrend(fx))
fa = fa / fa.std()
# create list of coefficients
a = np.zeros(nh)
    a[int((nh - 2) * alpha):int(alpha * (2 - nh) + nh - 1)] = 1. / \
        (nh * (1 - 2 * alpha) + 4 * alpha)
for tpoint, nn in enumerate(tlst):
# calculate windowed correlation function of analytic function
fxwin = h * fa[nn:nn + nh]
for fpoint, mm in enumerate(flstc):
fxelement = fxwin * np.exp(1j * 2 * np.pi * mlst * mm / df)
fxreal = np.sort(fxelement.real)[::-1]
fximag = np.sort(fxelement.imag)[::-1]
tfpoint = sum(a * (fxreal + 1j * fximag))
if tfpoint == 0.0:
tfarray[fpoint, tpoint] = 1E-10
else:
tfarray[fpoint, tpoint] = tfpoint
# normalize tfarray
tfarray = (4. * nh * df) * tfarray
return tfarray, tlst, flstp
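# A minimal usage sketch for robuststftL (illustrative only, not executed on
# import). alpha=0 reduces to an ordinary spectrogram and alpha=.5 approaches
# the median STFT above; the noisy test tone is an assumption made for the
# example.
def _demo_robuststftL():
    np.random.seed(0)
    df = 64.0
    t = np.arange(0, 32, 1. / df)
    sig = np.cos(2 * np.pi * 5 * t) + .5 * np.random.randn(len(t))
    tfarray, tlst, flst = robuststftL(sig, alpha=.325, nh=2**7, tstep=2**4,
                                      df=df, nfbins=2**9)
    return tfarray, tlst, flst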
def smethod(fx, L=11, nh=2**8, tstep=2**7, ng=1,
df=1.0, nfbins=2**10, sigmaL=None):
"""
    smethod(fx,L=11,nh=2**8,tstep=2**7,ng=1,df=1.0,nfbins=2**10) will calculate
    the S-method by estimating the STFT first and computing the WV of window
    length L in the frequency domain. Larger L gives an estimate closer to the
    WVD; L=0 recovers the STFT.
    Inputs:
        fx = the function to have an S-method computed for, if computing cross
            spectra input as [fx1,fx2]
L = window length in frequency domain
nh = window length for each time step
tstep = time step between short windows
ng = smoothing window along frequency plane should be odd
df = sampling frequency
nfbins = number of frequency bins
Outputs:
tfarray = S-method estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
df = float(df)
if isinstance(fx, list):
fx = np.array(fx)
try:
fn, fm = fx.shape
if fm > fn:
fm, fn = fx.shape
except ValueError:
fn = len(fx)
fm = 1
if fm > 1:
print 'computing cross spectra'
# compute the analytic signal of function f and dctrend
# fa=sps.hilbert(dctrend(fx[0]))
# fb=sps.hilbert(dctrend(fx[1]))
fa = fx[0]
fb = fx[1]
fa = fa.reshape(fn)
fb = fb.reshape(fn)
pxa, tlst, flst = stft(
fa, nh=nh, tstep=tstep, ng=ng, df=df, nfbins=nfbins)
pxb, tlst, flst = stft(
fb, nh=nh, tstep=tstep, ng=ng, df=df, nfbins=nfbins)
pxx = pxa * pxb.conj()
else:
# compute the analytic signal of function f and dctrend
# fa=sps.hilbert(dctrend(fx))
fa = fx
fa = fa.reshape(fn)
fb = fa
pxx, tlst, flst = stft(
fa, nh=nh, tstep=tstep, ng=ng, df=df, nfbins=nfbins)
# pxb=pxa
    # make a new array to put the new tfd in
tfarray = abs(pxx)**2
# get shape of spectrogram
nf, nt = tfarray.shape
# create a list of frequency shifts
Llst = np.arange(start=-L / 2 + 1, stop=L / 2 + 1, step=1, dtype='int')
# create a frequency gaussian window
if sigmaL is None:
sigmaL = L / (1 * np.sqrt(2 * np.log(2)))
p = sps.gaussian(L, sigmaL)
# make a matrix of windows
pm = np.zeros((L, nt))
for kk in range(nt):
pm[:, kk] = p
# loop over frequency and calculate the s-method
for ff in range(L / 2, nf - L / 2):
tfarray[ff, :] = tfarray[ff, :] + 2 * np.real(np.sum(pm * pxx[ff + Llst, :] *
pxx[ff - Llst, :].conj(), axis=0))
tfarray[L / 2:-L / 2] = tfarray[L / 2:-L / 2] / L
return tfarray, tlst, flst, pxx
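# A minimal usage sketch for smethod (illustrative only, not executed on
# import). It assumes the module-level stft helper called above is available;
# the window sizes and test chirp are assumptions made for the example.
def _demo_smethod():
    df = 64.0
    t = np.arange(0, 32, 1. / df)
    sig = np.cos(2 * np.pi * (2 + .125 * t) * t)
    tfarray, tlst, flst, pxx = smethod(sig, L=11, nh=2**7, tstep=2**5, ng=1,
                                       df=df, nfbins=2**9)
    return tfarray, tlst, flst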
def robustSmethod(fx, L=5, nh=2**7, tstep=2**5, nfbins=2**10, df=1.0,
robusttype='median', sigmal=None):
"""
    robustSmethod(fx,L=5,nh=2**7,tstep=2**5,nfbins=2**10,df=1.0) computes the
robust Smethod via the robust spectrogram.
Inputs:
fx = array of data, if computing cross-spectra input as [fa,fb]
L = frequency smoothing window if robusttype='median'
nh = window length for STFT
tstep = time step for each STFT to be computed
nfbins = number of frequency bins to be calculate
df = sampling frequency
        robusttype = type of robust STFT calculation, can be 'median' or 'L'
        sigmal = full-width half max of gaussian window applied in frequency
Outputs:
tfarray = robust S-method estimation of array fx
tlst = time instances of each calculation
flst = array of positive frequencies
"""
if isinstance(fx, list):
fx = np.array(fx)
try:
fn, fm = fx.shape
if fm > fn:
fm, fn = fx.shape
except ValueError:
fn = len(fx)
fm = 1
if fm > 1:
print 'computing cross spectra'
# compute the analytic signal of function f and dctrend
fa = fx[0].reshape(fn)
fb = fx[1].reshape(fn)
if robusttype == 'median':
pxa, tlst, flst = robuststftMedian(fa, nh=nh, tstep=tstep, df=df,
nfbins=nfbins)
pxb, tlst, flst = robuststftMedian(fb, nh=nh, tstep=tstep, df=df,
nfbins=nfbins)
elif robusttype == 'L':
pxa, tlst, flst = robuststftL(
fa, nh=nh, tstep=tstep, df=df, nfbins=nfbins)
pxb, tlst, flst = robuststftL(
fb, nh=nh, tstep=tstep, df=df, nfbins=nfbins)
else:
raise ValueError('robusttype undefined')
pxx = pxa * pxb.conj()
else:
fa = fx.reshape(fn)
if robusttype == 'median':
pxx, tlst, flst = robuststftMedian(fa, nh=nh, tstep=tstep, df=df,
nfbins=nfbins)
elif robusttype == 'L':
pxx, tlst, flst = robuststftL(
fa, nh=nh, tstep=tstep, df=df, nfbins=nfbins)
else:
raise ValueError('robusttype undefined')
# compute frequency shift list
Llst = np.arange(start=-L / 2 + 1, stop=L / 2 + 1, step=1, dtype='int')
# compute the frequency window of length L
if sigmal is None:
sigmal = L / 3 * (np.sqrt(2 * np.log(2)))
lwin = gausswin(L, sigmal)
lwin = lwin / sum(lwin)
pm = np.zeros((L, len(tlst)))
for kk in range(len(tlst)):
pm[:, kk] = lwin
smarray = pxx.copy()
# compute S-method
for ff in range(L / 2, nfbins / 2 - L / 2):
smarray[ff, :] = smarray[ff, :] + 2 * np.real(np.sum(pm * pxx[ff + Llst, :] *
pxx[ff - Llst, :].conj(), axis=0))
# for tt in range(len(tlst)):
# for kk in range((L-1)/2,len(flst)-(L-1)/2):
# smarray[kk,tt]=abs(pxx[kk,tt])+np.sqrt(abs(2*sum(lwin*
# pxx[kk+Llst,tt]*pxx[kk-Llst,tt].conj())))
smarray = (2. / (L * nh)) * smarray
return smarray, tlst, flst, pxx
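# A minimal usage sketch for robustSmethod (illustrative only, not executed on
# import), using the median-based robust STFT defined above. It assumes the
# module-level gausswin helper is available; the noisy test tone is an
# assumption made for the example.
def _demo_robustSmethod():
    np.random.seed(0)
    df = 64.0
    t = np.arange(0, 32, 1. / df)
    sig = np.cos(2 * np.pi * 8 * t) + .5 * np.random.randn(len(t))
    smarray, tlst, flst, pxx = robustSmethod(sig, L=5, nh=2**7, tstep=2**5,
                                             nfbins=2**10, df=df,
                                             robusttype='median')
    return smarray, tlst, flst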
def reassignedSmethod(fx, nh=2**7 - 1, tstep=2**4, nfbins=2**9, df=1.0, alpha=4,
thresh=.01, L=5):
"""
    reassignedSmethod(fx,nh=2**7-1,tstep=2**4,nfbins=2**9,df=1.0,alpha=4,
                   thresh=.01,L=5)
    will calculate the reassigned S-method as described by Djurovic [1999] by
using the spectrogram to estimate the reassignment
Inputs:
fx = 1-d array to be processed
nh = window length for each time instance
tstep = step between time instances
nfbins = number of frequency bins, note output will be nfbins/2 due to
symmetry of the FFT
df = sampling rate (Hz)
alpha = inverse of full-width half max of gaussian window, smaller
numbers mean broader windows
        thresh = threshold for reassignment, lower numbers mean more points
                reassigned, higher numbers mean fewer points reassigned
        L = length of window for S-method calculation, higher numbers tend
                toward the WVD
Outputs:
rtfarray = reassigned S-method shape of (nfbins/2,len(fx)/tstep)
tlst = list of time instances where rtfarray was calculated
flst = positive frequencies
sm = S-method array
"""
if isinstance(fx, list):
fx = np.array(fx)
try:
fn, fm = fx.shape
if fm > fn:
fm, fn = fx.shape
except ValueError:
fn = len(fx)
fm = 1
if fm > 1:
print 'computing cross spectra'
# compute the analytic signal of function f and dctrend
# fa=sps.hilbert(dctrend(fx[0]))
# fb=sps.hilbert(dctrend(fx[1]))
fa = fx[0]
fb = fx[1]
fa = fa.reshape(fn)
fb = fb.reshape(fn)
else:
fa = fx
fa = fa.reshape(fn)
fb = fa.copy()
nx = len(fx)
# compute gaussian window
h = gausswin(nh, alpha=alpha)
# h=np.hanning(nh)
lh = (nh - 1) / 2
# compute ramp window
th = h * np.arange(start=-lh, stop=lh + 1, step=1)
# compute derivative of window
dh = dwindow(h)
# make a time list of indexes
tlst = np.arange(start=0, stop=nx, step=tstep)
nt = len(tlst)
# make frequency list for plotting
flst = np.fft.fftfreq(nfbins, 1. / df)[:nfbins / 2]
# initialize some time-frequency arrays
tfh = np.zeros((nfbins, nt), dtype='complex128')
tfth = np.zeros((nfbins, nt), dtype='complex128')
tfdh = np.zeros((nfbins, nt), dtype='complex128')
# compute components for reassignment
for ii, tt in enumerate(tlst):
# create a time shift list
tau = np.arange(start=-min([np.round(nx / 2.), lh, tt - 1]),
stop=min([np.round(nx / 2.), lh, nx - tt - 1]) + 1)
# compute the frequency spots to be calculated
ff = np.remainder(nfbins + tau, nfbins)
# make lists of data points for each window calculation
xlst = tt + tau
hlst = lh + tau
normh = np.sqrt(np.sum(abs(h[hlst])**2))
tfh[ff, ii] = fx[xlst] * h[hlst].conj() / normh
tfth[ff, ii] = fx[xlst] * th[hlst].conj() / normh
tfdh[ff, ii] = fx[xlst] * dh[hlst].conj() / normh
# compute Fourier Transform
spech = np.fft.fft(tfh, axis=0)
specth = np.fft.fft(tfth, axis=0)
specdh = np.fft.fft(tfdh, axis=0)
# get only positive frequencies
spech = spech[nfbins / 2:, :]
specth = specth[nfbins / 2:, :]
specdh = specdh[nfbins / 2:, :]
# check to make sure no spurious zeros floating around
szf = np.where(abs(spech) < 1.E-6)
spech[szf] = 0.0 + 0.0j
zerofind = np.nonzero(abs(spech))
twspec = np.zeros((nfbins / 2, nt), dtype='float')
dwspec = np.zeros((nfbins / 2, nt), dtype='float')
twspec[zerofind] = np.round(np.real(specth[zerofind] / spech[zerofind]))
dwspec[zerofind] = np.round(np.imag((nfbins / 2.) * specdh[zerofind] /
spech[zerofind]) / (np.pi))
# get shape of spectrogram
nf, nt = spech.shape
#-----calculate s-method-----
Llst = np.arange(start=-L / 2 + 1, stop=L / 2 + 1, step=1, dtype='int')
    # make an empty array of zeros
sm = np.zeros_like(spech)
    # fill the top and bottom edge bins where the full window of length L does not fit
sm[0:L / 2, :] = abs(spech[0:L / 2, :])**2
sm[-L / 2:, :] = abs(spech[-L / 2:, :])**2
# calculate s-method
for ff in range(L / 2, nf - L / 2 - 1):
sm[ff, :] = 2 * np.real(np.sum(spech[ff + Llst, :] * spech[ff - Llst, :].conj(),
axis=0)) / L
#------compute reassignment-----
rtfarray = np.zeros((nfbins / 2, nt))
threshold = thresh * np.max(abs(sm))
for nn in range(nt):
for kk in range(nf):
if abs(spech[kk, nn]) > threshold:
# get center of gravity index in time direction from
# spectrogram
nhat = int(nn + twspec[kk, nn])
nhat = int(min([max([nhat, 1]), nt - 1]))
# get center of gravity index in frequency direction from spec
khat = int(kk - dwspec[kk, nn])
khat = int(np.remainder(np.remainder(khat - 1, nfbins / 2) + nfbins / 2,
nfbins / 2))
rtfarray[khat, nhat] = rtfarray[khat, nhat] + abs(sm[kk, nn])
else:
rtfarray[kk, nn] = rtfarray[kk, nn] + sm[kk, nn]
    # fill the edge bins where the full window of length L does not fit
rtfarray[:L / 2, :] = abs(sm[:L / 2, :])
rtfarray[-L / 2:, :] = abs(sm[-L / 2:, :])
tz = np.where(rtfarray == 0)
rtfarray[tz] = 1.0
tz = np.where(sm == 0.0)
sm[tz] = 1.0
# scale
rtfarray = abs(rtfarray)
return rtfarray, tlst, flst, sm
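# A minimal usage sketch for reassignedSmethod (illustrative only, not executed
# on import). It assumes the module-level gausswin and dwindow helpers used
# above are available; the test chirp and window sizes are assumptions made for
# the example.
def _demo_reassignedSmethod():
    df = 64.0
    t = np.arange(0, 16, 1. / df)
    sig = np.cos(2 * np.pi * (2 + .25 * t) * t)
    rtfarray, tlst, flst, sm = reassignedSmethod(sig, nh=2**6 - 1, tstep=2**3,
                                                 nfbins=2**8, df=df, alpha=4,
                                                 thresh=.01, L=5)
    return rtfarray, tlst, flst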
def plottf(tfarray, tlst, flst, fignum=1, starttime=0, timeinc='hrs',
dt=1.0, title=None, vmm=None, cmap=None, aspect=None, interpolation=None,
cbori=None, cbshrink=None, cbaspect=None, cbpad=None, powscale='log',
normalize='n', yscale='log', period='n'):
"""plottf(tfarray,tlst,flst,fignum=1) will plot a calculated tfarray with
limits corresponding to tlst and flst.
Inputs:
starttime = starttime measured in timeincrement
        timeinc = 'hrs','min' or 'sec'
vmm = [vmin,vmax] a list for min and max
title = title string
cmap = colormap scheme default is jet, type help on matplotlib.cm
aspect = aspect of plot, default is auto, can be 'equal' or a scalar
interpolation = type of color interpolation, type help on
matplotlib.pyplot.imshow
cbori = colorbar orientation 'horizontal' or 'vertical'
cbshrink = percentage of 1 for shrinking colorbar
cbaspect = aspect ratio of long to short dimensions
cbpad = pad between colorbar and axis
powscale = linear or log for power
normalize = y or n, yes for normalization, n for no
yscale = linear or log plot yscale
period = 'y' or 'n' to plot in period instead of frequency
Outputs:
plot
"""
# time increment
if timeinc == 'hrs':
tinc = 3600 / dt
elif timeinc == 'min':
tinc = 60 / dt
elif timeinc == 'sec':
tinc = 1 / dt
else:
        raise ValueError(timeinc + ' is not defined')
# colormap
if cmap is None:
cmap = 'jet'
else:
cmap = cmap
# aspect ratio
if aspect is None:
aspect = 'auto'
else:
aspect = aspect
# interpolation
if interpolation is None:
interpolation = 'gaussian'
else:
interpolation = interpolation
# colorbar orientation
if cbori is None:
cbori = 'vertical'
else:
cbori = cbori
    # colorbar shrinkage
if cbshrink is None:
cbshrink = .8
else:
cbshrink = cbshrink
# colorbar aspect
if cbaspect is None:
cbaspect = 20
else:
cbaspect = cbaspect
# colorbar pad
if cbpad is None:
cbpad = .05
else:
cbpad = cbpad
# scale
if powscale == 'log':
zerofind = np.where(abs(tfarray) == 0)
tfarray[zerofind] = 1.0
if normalize == 'y':
plottfarray = 10 * np.log10(abs(tfarray / np.max(abs(tfarray))))
else:
plottfarray = 10 * np.log10(abs(tfarray))
elif powscale == 'linear':
if normalize == 'y':
plottfarray = abs(tfarray / np.max(abs(tfarray)))
else:
plottfarray = abs(tfarray)
#period or frequency
if period == 'y':
flst[1:] = 1. / flst[1:]
flst[0] = 2 * flst[1]
elif period == 'n':
pass
# set properties for the plot
plt.rcParams['font.size'] = 9
plt.rcParams['figure.subplot.left'] = .12
plt.rcParams['figure.subplot.right'] = .99
plt.rcParams['figure.subplot.bottom'] = .12
plt.rcParams['figure.subplot.top'] = .96
plt.rcParams['figure.subplot.wspace'] = .25
plt.rcParams['figure.subplot.hspace'] = .20
# set the font dictionary
fdict = {'size': 10, 'weight': 'bold'}
# make a meshgrid if yscale is logarithmic
if yscale == 'log':
logt, logf = np.meshgrid(tlst, flst)
# make figure
fig1 = plt.figure(fignum, [10, 10], dpi=300)
ax = fig1.add_subplot(1, 1, 1)
if vmm is not None:
vmin = vmm[0]
vmax = vmm[1]
# add in log yscale
if yscale == 'log':
# need to flip the matrix so that origin is bottom right
cbp = ax.pcolormesh(logt, logf, np.flipud(plottfarray),
cmap=cmap, vmin=vmin, vmax=vmax)
ax.semilogy()
ax.set_ylim(flst[1], flst[-1])
ax.set_xlim(tlst[0], tlst[-1])
cb = plt.colorbar(cbp, orientation=cbori, shrink=cbshrink, pad=cbpad,
aspect=cbaspect)
else:
plt.imshow(plottfarray, extent=(tlst[0] / tinc + starttime,
tlst[-1] / tinc + starttime, flst[1], flst[-1]), aspect=aspect,
vmin=vmin, vmax=vmax, cmap=cmap,
interpolation=interpolation)
cb = plt.colorbar(orientation=cbori, shrink=cbshrink, pad=cbpad,
aspect=cbaspect)
else:
if yscale == 'log':
cbp = ax.pcolormesh(logt, logf, np.flipud(plottfarray),
cmap=cmap)
ax.semilogy()
ax.set_ylim(flst[1], flst[-1])
ax.set_xlim(tlst[0], tlst[-1])
cb = plt.colorbar(cbp, orientation=cbori, shrink=cbshrink, pad=cbpad,
aspect=cbaspect)
else:
plt.imshow(plottfarray, extent=(tlst[0] / tinc + starttime,
tlst[-1] / tinc + starttime, flst[1], flst[-1]), aspect=aspect,
cmap=cmap, interpolation=interpolation)
cb = plt.colorbar(orientation=cbori, shrink=cbshrink, pad=cbpad,
aspect=cbaspect)
ax.set_xlabel('time(' + timeinc + ')', fontdict=fdict)
if period == 'y':
ax.set_ylabel('period (s)', fontdict=fdict)
else:
ax.set_ylabel('frequency (Hz)', fontdict=fdict)
if title is not None:
ax.set_title(title, fontdict=fdict)
plt.show()
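# A minimal plotting sketch (illustrative only, not executed on import):
# compute an S-method distribution with smethod above and hand it to plottf.
# The signal and display options are assumptions made for the example.
def _demo_plottf():
    df = 64.0
    t = np.arange(0, 32, 1. / df)
    sig = np.cos(2 * np.pi * (2 + .125 * t) * t)
    tfarray, tlst, flst, pxx = smethod(sig, L=11, nh=2**7, tstep=2**5, ng=1,
                                       df=df, nfbins=2**9)
    plottf(abs(tfarray), tlst, flst, fignum=1, starttime=0, timeinc='sec',
           dt=1. / df, powscale='log', yscale='linear')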
def plotAll(fx, tfarray, tlst, flst, fignum=1, starttime=0, timeinc='hrs',
dt=1.0, title=None, vmm=None, cmap=None, aspect=None, interpolation=None,
cbori=None, cbshrink=None, cbaspect=None, cbpad=None, normalize='n',
            scale='log'):
    """plotAll(fx,tfarray,tlst,flst,fignum=1) will plot a calculated tfarray with
    limits corresponding to tlst and flst, together with the time series fx and
    its amplitude spectrum.
    Inputs:
        starttime = starttime measured in timeincrement
        timeinc = 'hrs','min' or 'sec'
vmm = [vmin,vmax] a list for min and max
title = title string
cmap = colormap scheme default is jet, type help on matplotlib.cm
aspect = aspect of plot, default is auto, can be 'equal' or a scalar
interpolation = type of color interpolation, type help on
matplotlib.pyplot.imshow
cbori = colorbar orientation 'horizontal' or 'vertical'
cbshrink = percentage of 1 for shrinking colorbar
cbaspect = aspect ratio of long to short dimensions
cbpad = pad between colorbar and axis
        normalize = y or n, y for normalization n for none
Outputs:
plot
"""
# time increment
if timeinc == 'hrs':
tinc = 3600 / dt
elif timeinc == 'min':
tinc = 60 / dt
elif timeinc == 'sec':
tinc = 1 / dt
else:
        raise ValueError(timeinc + ' is not defined')
# colormap
if cmap is None:
cmap = 'jet'
else:
cmap = cmap
# aspect ratio
if aspect is None:
aspect = 'auto'
else:
aspect = aspect
# interpolation
if interpolation is None:
interpolation = 'gaussian'
else:
interpolation = interpolation
# colorbar orientation
if cbori is None:
cbori = 'vertical'
else:
cbori = cbori
    # colorbar shrinkage
if cbshrink is None:
cbshrink = .99
else:
cbshrink = cbshrink
# colorbar aspect
if cbaspect is None:
cbaspect = 20
else:
cbaspect = cbaspect
# colorbar pad
if cbpad is None:
cbpad = .1
else:
cbpad = cbpad
# scale
if scale == 'log':
zerofind = np.where(abs(tfarray) == 0)
tfarray[zerofind] = 1.0
if normalize == 'y':
plottfarray = 20 * np.log10(abs(tfarray / np.max(abs(tfarray))))
else:
plottfarray = 20 * np.log10(abs(tfarray))
elif scale == 'linear':
if normalize == 'y':
            plottfarray = abs(tfarray / np.max(abs(tfarray)))**2
else:
plottfarray = abs(tfarray)**2
t = np.arange(len(fx)) * dt + starttime * dt
FX = np.fft.fft(padzeros(fx))
FXfreq = np.fft.fftfreq(len(FX), dt)
# set some plot parameters
plt.rcParams['font.size'] = 10
plt.rcParams['figure.subplot.left'] = .13
plt.rcParams['figure.subplot.right'] = .98
plt.rcParams['figure.subplot.bottom'] = .07
plt.rcParams['figure.subplot.top'] = .96
plt.rcParams['figure.subplot.wspace'] = .25
plt.rcParams['figure.subplot.hspace'] = .20
# plt.rcParams['font.family']='helvetica'
fig = plt.figure(fignum)
plt.clf()
# plot FFT of fx
fax = fig.add_axes([.05, .25, .1, .7])
plt.semilogx(abs(FX[0:len(FX) / 2] / max(abs(FX))),
FXfreq[0:len(FX) / 2], '-k')
plt.axis('tight')
plt.ylim(0, FXfreq[len(FX) / 2 - 1])
# fax.xaxis.set_major_locator(MultipleLocator(.5))
# plot TFD
pax = fig.add_axes([.25, .25, .75, .7])
if vmm is not None:
vmin = vmm[0]
vmax = vmm[1]
plt.imshow(plottfarray, extent=(tlst[0] / tinc, tlst[-1] / tinc,
flst[0], flst[-1]), aspect=aspect, vmin=vmin, vmax=vmax, cmap=cmap,
interpolation=interpolation)
else:
plt.imshow(plottfarray, extent=(tlst[0] / tinc, tlst[-1] / tinc,
flst[0], flst[-1]), aspect=aspect, cmap=cmap,
interpolation=interpolation)
plt.xlabel('Time(' + timeinc + ')', fontsize=12, fontweight='bold')
plt.ylabel('Frequency (Hz)', fontsize=12, fontweight='bold')
if title is not None:
plt.title(title, fontsize=14, fontweight='bold')
plt.colorbar(
orientation=cbori,
shrink=cbshrink,
pad=cbpad,
aspect=cbaspect)
# plot timeseries
tax = fig.add_axes([.25, .05, .60, .1])
plt.plot(t, fx, '-k')
plt.axis('tight')
plt.show()
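# A minimal plotting sketch for plotAll (illustrative only, not executed on
# import), reusing the S-method output so the spectrum, distribution and time
# series are shown together; all parameter values are assumptions made for the
# example.
def _demo_plotAll():
    df = 64.0
    t = np.arange(0, 32, 1. / df)
    sig = np.cos(2 * np.pi * (2 + .125 * t) * t)
    tfarray, tlst, flst, pxx = smethod(sig, L=11, nh=2**7, tstep=2**5, ng=1,
                                       df=df, nfbins=2**9)
    plotAll(sig, abs(tfarray), tlst, flst, fignum=2, timeinc='sec', dt=1. / df,
            scale='log')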
def stfbss(X, nsources=5, ng=2**5 - 1, nh=2**9 - 1, tstep=2**6 - 1, df=1.0, nfbins=2**10,
tftol=1.E-8, normalize=True):
"""
    stfbss(X,nsources=5,ng=2**5-1,nh=2**9-1,tstep=2**6-1,df=1.0,nfbins=2**10,
    tftol=1.E-8,normalize=True)
estimates sources using a blind source algorithm based on spatial
time-frequency distributions. At the moment this algorithm uses the SPWVD
to estimate TF distributions.
Inputs:
X = m x n array of time series, where m is number of time series and n
is length of each time series
nsources = number of estimated sources
ng = frequency window length
nh = time window length
tstep = time step increment
df = sampling frequency (Hz)
nfbins = number of frequencies
tftol = tolerance for a time-frequency point to be estimated as a cross
term or as an auto term, the higher the number the more auto
terms.
        normalize = True or False, True to normalize, False if already
            normalized
Outputs:
Se = estimated individual signals up to a permutation and scale
Ae = estimated mixing matrix as X=A*S
"""
# get shape of timeseries matrix,
# m=number of channels
# tlen=length of timeseries
m, maxn = X.shape
n = nsources
# get number of time bins
ntbins = int(float(maxn) / tstep)
tfkwargs = {'ng': ng, 'nh': nh, 'df': df, 'nfbins': nfbins, 'tstep': tstep}
# remove dc component from time series and normalize
if normalize == True:
for ii in range(m):
X[ii, :] = X[ii, :] - np.mean(X[ii, :])
X[ii, :] = X[ii, :] / X[ii, :].std()
#=========================================================================
# Whiten data and Compute Whitening matrix
#=========================================================================
# whiten data to get a unitary matrix with unit variance and zero mean
# compute covariance matrix
Rxx = (np.dot(X, X.T)) / float(maxn)
# calculate eigen decomposition
[l, u] = np.linalg.eig(Rxx)
# sort eigenvalues from smallest to largest assuming largest are sources and
# smallest are noise
lspot = l.argsort()
eigval = l[lspot]
eigvec = u[:, lspot]
# calculate the noise variance as mean of non-principal components
sigman = np.mean(eigval[0:m - n])
# compute scaling factor for whitening matrix
wscale = 1 / np.sqrt(eigval[m - n:m] - sigman)
# compute whitening matrix
W = np.zeros((m, n))
for kk in range(n):
W[:, kk] = wscale[kk] * eigvec[:, m - n + kk].T
W = W.T
# compute whitened signal vector. Note the dimensionality is reduced from [mxn]
# to [nxn] making the computation simpler.
Z = np.dot(W, X)
#=========================================================================
# Compute Spatial Time Frequency Distribution
#=========================================================================
stfd = np.zeros((n, n, nfbins, ntbins + 1), dtype='complex128')
Za = np.array(Z.copy())
# compute auto terms
for ii in range(n):
pswvd, tswvd, fswvd = spwvd(Za[ii].reshape(maxn), **tfkwargs)
stfd[ii, ii, :, :] = pswvd
# compute cross terms
for jj in range(n):
for kk in range(jj, n):
pswvd, tswvd, fswvd = spwvd([Za[jj].reshape(maxn), Za[kk].reshape(maxn)],
**tfkwargs)
stfd[jj, kk, :, :] = pswvd
stfd[kk, jj, :, :] = pswvd.conj()
#=========================================================================
# Compute criteria for cross terms
#=========================================================================
stfdTr = np.zeros((nfbins, ntbins))
C = np.zeros((nfbins, ntbins))
for ff in range(nfbins):
for tt in range(ntbins):
# compensate for noise
stfd[:, :, ff, tt] = stfd[:, :, ff, tt] - \
sigman * np.matrix(W) * np.matrix(W.T)
# compute the trace
stfdTr[ff, tt] = abs(np.trace(stfd[:, :, ff, tt]))
# compute mean over entire t-f plane
trmean = stfdTr.mean()
# find t-f points that meet the criteria
fspot, tspot = np.nonzero(stfdTr > trmean)
for ll in range(len(fspot)):
treig = abs(np.linalg.eig(stfd[:, :, fspot[ll], tspot[ll]])[0])
if sum(treig) != 0 and sum(treig) > tftol:
C[fspot[ll], tspot[ll]] = max(treig) / sum(treig)
else:
C[fspot[ll], tspot[ll]] = 0
# compute gradients and jacobi matrices
negjacobi = np.zeros((nfbins, ntbins))
smallgrad = np.zeros((nfbins, ntbins))
maxpoints = np.zeros((nfbins, ntbins))
gradt, gradf = np.gradient(C)
Jtt, Jtf = np.gradient(gradt)
Jft, Jff = np.gradient(gradf)
# get points when L2 of gradient is smaller than tolerance level
smallgrad = np.where(np.sqrt(gradt**2 + gradf**2) < tftol, 1, 0)
# get points where the Jacobi is negative definite
detjacobi = Jtt * Jff - Jtf * Jft
negjacobi = np.where(detjacobi > 0, 1, 0) * np.where(Jtt < 0, 1, 0)\
* np.where((Jtt + Jff) < 0, 1, 0)
maxpoints = smallgrad * negjacobi
gfspot, gtspot = np.nonzero(maxpoints)
ntfpoints = len(gfspot)
if ntfpoints == 0:
raise ValueError('Found no tf points, relax tolerance')
else:
print 'Found ' + str(ntfpoints) + ' t-f points'
for rr in range(ntfpoints):
if rr == 0:
Rjd = stfd[:, :, gfspot[rr], gtspot[rr]]
else:
Rjd = np.concatenate(
(Rjd, stfd[:, :, gfspot[rr], gtspot[rr]]), axis=1)
Rjd = np.array(Rjd)
#=========================================================================
# Calculate Joint Diagonalization
#=========================================================================
# get size of array of matrices to be diagonalized
mtf, nm = Rjd.shape # mtf is number of t-f points, nm is number of matrices
# set up some initial parameters
V = np.eye(mtf)
# update boolean
encore = True
# Total number of rotations
updates = 0
sweep = 0
# print 'Computing Joint Diagonalization'
# Joint diagonalization proper
# ============================
while encore:
# reset some parameters
encore = False
sweep += 1
upds = 0
Vkeep = V
for p in range(mtf):
for q in range(p + 1, mtf):
# set up indices
qi = np.arange(start=q, stop=nm, step=mtf)
pi = np.arange(start=p, stop=nm, step=mtf)
# computation of Givens angle
g = np.array([Rjd[p, pi] - Rjd[q, qi], Rjd[p, qi], Rjd[q, pi]])
gg = np.real(np.dot(g, g.T))
ton = gg[0, 0] - gg[1, 1]
toff = gg[0, 1] + gg[1, 0]
theta = 0.5 * np.arctan2(toff, ton + np.sqrt(ton**2 + toff**2))
# Givens update
if abs(theta) > tftol:
encore = True
upds += 1
c = np.cos(theta)
s = np.sin(theta)
G = np.matrix([[c, -s], [s, c]])
pair = np.array([p, q])
V[:, pair] = V[:, pair] * G
Rjd[pair, :] = G.T * Rjd[pair, :]
Rjd[:, np.concatenate([pi, qi])] = np.append(
c * Rjd[:, pi] + s * Rjd[:, qi],
-s * Rjd[:, pi] + c * Rjd[:, qi], axis=1)
updates += upds
print 'Updated ' + str(updates) + ' times.'
# compute estimated signal matrix
Se = np.dot(V.T, Z)
# compute estimated mixing matrix
Ae = np.dot(np.linalg.pinv(W), V)
return Se, Ae
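# A minimal usage sketch for stfbss (illustrative only, not executed on
# import): two synthetic sources are mixed onto three channels and then
# separated. It assumes the module-level spwvd function called above is
# available; the mixing matrix, window sizes and noise level are assumptions
# made for the example, and with other data or tolerances stfbss may report
# that no t-f points were found.
def _demo_stfbss():
    np.random.seed(0)
    npts = 2**10
    tt = np.arange(npts)
    s1 = np.cos(2 * np.pi * .05 * tt)
    s2 = np.sign(np.cos(2 * np.pi * .013 * tt))
    S = np.vstack([s1, s2])
    A = np.array([[1., .6], [.4, 1.], [.8, .3]])
    X = np.dot(A, S) + .05 * np.random.randn(3, npts)
    Se, Ae = stfbss(X, nsources=2, ng=2**3 - 1, nh=2**6 - 1, tstep=2**5,
                    df=1.0, nfbins=2**7)
    return Se, Ae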
| gpl-3.0 |
pv/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
| bsd-3-clause |
jwbono/PyNFG | pynfg/levelksolutions/mcrl.py | 1 | 12877 | # -*- coding: utf-8 -*-
"""
Implements Monte Carlo Reinforcement Learning for iterSemiNFG objects
Created on Mon Feb 18 09:03:32 2013
Copyright (C) 2013 James Bono
GNU Affero General Public License
"""
from __future__ import division
import time
import copy
import numpy as np
import matplotlib.pylab as plt
from pynfg.utilities.utilities import iterated_input_dict
import warnings
import sys
class EWMA_MCRL(object):
"""
Finds the **uncoordinated** best policy using reinforcement learning.
:arg Game: The iterated semi-NFG on which to perform the RL
:type Game: iterSemiNFG
:arg specs: A nested dictionary containing specifications of the
game. See below for details
:type specs: dict
The specs dictionary is a triply nested dictionary. The first
level of keys is player names. For each player there is an entry with key
Level : int
The player's level
The rest of the entries are basenames. The value of each basename is a
dictionary containing:
J : int, list, or np.array
The number of runs per training episode. If a schedule is desired, enter a list or np.array with size equal to N.
N : int
The number of training episodes
L0Dist : ndarray, str, None
If ndarray, then the level 0 CPT is set to
L0Dist. If L0Dist is 'uniform', then all Level 0 CPTs are set to
the uniform distribution. If L0Dist is None, then the level 0 CPT
is set to the CPT of the inputted game.
alpha : int, list or np.array
The exponential weight for the moving average. If a schedule is
desired, enter a list or np.array with size equal to N
delta : float
The discount factor
eps : float
The maximum step-size for policy improvements
pureout : bool
if True, the policy is turned into a pure policy at the end
of training by assigning argmax actions prob 1. Default is False
"""
def __init__(self, Game, specs):
self.Game = copy.deepcopy(Game)
self.specs = specs
self.trained_CPTs = {}
self.figs = {}
for player in Game.players:
basenames = set(map(lambda x: x.basename, Game.partition[player]))
for bn in basenames:
self.figs[bn]={}
self.trained_CPTs[player] = {}
self.trained_CPTs[player][bn] = {}
                self.trained_CPTs[player][bn][0] = self._set_L0_CPT(player, bn)
self.high_level = max(map(lambda x: self.specs[x]['Level'], Game.players))
    def _set_L0_CPT(self, player, bn):
        """ Sets the level 0 CPT for basename bn of the given player"""
        Game = self.Game
        ps = self.specs
        if ps[player][bn]['L0Dist'] == 'uniform':
            return Game.bn_part[bn][0].uniformCPT(setCPT=False)
        elif ps[player][bn]['L0Dist'] is None:
            warnings.warn("No entry for L0Dist for player %s, "
                          "setting to current CPT" % player)
            return Game.bn_part[bn][0].CPT
        elif type(ps[player][bn]['L0Dist']) == np.ndarray:
            return ps[player][bn]['L0Dist']
def train_node(self, bn, level, setCPT=False):
""" Use EWMA MC RL to approximate the optimal CPT at bn given Game
:arg bn: the basename of the node with the CPT to be trained
:type bn: str
:arg level: The level at which to train the basename
:type level: int
"""
sys.stdout.write('\r')
print 'Training ' + bn + ' at level '+ str(level)
specs = self.specs
Game = copy.deepcopy(self.Game)
player = Game.bn_part[bn][0].player
basedict = specs[player][bn]
J, N, alpha, delta, eps, pureout = basedict['J'], basedict['N'], \
basedict['alpha'], basedict['delta'], basedict['eps'], \
basedict['pureout']
#Set other CPTs to level-1. Works even if CPTs aren't pointers.
for o_player in Game.players:
bn_list = list(set(map(lambda x: x.basename, Game.partition[o_player])))
for base in bn_list:
if base != bn:
for dn in Game.bn_part[base]:
try:
dn.CPT = \
(self.trained_CPTs[o_player][base][level - 1])
except KeyError:
raise KeyError('Need to train other players at level %s'
% str(level-1))
# initializing training schedules from scalar inputs
if isinstance(J, (int)):
J = J*np.ones(N)
if isinstance(alpha, (int, long, float)):
alpha = alpha*np.ones(N)
if isinstance(eps, (int, long, float)):
eps = eps*np.ones(N)
# getting shorter/more descriptive variable names to work with
T0 = Game.starttime
T = Game.endtime+1
shape = Game.bn_part[bn][0].CPT.shape
shape_last = shape[-1]
for dn in Game.bn_part[bn]: # pointing all CPTs to T0, i.e. single policy
dn.CPT = Game.bn_part[bn][0].CPT
visit = set() # dict of the messages and mapairs visited throughout training
R = 0 # average reward with initial value of zero
A = 0 # normalizing constant for average reward
B = {} # dict associates messages and mapairs with beta exponents
D = {} # dict associates messages and mapairs with norm constants for Q,V
Q = np.zeros(shape) # Qtable
V = np.zeros(shape[:-1]) # Value table
Rseries = np.zeros(N) # tracking average reward for plotting convergence
np.seterr(invalid='ignore', divide='ignore')
for n in xrange(N):
sys.stdout.write('\r')
sys.stdout.write('Iteration ' + str(n))
sys.stdout.flush()
indicaten = np.zeros(Q.shape) # indicates visited mapairs
visitn = set() # dict of messages and mapairs visited in episode n
Rseries[n] = R # adding the most recent ave reward to the data series
A *= alpha[n] # rescaling A at start of new episode, see writeup
for j in xrange(int(J[n])):
visitj = set() # visitj must be cleared at the start of every run
for t in xrange(T0, T):
#import pdb; pdb.set_trace()
#Game.bn_part[bn][t-T0].CPT = copy.copy(Game.bn_part[bn][0].CPT)
Game.sample_timesteps(t, t) # sampling the timestep
rew = Game.reward(player, t) # getting the reward
mapair = Game.bn_part[bn][t-T0].get_CPTindex()
A += 1
r = R
R = (1/A)*((A-1)*r+rew)
xm = set() # used below to keep track of updated messages
for values in visitj:
b = B[values] # past values
d = D[values]
q = Q[values]
bb = (b+1) # update equations double letters are time t
dd = d+1
qq = (1/dd)*(d*q+(delta**(bb-1))*(rew))
B[values] = bb # update dictionaries
D[values] = dd
Q[values] = qq
message = values[:-1] # V indexed by message only
if message not in xm: # updating message only once
b = B[message] # past values
d = D[message]
v = V[message]
bb = (b+1) # update equations double letters are time t
dd = d+1
vv = (1/dd)*(d*v+(delta**(bb-1))*(rew))
B[message] = bb # update dictionaries
D[message] = dd
V[message] = vv
xm.add(message) # so that message isn't updated again
if mapair not in visitj: # first time in j visiting mapair
message = mapair[:-1]
messtrue = (message not in xm) # for checking message visited
B[mapair] = 1 # whenever mapair not in visitj
if mapair not in visitn and mapair not in visit:
D[mapair] = 1
Q[mapair] = rew
if messtrue:
D[message] = 1
V[message] = rew
elif mapair not in visitn:
D[mapair] = alpha[n]*D[mapair]+1
Q[mapair] = (1/D[mapair])*((D[mapair]-1)*Q[mapair]
+(rew))
if messtrue:
D[message] = alpha[n]*D[message]+1
V[message] = (1/D[message])*((D[message]-1)*\
V[message]+(rew))
else:
D[mapair] += 1
Q[mapair] = (1/D[mapair])*((D[mapair]-1)*Q[mapair]\
+ (rew))
if messtrue:
D[message] += 1
V[message] = (1/D[message])*((D[message]-1) *
V[message]+(rew))
if messtrue:
B[message] = 1
visit.add(mapair) # mapair added to visit sets the first time
visitn.add(mapair)
visitj.add(mapair)
indicaten[mapair] = 1 # only visited actions are updated
# update CPT with shift towards Qtable argmax actions.
shift = Q-V[...,np.newaxis]
idx = np.nonzero(shift) # indices of nonzero shifts (avoid divide by 0)
# normalizing shifts to be a % of message's biggest shift
shiftnorm = np.absolute(shift).max(axis=-1)[...,np.newaxis]
# for each mapair shift only eps% of the percent shift
updater = eps[n]*indicaten*Game.bn_part[bn][0].CPT/shiftnorm
# increment the CPT
Game.bn_part[bn][0].CPT[idx] += updater[idx]*shift[idx]
# normalize after the shift
CPTsum = Game.bn_part[bn][0].CPT.sum(axis=-1)
Game.bn_part[bn][0].CPT /= CPTsum[...,np.newaxis]
if pureout: #if True, output is a pure policy
Game.bn_part[bn][0].makeCPTpure()
self.trained_CPTs[player][bn][level] = Game.bn_part[bn][0].CPT
if setCPT:
for node in self.Game.bn_part[bn]:
node.CPT = Game.bn_part[bn][0].CPT
for tau in xrange(1, T-T0): #before exit, make CPTs independent in memory
Game.bn_part[bn][tau].CPT = copy.copy(Game.bn_part[bn][0].CPT)
plt.figure()
plt.plot(Rseries, label = str(bn + ' Level ' + str(level)))
#plotting rseries to gauge convergence
plt.legend()
fig = plt.gcf()
self.figs[bn][str(level)] = fig
sys.stdout.write('\n')
def solve_game(self, setCPT=False):
"""Solves the game for given player levels"""
Game = self.Game
ps = self.specs
for level in np.arange(1, self.high_level):
for player in Game.players:
basenames = set(map(lambda x: x.basename, Game.partition[player]))
for controlled in basenames:
self.train_node(controlled, level, setCPT=setCPT)
for player in Game.players:
basenames = set(map(lambda x: x.basename, Game.partition[player]))
for controlled in basenames:
if ps[player]['Level'] == self.high_level:
self.train_node(controlled, self.high_level, setCPT=setCPT)
def mcrl_dict(Game, Level, J, N, delta, alpha=.5, eps=.2, L0Dist=None,
pureout=False):
"""
Creates the specs shell for a game to be solved using MCRL.
:arg Game: An iterated SemiNFG
:type Game: SemiNFG
.. seealso::
See the EWMA_MCRL documentation (above) for details of the optional arguments
"""
return iterated_input_dict(Game, [('Level', Level)], [('L0Dist', L0Dist), ('J', J),
('N', N), ('delta', delta),
('alpha', alpha), ('eps', eps),
('pureout', pureout)])
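# A minimal usage sketch (illustrative only, not executed on import). It
# assumes `G` is an existing iterSemiNFG with players 'p1' and 'p2' whose
# decision nodes have basenames 'D1' and 'D2'; those names and the training
# values below are assumptions made for the example, laid out according to the
# specs structure documented in EWMA_MCRL. mcrl_dict above offers a helper for
# building such a shell.
def _example_mcrl_usage(G):
    node_spec = {'J': 50, 'N': 20, 'L0Dist': 'uniform', 'alpha': .5,
                 'delta': .9, 'eps': .2, 'pureout': False}
    specs = {'p1': {'Level': 1, 'D1': dict(node_spec)},
             'p2': {'Level': 2, 'D2': dict(node_spec)}}
    solver = EWMA_MCRL(G, specs)
    solver.solve_game(setCPT=True)
    return solver.trained_CPTs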
| agpl-3.0 |
hksonngan/pynopticon | src/em/examples/pdfestimation.py | 4 | 1418 | #! /usr/bin/env python
# Last Change: Sun Jul 22 12:00 PM 2007 J
# Example of doing pdf estimation with EM algorithm. Requires matplotlib.
import numpy as N
import pylab as P
from scikits.learn.machine.em import EM, GM, GMM
import utils
oldfaithful = utils.get_faithful()
# We want the relationship between d(t) and w(t+1), but get_faithful gives
# d(t), w(t), so we have to shift to get the "usual" faithful data
waiting = oldfaithful[1:, 1:]
duration = oldfaithful[:len(waiting), :1]
dt = N.concatenate((duration, waiting), 1)
# Scale the data so that each component is in [0..1]
dt = utils.scale(dt)
# This function trains a mixture model with k components and returns the trained
# model and the BIC
def cluster(data, k, mode = 'full'):
d = data.shape[1]
gm = GM(d, k, mode)
gmm = GMM(gm)
em = EM()
em.train(data, gmm, maxiter = 20)
return gm, gmm.bic(data)
# bc will contain a list of BIC values for each model trained
bc = []
mode = 'full'
P.figure()
for k in range(1, 5):
# Train a model of k component, and plots isodensity curve
P.subplot(2, 2, k)
gm, b = cluster(dt, k = k, mode = mode)
bc.append(b)
X, Y, Z, V = gm.density_on_grid()
P.contour(X, Y, Z, V)
P.plot(dt[:, 0], dt[:, 1], '.')
P.xlabel('duration time (scaled)')
P.ylabel('waiting time (scaled)')
print "According to the BIC, model with %d components is better" % (N.argmax(bc) + 1)
| gpl-3.0 |
robin-lai/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
gmatteo/pymatgen | pymatgen/phonon/tests/test_plotter.py | 5 | 3805 | import json
import os
import unittest
from io import open
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
from pymatgen.phonon.dos import CompletePhononDos
from pymatgen.phonon.plotter import PhononBSPlotter, PhononDosPlotter, ThermoPlotter
from pymatgen.util.testing import PymatgenTest
class PhononDosPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "NaCl_complete_ph_dos.json"), "r") as f:
self.dos = CompletePhononDos.from_dict(json.load(f))
self.plotter = PhononDosPlotter(sigma=0.2, stack=True)
self.plotter_nostack = PhononDosPlotter(sigma=0.2, stack=False)
def test_add_dos_dict(self):
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 0)
self.plotter.add_dos_dict(self.dos.get_element_dos(), key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 2)
def test_get_dos_dict(self):
self.plotter.add_dos_dict(self.dos.get_element_dos(), key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
for el in ["Na", "Cl"]:
self.assertIn(el, d)
def test_plot(self):
# Disabling latex for testing.
from matplotlib import rc
rc("text", usetex=False)
self.plotter.add_dos("Total", self.dos)
self.plotter.get_plot(units="mev")
self.plotter_nostack.add_dos("Total", self.dos)
self.plotter_nostack.get_plot(units="mev")
class PhononBSPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "NaCl_phonon_bandstructure.json"), "r") as f:
d = json.loads(f.read())
self.bs = PhononBandStructureSymmLine.from_dict(d)
self.plotter = PhononBSPlotter(self.bs)
def test_bs_plot_data(self):
self.assertEqual(
len(self.plotter.bs_plot_data()["distances"][0]),
51,
"wrong number of distances in the first branch",
)
self.assertEqual(len(self.plotter.bs_plot_data()["distances"]), 4, "wrong number of branches")
self.assertEqual(
sum([len(e) for e in self.plotter.bs_plot_data()["distances"]]),
204,
"wrong number of distances",
)
self.assertEqual(self.plotter.bs_plot_data()["ticks"]["label"][4], "Y", "wrong tick label")
self.assertEqual(
len(self.plotter.bs_plot_data()["ticks"]["label"]),
8,
"wrong number of tick labels",
)
def test_plot(self):
# Disabling latex for testing.
from matplotlib import rc
rc("text", usetex=False)
self.plotter.get_plot(units="mev")
def test_plot_compare(self):
# Disabling latex for testing.
from matplotlib import rc
rc("text", usetex=False)
self.plotter.plot_compare(self.plotter, units="mev")
class ThermoPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "NaCl_complete_ph_dos.json"), "r") as f:
self.dos = CompletePhononDos.from_dict(json.load(f))
self.plotter = ThermoPlotter(self.dos, self.dos.structure)
def test_plot_functions(self):
# Disabling latex for testing.
from matplotlib import rc
rc("text", usetex=False)
self.plotter.plot_cv(5, 100, 5, show=False)
self.plotter.plot_entropy(5, 100, 5, show=False)
self.plotter.plot_internal_energy(5, 100, 5, show=False)
self.plotter.plot_helmholtz_free_energy(5, 100, 5, show=False)
self.plotter.plot_thermodynamic_properties(5, 100, 5, show=False, fig_close=True)
if __name__ == "__main__":
unittest.main()
| mit |
taynaud/sparkit-learn | splearn/feature_selection/tests/test_variance_threshold.py | 1 | 3481 | import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_almost_equal
from sklearn.feature_selection import VarianceThreshold
from splearn.feature_selection import SparkVarianceThreshold
from splearn.rdd import DictRDD
from splearn.utils.testing import SplearnTestCase, assert_true
from splearn.utils.validation import check_rdd_dtype
class TestVarianceThreshold(SplearnTestCase):
def test_same_variances(self):
local = VarianceThreshold()
dist = SparkVarianceThreshold()
shapes = [((10, 5), None),
((1000, 20), None),
((1000, 20), 100),
((10000, 100), None),
((10000, 100), 600)]
for shape, block_size in shapes:
X_dense, X_dense_rdd = self.make_dense_rdd()
X_sparse, X_sparse_rdd = self.make_sparse_rdd()
Z = DictRDD([X_sparse_rdd, X_dense_rdd], columns=('X', 'Y'))
local.fit(X_dense)
dist.fit(X_dense_rdd)
assert_array_almost_equal(local.variances_, dist.variances_)
local.fit(X_sparse)
dist.fit(X_sparse_rdd)
assert_array_almost_equal(local.variances_, dist.variances_)
dist.fit(Z)
assert_array_almost_equal(local.variances_, dist.variances_)
def test_same_transform_result(self):
local = VarianceThreshold()
dist = SparkVarianceThreshold()
X_dense, X_dense_rdd = self.make_dense_rdd()
X_sparse, X_sparse_rdd = self.make_sparse_rdd()
Z_rdd = DictRDD([X_sparse_rdd, X_dense_rdd], columns=('X', 'Y'))
result_local = local.fit_transform(X_dense)
result_dist = dist.fit_transform(X_dense_rdd)
assert_true(check_rdd_dtype(result_dist, (np.ndarray,)))
assert_array_almost_equal(result_local, result_dist.toarray())
result_local = local.fit_transform(X_sparse)
result_dist = dist.fit_transform(X_sparse_rdd)
assert_true(check_rdd_dtype(result_dist, (sp.spmatrix,)))
assert_array_almost_equal(result_local.toarray(),
result_dist.toarray())
result_dist = dist.fit_transform(Z_rdd)[:, 'X']
assert_true(check_rdd_dtype(result_dist, (sp.spmatrix,)))
assert_array_almost_equal(result_local.toarray(),
result_dist.toarray())
def test_same_transform_with_treshold(self):
local = VarianceThreshold(.03)
dist = SparkVarianceThreshold(.03)
X_dense, X_dense_rdd = self.make_dense_rdd()
X_sparse, X_sparse_rdd = self.make_sparse_rdd()
Z_rdd = DictRDD([X_sparse_rdd, X_dense_rdd], columns=('X', 'Y'))
result_local = local.fit_transform(X_dense)
result_dist = dist.fit_transform(X_dense_rdd)
assert_true(check_rdd_dtype(result_dist, (np.ndarray,)))
assert_array_almost_equal(result_local, result_dist.toarray())
result_local = local.fit_transform(X_sparse)
result_dist = dist.fit_transform(X_sparse_rdd)
assert_true(check_rdd_dtype(result_dist, (sp.spmatrix,)))
assert_array_almost_equal(result_local.toarray(),
result_dist.toarray())
result_dist = dist.fit_transform(Z_rdd)[:, 'X']
assert_true(check_rdd_dtype(result_dist, (sp.spmatrix,)))
assert_array_almost_equal(result_local.toarray(),
result_dist.toarray())
| apache-2.0 |
mattdelhey/rice-scrape | scrape/scrape_sched.py | 2 | 4792 | import dryscrape
import sqlalchemy
import os
import time
import sys
import numpy as np
import pandas as pd
NetID = ''
Password = ''
project_dir = '/Users/mdelhey/rice-scrape/'
YEAR_SCRAPE = '2013'
TERM_SCRAPE = 'Spring'
dbuser = 'mdelhey'
dbname = 'ricedb'
dbhost = 'localhost'
tbl_out = 't_evaluations_raw'
tbl_action = 'replace' # replace / append / fail
f_out = 'data/evals_tmp.csv'
# Boilerplate
os.chdir(project_dir)
from helpers import try_row_scrape
from helpers import login_to_sched
try: __file__
except: __file__ = 'repl'
# get crn's from sql
rdb_con = sqlalchemy.create_engine('postgresql://%s@%s/%s' % (dbuser, dbhost, dbname))
qry = """ select * from t_courses_raw2 where year = '%s' and term = '%s'; """ % (YEAR_SCRAPE, TERM_SCRAPE)
data_courses = pd.read_sql(qry, rdb_con)
# Setup dataframe
eval_cols = ['courseid', 'yearterm', 'year', 'term', 'crn', 'instructor', 'r_organization',
'r_assignments', 'r_quality', 'r_challenge', 'r_workload', 'r_satisfies', 'r_grade',
'r_pf', 'n_organization', 'n_assignments', 'n_quality', 'n_challenge', 'n_workload',
'n_satisfies', 'n_grade', 'n_pf', 'raw_title', 'raw_comments']
data_evals = pd.DataFrame(None, columns=eval_cols)
#comment_cols = ['courseid', 'yearterm', 'year', 'term', 'crn', 'instructor', 'n_comments', 'comment']
#data_comments = pd.DataFrame(None, columns=comment_cols)
# set up a web scraping session
sess = dryscrape.Session(base_url = 'http://scheduleplanner.rice.edu/')
# we don't need images
sess.set_attribute('auto_load_images', False)
# visit courses.rice.edu
print '[%s] Visiting scheduleplanner.rice.edu' % __file__
sess.visit('/')
# visit arbitrary page to login
print '[%s] Logging in to scheduleplanner' % __file__
login_to_sched(NetID, Password, sess)
# Determine term code
if TERM_SCRAPE == 'Fall':
p_term = str(int(YEAR_SCRAPE) + 1) + '10'
if TERM_SCRAPE == 'Spring':
p_term = str(int(YEAR_SCRAPE)) + '20'
if TERM_SCRAPE == 'Summer':
p_term = str(int(YEAR_SCRAPE)) + '30'
# Conver to list
crns = list(data_courses['crn'].astype(int))
crns = crns[0:25]
# time scrape
start_time = time.time()
# Loop through crn's and scrape
print '[%s] Scraping evaluations for %i classes' % (__file__, len(crns))
for idx,c in enumerate(crns):
if ((idx % 50) == 0): print '[%s] ... Class %i' % (__file__, idx)
# Generate link, navigate to it
url = '/wsSchedule/Account/CourseEvals.aspx?H=%s&T=%s' % (c, p_term)
#print url
sess.visit(url)
#sess.render('tmp.png')
# Insert basic data
row = { i: None for i in data_evals.columns }
row['yearterm'] = YEAR_SCRAPE + ' ' + TERM_SCRAPE
row['term'] = TERM_SCRAPE
row['year'] = YEAR_SCRAPE
row['crn'] = c
row['courseid'] = row['yearterm'] + '_' + str(row['crn'])
row['instructor'] = try_row_scrape('//*[@id="lblInstructor"]', sess)
row['r_organization'] = try_row_scrape('//*[@id="lblClassMean1"]', sess)
row['n_organization'] = try_row_scrape('//*[@id="lblResponses1"]', sess)
row['r_assignments'] = try_row_scrape('//*[@id="lblClassMean2"]', sess)
row['n_assignments'] = try_row_scrape('//*[@id="lblResponses2"]', sess)
row['r_quality'] = try_row_scrape('//*[@id="lblClassMean3"]', sess)
row['n_quality'] = try_row_scrape('//*[@id="lblResponses3"]', sess)
row['r_challenge'] = try_row_scrape('//*[@id="lblClassMean4"]', sess)
row['n_challenge'] = try_row_scrape('//*[@id="lblResponses4"]', sess)
row['r_workload'] = try_row_scrape('//*[@id="lblClassMean5"]', sess)
row['n_workload'] = try_row_scrape('//*[@id="lblResponses5"]', sess)
row['r_satisfies'] = try_row_scrape('//*[@id="lblClassMean6"]', sess)
row['n_satisfies'] = try_row_scrape('//*[@id="lblResponses6"]', sess)
row['r_grade'] = try_row_scrape('//*[@id="lblClassMean7"]', sess)
row['n_grade'] = try_row_scrape('//*[@id="lblResponses7"]', sess)
row['r_pf'] = try_row_scrape('//*[@id="lblClassMean8"]', sess)
row['n_pf'] = try_row_scrape('//*[@id="lblResponses8"]', sess)
row['raw_title'] = try_row_scrape('//*[@id="lblTitle"]', sess)
row['raw_comments'] = try_row_scrape('//*[@id="gvEvalsComments"]/tbody', sess)
# append row
data_evals = data_evals.append(row, ignore_index=True)
print '[%s] scrape took %.2f minutes' % (__file__, (time.time() - start_time)/60)
print '[%s] saving csv to %s' % (__file__, f_out)
data_evals.to_csv(f_out, index=False)
print '[%s] saving (action = %s) to postgres (table = %s)' % (__file__, tbl_action, tbl_out)
rdb_con = sqlalchemy.create_engine('postgresql://%s@%s/%s' % (dbuser, dbhost, dbname))
data_evals.to_sql(tbl_out, rdb_con, if_exists = tbl_action, index = False)
| mit |
yyjiang/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100.000 samples (1.000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
appapantula/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier`
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
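# Added illustrative sketch (not part of the original example): with
# voting='soft', the VotingClassifier averages the per-classifier class
# probabilities using the supplied weights and predicts the class with the
# highest weighted-average probability.  The helper below (a hypothetical name,
# relying on the numpy import above) only illustrates that averaging step; the
# actual predictions in this example come from eclf.
def weighted_soft_vote(probas, weights):
    # probas: sequence of (n_samples, n_classes) probability arrays, one per
    # classifier; weights: one scalar weight per classifier.
    avg = np.average(np.asarray(probas), axis=0, weights=weights)
    return avg.argmax(axis=1)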
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
f3r/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 62 | 6762 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.linear_model import LogisticRegression, SGDClassifier, Lasso
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
iris = datasets.load_iris()
data, y = iris.data, iris.target
rng = np.random.RandomState(0)
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(data)
clf.set_params(penalty="l1")
clf.fit(X, y)
X_new = assert_warns(
DeprecationWarning, clf.transform, X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, y)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == y), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
for threshold in ["gobbledigook", ".5 * gobbledigook"]:
model = SelectFromModel(clf, threshold=threshold)
model.fit(data, y)
assert_raises(ValueError, model.transform, data)
def test_input_estimator_unchanged():
"""
Test that SelectFromModel fits on a clone of the estimator.
"""
est = RandomForestClassifier()
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
assert_true(transformer.estimator is est)
@skip_if_32bit
def test_feature_importances():
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0)
est = RandomForestClassifier(n_estimators=50, random_state=0)
for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
transformer = SelectFromModel(estimator=est, threshold=threshold)
transformer.fit(X, y)
assert_true(hasattr(transformer.estimator_, 'feature_importances_'))
X_new = transformer.transform(X)
assert_less(X_new.shape[1], X.shape[1])
importances = transformer.estimator_.feature_importances_
feature_mask = np.abs(importances) > func(importances)
assert_array_almost_equal(X_new, X[:, feature_mask])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
est = RandomForestClassifier(n_estimators=50, random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(X, y, sample_weight=sample_weight)
importances = transformer.estimator_.feature_importances_
transformer.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = transformer.estimator_.feature_importances_
assert_almost_equal(importances, importances_bis)
# For the Lasso and related models, the threshold defaults to 1e-5
transformer = SelectFromModel(estimator=Lasso(alpha=0.1))
transformer.fit(X, y)
X_new = transformer.transform(X)
mask = np.abs(transformer.estimator_.coef_) > 1e-5
assert_array_equal(X_new, X[:, mask])
def test_partial_fit():
est = PassiveAggressiveClassifier(random_state=0, shuffle=False)
transformer = SelectFromModel(estimator=est)
transformer.partial_fit(data, y,
classes=np.unique(y))
old_model = transformer.estimator_
transformer.partial_fit(data, y,
classes=np.unique(y))
new_model = transformer.estimator_
assert_true(old_model is new_model)
X_transform = transformer.transform(data)
transformer.fit(np.vstack((data, data)), np.concatenate((y, y)))
assert_array_equal(X_transform, transformer.transform(data))
def test_warm_start():
est = PassiveAggressiveClassifier(warm_start=True, random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
old_model = transformer.estimator_
transformer.fit(data, y)
new_model = transformer.estimator_
assert_true(old_model is new_model)
def test_prefit():
"""
Test all possible combinations of the prefit parameter.
"""
    # Passing a prefit parameter with the selected model
    # and fitting an unfit model with prefit=False should give the same results.
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf)
model.fit(data, y)
X_transform = model.transform(data)
clf.fit(data, y)
model = SelectFromModel(clf, prefit=True)
assert_array_equal(model.transform(data), X_transform)
# Check that the model is rewritten if prefit=False and a fitted model is
# passed
model = SelectFromModel(clf, prefit=False)
model.fit(data, y)
assert_array_equal(model.transform(data), X_transform)
# Check that prefit=True and calling fit raises a ValueError
model = SelectFromModel(clf, prefit=True)
assert_raises(ValueError, model.fit, data, y)
def test_threshold_string():
est = RandomForestClassifier(n_estimators=50, random_state=0)
model = SelectFromModel(est, threshold="0.5*mean")
model.fit(data, y)
X_transform = model.transform(data)
# Calculate the threshold from the estimator directly.
est.fit(data, y)
threshold = 0.5 * np.mean(est.feature_importances_)
mask = est.feature_importances_ > threshold
assert_array_equal(X_transform, data[:, mask])
def test_threshold_without_refitting():
"""Test that the threshold can be set without refitting the model."""
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf, threshold=0.1)
model.fit(data, y)
X_transform = model.transform(data)
# Set a higher threshold to filter out more features.
model.threshold = 1.0
assert_greater(X_transform.shape[1], model.transform(data).shape[1])
| bsd-3-clause |
terkkila/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
rc/sfepy | examples/large_deformation/balloon.py | 2 | 9314 | r"""
Inflation of a Mooney-Rivlin hyperelastic balloon.
This example serves as a verification of the membrane term (``dw_tl_membrane``,
:class:`TLMembraneTerm <sfepy.terms.terms_membrane.TLMembraneTerm>`)
implementation.
Following Rivlin 1952 and Dumais, the analytical relation between a
relative stretch :math:`L = r / r_0` of a thin (membrane) sphere made of the
Mooney-Rivlin material of the undeformed radius :math:`r_0`, membrane
thickness :math:`h_0` and the inner pressure :math:`p` is
.. math::
p = 4 \frac{h_0}{r_0} (\frac{1}{L} - \frac{1}{L^7}) (c_1 + c_2 L^2) \;,
where :math:`c_1`, :math:`c_2` are the Mooney-Rivlin material parameters.
In the equations below, only the surface of the domain is mechanically
important - a stiff 2D membrane is embedded in the 3D space and coincides with
the balloon surface. The volume is very soft, to simulate a fluid-filled
cavity. A similar model could be used to model e.g. plant cells. The balloon
surface is loaded by prescribing the inner volume change :math:`\omega(t)`.
The fluid pressure in the cavity is a single scalar value, enforced by the
``'integral_mean_value'`` linear combination condition.
Find :math:`\ul{u}(\ul{X})` and a constant :math:`p` such that:
- balance of forces:
.. math::
    \intl{\Omega\suz}{} \left( \ull{S}\eff(\ul{u})
    - p\; J \ull{C}^{-1} \right) : \delta \ull{E}(\ul{u}; \ul{v}) \difd{V}
    + \intl{\Gamma\suz}{} \ull{S}\eff(\tilde{\ul{u}}) : \delta
    \ull{E}(\tilde{\ul{u}}; \tilde{\ul{v}}) h_0 \difd{S}
    = 0 \;, \quad \forall \ul{v} \in [H^1_0(\Omega)]^3 \;,
- volume conservation:
.. math::
\int\limits_{\Omega_0} \left[\omega(t)-J(u)\right] q\, dx = 0
\qquad \forall q \in L^2(\Omega) \;,
where
.. list-table::
:widths: 20 80
* - :math:`\ull{F}`
- deformation gradient :math:`F_{ij} = \pdiff{x_i}{X_j}`
* - :math:`J`
- :math:`\det(F)`
* - :math:`\ull{C}`
- right Cauchy-Green deformation tensor :math:`C = F^T F`
* - :math:`\ull{E}(\ul{u})`
- Green strain tensor :math:`E_{ij} = \frac{1}{2}(\pdiff{u_i}{X_j} +
\pdiff{u_j}{X_i} + \pdiff{u_m}{X_i}\pdiff{u_m}{X_j})`
* - :math:`\ull{S}\eff(\ul{u})`
- effective second Piola-Kirchhoff stress tensor
The effective stress :math:`\ull{S}\eff(\ul{u})` is given by:
.. math::
    \ull{S}\eff(\ul{u}) = \mu J^{-\frac{2}{3}}(\ull{I}
    - \frac{1}{3}\tr(\ull{C}) \ull{C}^{-1})
    + \kappa J^{-\frac{4}{3}} (\tr(\ull{C}) \ull{I} - \ull{C}
    - \frac{2}{6}((\tr{\ull{C}})^2 - \tr{(\ull{C}^2)})\ull{C}^{-1})
    \;.
The :math:`\tilde{\ul{u}}` and :math:`\tilde{\ul{v}}` variables correspond to
:math:`\ul{u}`, :math:`\ul{v}`, respectively, transformed to the membrane
coordinate frame.
Use the following command to show a comparison of the FEM solution with the
above analytical relation (notice the nonlinearity of the dependence)::
python simple.py examples/large_deformation/balloon.py -d 'plot: True'
The agreement should be very good, even though the mesh is coarse.
View the results using::
python postproc.py unit_ball.h5 --wireframe -b -d'u,plot_displacements,rel_scaling=1' --step=-1
This example uses the adaptive time-stepping solver (``'ts.adaptive'``) with
the default adaptivity function :func:`adapt_time_step()
<sfepy.solvers.ts_solvers.adapt_time_step>`. Plot the used time steps by::
python script/plot_times.py unit_ball.h5
"""
from __future__ import absolute_import
import os
import numpy as nm
from sfepy.base.base import Output
from sfepy.discrete.fem import MeshIO
from sfepy.linalg import get_coors_in_ball
from sfepy import data_dir
output = Output('balloon:')
def get_nodes(coors, radius, eps, mode):
if mode == 'ax1':
centre = nm.array([0.0, 0.0, -radius], dtype=nm.float64)
elif mode == 'ax2':
centre = nm.array([0.0, 0.0, radius], dtype=nm.float64)
elif mode == 'equator':
centre = nm.array([radius, 0.0, 0.0], dtype=nm.float64)
else:
raise ValueError('unknown mode %s!' % mode)
return get_coors_in_ball(coors, centre, eps)
def get_volume(ts, coors, region=None):
rs = 1.0 + 1.0 * ts.time
rv = get_rel_volume(rs)
output('relative stretch:', rs)
output('relative volume:', rv)
out = nm.empty((coors.shape[0],), dtype=nm.float64)
out.fill(rv)
return out
def get_rel_volume(rel_stretch):
"""
Get relative volume V/V0 from relative stretch r/r0 of a ball.
"""
return nm.power(rel_stretch, 3.0)
def get_rel_stretch(rel_volume):
"""
Get relative stretch r/r0 from relative volume V/V0 of a ball.
"""
return nm.power(rel_volume, 1.0/3.0)
def get_balloon_pressure(rel_stretch, h0, r0, c1, c2):
"""
Rivlin 1952 + Dumais:
P = 4*h0/r0 * (1/L-1/L^7).*(C1+L^2*C2)
"""
L = rel_stretch
p = 4.0 * h0 / r0 * (1.0/L - 1.0/L**7) * (c1 + c2 * L**2)
return p
def plot_radius(problem, state):
import matplotlib.pyplot as plt
from sfepy.postprocess.time_history import extract_time_history
ths, ts = extract_time_history('unit_ball.h5', 'p e 0')
p = ths['p'][0]
L = 1.0 + ts.times[:p.shape[0]]
L2 = 1.0 + nm.linspace(ts.times[0], ts.times[-1], 1000)
p2 = get_balloon_pressure(L2, 1e-2, 1, 3e5, 3e4)
plt.rcParams['lines.linewidth'] = 3
    plt.rcParams['font.size'] = 16
plt.plot(L2, p2, 'r', label='theory')
plt.plot(L, p, 'b*', ms=12, label='FEM')
plt.title('Mooney-Rivlin hyperelastic balloon inflation')
plt.xlabel(r'relative stretch $r/r_0$')
plt.ylabel(r'pressure $p$')
plt.legend(loc='best')
fig = plt.gcf()
fig.savefig('balloon_pressure_stretch.pdf')
plt.show()
def define(plot=False):
filename_mesh = data_dir + '/meshes/3d/unit_ball.mesh'
conf_dir = os.path.dirname(__file__)
io = MeshIO.any_from_filename(filename_mesh, prefix_dir=conf_dir)
bbox = io.read_bounding_box()
dd = bbox[1] - bbox[0]
radius = bbox[1, 0]
eps = 1e-8 * dd[0]
options = {
'nls' : 'newton',
'ls' : 'ls',
'ts' : 'ts',
'save_times' : 'all',
'output_dir' : '.',
'output_format' : 'h5',
}
if plot:
options['post_process_hook_final'] = plot_radius
fields = {
'displacement': (nm.float64, 3, 'Omega', 1),
'pressure': (nm.float64, 1, 'Omega', 0),
}
materials = {
'solid' : ({
'mu' : 50, # shear modulus of neoHookean term
'kappa' : 0.0, # shear modulus of Mooney-Rivlin term
},),
'walls' : ({
'mu' : 3e5, # shear modulus of neoHookean term
'kappa' : 3e4, # shear modulus of Mooney-Rivlin term
'h0' : 1e-2, # initial thickness of wall membrane
},),
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
'p' : ('unknown field', 'pressure', 1),
'q' : ('test field', 'pressure', 'p'),
'omega' : ('parameter field', 'pressure', {'setter' : 'get_volume'}),
}
regions = {
'Omega' : 'all',
'Ax1' : ('vertices by get_ax1', 'vertex'),
'Ax2' : ('vertices by get_ax2', 'vertex'),
'Equator' : ('vertices by get_equator', 'vertex'),
'Surface' : ('vertices of surface', 'facet'),
}
ebcs = {
'fix1' : ('Ax1', {'u.all' : 0.0}),
'fix2' : ('Ax2', {'u.[0, 1]' : 0.0}),
'fix3' : ('Equator', {'u.1' : 0.0}),
}
lcbcs = {
'pressure' : ('Omega', {'p.all' : None}, None, 'integral_mean_value'),
}
equations = {
'balance'
: """dw_tl_he_neohook.2.Omega(solid.mu, v, u)
+ dw_tl_he_mooney_rivlin.2.Omega(solid.kappa, v, u)
+ dw_tl_membrane.2.Surface(walls.mu, walls.kappa, walls.h0, v, u)
+ dw_tl_bulk_pressure.2.Omega(v, u, p)
= 0""",
'volume'
: """dw_tl_volume.2.Omega(q, u)
= dw_dot.2.Omega(q, omega)""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 6,
'eps_a' : 1e-4,
'eps_r' : 1e-8,
'macheps' : 1e-16,
'lin_red' : 1e-2,
'ls_red' : 0.5,
'ls_red_warp': 0.1,
'ls_on' : 100.0,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
'is_plot' : False,
'problem' : 'nonlinear',
}),
'ts' : ('ts.adaptive', {
't0' : 0.0,
't1' : 5.0,
'dt' : None,
'n_step' : 11,
'dt_red_factor' : 0.8,
'dt_red_max' : 1e-3,
'dt_inc_factor' : 1.25,
'dt_inc_on_iter' : 4,
'dt_inc_wait' : 3,
'verbose' : 1,
'quasistatic' : True,
}),
}
functions = {
'get_ax1' : (lambda coors, domain:
get_nodes(coors, radius, eps, 'ax1'),),
'get_ax2' : (lambda coors, domain:
get_nodes(coors, radius, eps, 'ax2'),),
'get_equator' : (lambda coors, domain:
get_nodes(coors, radius, eps, 'equator'),),
'get_volume' : (get_volume,),
}
return locals()
| bsd-3-clause |
paulthulstrup/moose | modules/tensor_mechanics/tests/drucker_prager/small_deform3.py | 23 | 3585 | #!/usr/bin/env python
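"""
Compare stresses computed by MOOSE against the expected smoothed Drucker-Prager
yield surface on the meridional plane (Tr(stress) versus sqrt(J2)), for several
schemes that convert the cohesion and friction angle into Drucker-Prager
parameters: native, outer_tip, inner_tip, lode_zero and inner_edge.  The
expected curves are plotted together with the MOOSE results read from the gold
CSV files, and the figure is saved to small_deform3.png.
"""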
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected(scheme, sqrtj2):
cohesion = 10
friction_degrees = 35
tip_smoother = 8
friction = friction_degrees * np.pi / 180.0
if (scheme == "native"):
aaa = cohesion
bbb = np.tan(friction)
elif (scheme == "outer_tip"):
aaa = 2 * np.sqrt(3) * cohesion * np.cos(friction) / (3.0 - np.sin(friction))
bbb = 2 * np.sin(friction) / np.sqrt(3) / (3.0 - np.sin(friction))
elif (scheme == "inner_tip"):
aaa = 2 * np.sqrt(3) * cohesion * np.cos(friction) / (3.0 + np.sin(friction))
bbb = 2 * np.sin(friction) / np.sqrt(3) / (3.0 + np.sin(friction))
elif (scheme == "lode_zero"):
aaa = cohesion * np.cos(friction)
bbb = np.sin(friction) / 3.0
elif (scheme == "inner_edge"):
aaa = 3 * cohesion * np.cos(friction) / np.sqrt(9.0 + 3.0 * np.power(np.sin(friction), 2))
bbb = np.sin(friction) / np.sqrt(9.0 + 3.0 * np.power(np.sin(friction), 2))
return (aaa - np.sqrt(tip_smoother * tip_smoother + sqrtj2 * sqrtj2)) / bbb
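# Added illustrative helper (an inference from expected() above, not part of the
# original script): the expected curves correspond to a tip-smoothed
# Drucker-Prager yield function of the assumed form
#   f = sqrt(tip_smoother^2 + J2) + bbb * Tr(stress) - aaa
# so expected(scheme, sqrtj2) returns the trace at which f = 0.  The helper name
# is hypothetical and relies on the numpy import above.
def smoothed_dp_yield(trace, sqrtj2, aaa, bbb, tip_smoother=8.0):
    return np.sqrt(tip_smoother ** 2 + sqrtj2 ** 2) + bbb * trace - aaa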
def sigma_mean(stress):
    # stress is a 6-component symmetric tensor; entries 0, 3 and 5 are the
    # diagonal components, so this returns the mean stress Tr(stress) / 3.
    return (stress[0] + stress[3] + stress[5])/3.0
def sigma_bar(stress):
    # sqrt(J2): square root of the second invariant of the deviatoric stress.
    mean = sigma_mean(stress)
    return np.sqrt(0.5 * (np.power(stress[0] - mean, 2) + 2*stress[1]*stress[1] + 2*stress[2]*stress[2] + np.power(stress[3] - mean, 2) + 2*stress[4]*stress[4] + np.power(stress[5] - mean, 2)))
def third_inv(stress):
    # Product of the diagonal components of the deviatoric stress.
    mean = sigma_mean(stress)
    return (stress[0] - mean)*(stress[3] - mean)*(stress[5] - mean)
def lode_angle(stress):
    # Lode angle computed from sqrt(J2) and the third invariant above.
    bar = sigma_bar(stress)
    third = third_inv(stress)
    return np.arcsin(-1.5 * np.sqrt(3.0) * third / np.power(bar, 3)) / 3.0
def moose_result(fn):
f = open(fn)
x = []
y = []
for line in f:
if not line.strip():
continue
line = line.strip()
if line.startswith("time") or line.startswith("0"):
continue
line = map(float, line.split(","))
if line[1] < -1E-10:
continue # this is an elastic deformation
trace = 3.0 * sigma_mean(line[3:])
bar = sigma_bar(line[3:])
x.append(trace)
y.append(bar)
f.close()
return (x, y)
plt.figure()
sqrtj2 = np.arange(0, 30, 0.25)
plt.plot(expected("native", sqrtj2), sqrtj2, 'k-', label = 'expected (native)')
mr = moose_result("gold/small_deform3_native.csv")
plt.plot(mr[0], mr[1], 'k^', label = 'MOOSE (native)')
plt.plot(expected("outer_tip", sqrtj2), sqrtj2, 'g-', label = 'expected (outer_tip)')
mr = moose_result("gold/small_deform3_outer_tip.csv")
plt.plot(mr[0], mr[1], 'g^', label = 'MOOSE (outer_tip)')
plt.plot(expected("inner_tip", sqrtj2), sqrtj2, 'b-', label = 'expected (inner_tip)')
mr = moose_result("gold/small_deform3_inner_tip.csv")
plt.plot(mr[0], mr[1], 'b^', label = 'MOOSE (inner_tip)')
plt.plot(expected("lode_zero", sqrtj2), sqrtj2, 'c-', label = 'expected (lode_zero)')
mr = moose_result("gold/small_deform3_lode_zero.csv")
plt.plot(mr[0], mr[1], 'c^', label = 'MOOSE (lode_zero)')
plt.plot(expected("inner_edge", sqrtj2), sqrtj2, 'r-', label = 'expected (inner_edge)')
mr = moose_result("gold/small_deform3_inner_edge.csv")
plt.plot(mr[0], mr[1], 'r^', label = 'MOOSE (inner_edge)')
legend = plt.legend(bbox_to_anchor=(1.16, 0.95))
for label in legend.get_texts():
label.set_fontsize('small')
plt.xlabel("Tr(stress)")
plt.ylabel("sqrt(J2)")
plt.title("Drucker-Prager yield function on meridional plane")
plt.axis([-25, 15, 0, 25])
plt.savefig("small_deform3.png")
sys.exit(0)
| lgpl-2.1 |
JeanKossaifi/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
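# Added note (not part of the original example): the BIC reported by gmm.bic(X)
# below follows the standard definition
#   BIC = -2 * log-likelihood + n_parameters * log(n_samples),
# so lower values indicate a better trade-off between fit and model complexity.
# The helper below is only an illustrative sketch of that formula.
def bic_by_hand(total_log_likelihood, n_parameters, n_samples):
    return -2.0 * total_log_likelihood + n_parameters * np.log(n_samples)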
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
Connor-R/NSBL | ad_hoc/standings_comparisons/NSBL_standings_vs_teamWAR.py | 1 | 2770 | from py_db import db
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import linregress
import argparse
import csv
import NSBL_helpers as helper
import os
# Investigating how well yearly observed teamWAR correlates to yearly observed wins within the sim
db = db('NSBL')
def initiate():
path = os.getcwd()+'/'
fip_Wins = []
era_Wins = []
pythag_Wins = []
observed_Wins = []
process(fip_Wins, era_Wins, pythag_Wins, observed_Wins)
plot(pythag_Wins, observed_Wins, path, 'pythag_wins', 'observed_wins')
plot(fip_Wins, observed_Wins, path, 'fip_wins', 'observed_wins')
plot(fip_Wins, pythag_Wins, path, 'fip_wins', 'pythag_wins')
plot(era_Wins, observed_Wins, path, 'era_wins', 'observed_wins')
plot(era_Wins, pythag_Wins, path, 'era_wins', 'pythag_wins')
def process(f_wins_list, r_wins_list, pythag_wins_list, wins_list):
teamWAR_q = """SELECT
year,
team_name,
games_played,
f_wins-0.25*dwar,
r_wins-0.25*dwar,
py_wins,
w
FROM processed_team_standings_advanced
JOIN (SELECT YEAR, team_name, MAX(games_played) AS 'games_played' FROM processed_team_standings_advanced GROUP BY YEAR, team_name) a USING (YEAR, team_name, games_played)
where games_played > 150
"""
team_WAR_qry = teamWAR_q
team_WAR_list = db.query(team_WAR_qry)
for team in team_WAR_list:
year, team_name, games_played, f_wins, r_wins, pythag_wins, w = team
f_wins_list.append(float(f_wins))
r_wins_list.append(float(r_wins))
pythag_wins_list.append(float(pythag_wins))
wins_list.append(float(w))
# print year, team_name, repWAR, fWAR, rWAR
# print f_wins, r_wins, pythag_wins, w
# print '\n'
def plot(x_list, y_list, path, x_name='x_title', y_name='y_title'):
size = len(x_list)
ay_min = 0.0
ay_max = 162.0
ax_min = 0.0
ax_max = 162.0
ylims = [ay_min,ay_max]
xlims = [ax_min,ax_max]
fit = linregress(x_list,y_list)
    label = '$slope = ' + str(fit.slope) + '$ \n $r^2 = ' + str(fit.rvalue**2) + '$'
data = pd.DataFrame(
{x_name:x_list,
y_name:y_list
})
ax = sns.regplot(x=x_name, y=y_name, data=data, ci=None)
title_str = x_name + ' vs ' + y_name + ': Sample Size = '
ax.set_title(title_str + str(size))
figtit = path+"NSBL_standings_%s_vs_%s.png" % (x_name, y_name)
ax.plot(xlims, ylims, linestyle='dashed', alpha=0.9, zorder=0, color='black')
ax.text(ax_min + ((ax_max-ax_min)/20), ay_max - ((ay_max-ay_min)/10), label, style='normal')
ax.set_xlim(xlims)
ax.set_ylim(ylims)
fig = ax.get_figure()
fig.savefig(figtit)
fig.clf()
if __name__ == "__main__":
initiate()
| mit |
dvro/UnbalancedDataset | imblearn/under_sampling/instance_hardness_threshold.py | 2 | 7926 | """Class to perform under-sampling based on the instance hardness
threshold."""
from __future__ import print_function
from __future__ import division
import numpy as np
from collections import Counter
from sklearn.cross_validation import StratifiedKFold
from ..base import BaseBinarySampler
ESTIMATOR_KIND = ('knn', 'decision-tree', 'random-forest', 'adaboost',
'gradient-boosting', 'linear-svm')
class InstanceHardnessThreshold(BaseBinarySampler):
"""Class to perform under-sampling based on the instance hardness
threshold.
Parameters
----------
estimator : str, optional (default='linear-svm')
        Classifier to be used to estimate the instance hardness of the samples.
The choices are the following: 'knn',
'decision-tree', 'random-forest', 'adaboost', 'gradient-boosting'
and 'linear-svm'.
ratio : str or float, optional (default='auto')
If 'auto', the ratio will be defined automatically to balance
the dataset. Otherwise, the ratio is defined as the number
        of samples in the minority class over the number of samples
in the majority class.
return_indices : bool, optional (default=False)
        Whether or not to return the indices of the samples selected from
        the majority class.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random.
cv : int, optional (default=5)
Number of folds to be used when estimating samples' instance hardness.
n_jobs : int, optional (default=-1)
The number of threads to open if possible.
Attributes
----------
min_c_ : str or int
The identifier of the minority class.
max_c_ : str or int
The identifier of the majority class.
stats_c_ : dict of str/int : int
        A dictionary in which the number of occurrences of each class is
reported.
cv : int, optional (default=5)
Number of folds used when estimating samples' instance hardness.
X_shape_ : tuple of int
Shape of the data `X` during fitting.
Notes
-----
The method is based on [1]_.
This class does not support multi-class.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.under_sampling import RepeatedEditedNearestNeighbours
>>> X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
... n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1,
... n_samples=1000, random_state=10)
>>> print('Original dataset shape {}'.format(Counter(y)))
Original dataset shape Counter({1: 900, 0: 100})
>>> renn = RepeatedEditedNearestNeighbours(random_state=42)
>>> X_res, y_res = renn.fit_sample(X, y)
>>> print('Resampled dataset shape {}'.format(Counter(y_res)))
Resampled dataset shape Counter({1: 883, 0: 100})
References
----------
.. [1] D. Smith, Michael R., Tony Martinez, and Christophe Giraud-Carrier.
"An instance level analysis of data complexity." Machine learning
95.2 (2014): 225-256.
"""
def __init__(self, estimator='linear-svm', ratio='auto',
return_indices=False, random_state=None, cv=5, n_jobs=-1,
**kwargs):
super(InstanceHardnessThreshold, self).__init__(ratio=ratio)
self.estimator = estimator
self.return_indices = return_indices
self.random_state = random_state
self.kwargs = kwargs
self.cv = cv
self.n_jobs = n_jobs
def _sample(self, X, y):
"""Resample the dataset.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
Returns
-------
X_resampled : ndarray, shape (n_samples_new, n_features)
The array containing the resampled data.
y_resampled : ndarray, shape (n_samples_new)
The corresponding label of `X_resampled`
        idx_under : ndarray, shape (n_samples_new, )
            If `return_indices` is `True`, an array containing the indices of
            the selected samples is also returned.
"""
if self.estimator not in ESTIMATOR_KIND:
raise NotImplementedError
# Select the appropriate classifier
if self.estimator == 'knn':
from sklearn.neighbors import KNeighborsClassifier
estimator = KNeighborsClassifier(
**self.kwargs)
elif self.estimator == 'decision-tree':
from sklearn.tree import DecisionTreeClassifier
estimator = DecisionTreeClassifier(
random_state=self.random_state,
**self.kwargs)
elif self.estimator == 'random-forest':
from sklearn.ensemble import RandomForestClassifier
estimator = RandomForestClassifier(
random_state=self.random_state,
**self.kwargs)
elif self.estimator == 'adaboost':
from sklearn.ensemble import AdaBoostClassifier
estimator = AdaBoostClassifier(
random_state=self.random_state,
**self.kwargs)
elif self.estimator == 'gradient-boosting':
from sklearn.ensemble import GradientBoostingClassifier
estimator = GradientBoostingClassifier(
random_state=self.random_state,
**self.kwargs)
elif self.estimator == 'linear-svm':
from sklearn.svm import SVC
estimator = SVC(probability=True,
random_state=self.random_state,
kernel='linear',
**self.kwargs)
else:
raise NotImplementedError
# Create the different folds
skf = StratifiedKFold(y, n_folds=self.cv, shuffle=False,
random_state=self.random_state)
probabilities = np.zeros(y.shape[0], dtype=float)
for train_index, test_index in skf:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
estimator.fit(X_train, y_train)
probs = estimator.predict_proba(X_test)
classes = estimator.classes_
probabilities[test_index] = [
probs[l, np.where(classes == c)[0][0]]
for l, c in enumerate(y_test)]
# Compute the number of cluster needed
if self.ratio == 'auto':
num_samples = self.stats_c_[self.min_c_]
else:
num_samples = int(self.stats_c_[self.min_c_] / self.ratio)
# Find the percentile corresponding to the top num_samples
threshold = np.percentile(
probabilities[y != self.min_c_],
(1. - (num_samples / self.stats_c_[self.maj_c_])) * 100.)
mask = np.logical_or(probabilities >= threshold, y == self.min_c_)
# Sample the data
X_resampled = X[mask]
y_resampled = y[mask]
self.logger.info('Under-sampling performed: %s', Counter(
y_resampled))
# If we need to offer support for the indices
if self.return_indices:
idx_under = np.flatnonzero(mask)
return X_resampled, y_resampled, idx_under
else:
return X_resampled, y_resampled
| mit |