repo_name | path | copies | size | content | license
---|---|---|---|---|---|
mikofski/pvlib-python | pvlib/singlediode.py | 3 | 29321 | """
Low-level functions for solving the single diode equation.
"""
from functools import partial
import numpy as np
from pvlib.tools import _golden_sect_DataFrame
from scipy.optimize import brentq, newton
from scipy.special import lambertw
# set keyword arguments for all uses of newton in this module
newton = partial(newton, tol=1e-6, maxiter=100, fprime2=None)
# intrinsic voltage per cell junction for a:Si, CdTe, Mertens et al.
VOLTAGE_BUILTIN = 0.9 # [V]
def estimate_voc(photocurrent, saturation_current, nNsVth):
"""
Rough estimate of open circuit voltage useful for bounding searches for
    ``i`` or ``v`` when using :func:`~pvlib.pvsystem.singlediode`.
Parameters
----------
photocurrent : numeric
photo-generated current [A]
saturation_current : numeric
diode reverse saturation current [A]
nNsVth : numeric
product of thermal voltage ``Vth`` [V], diode ideality factor ``n``,
and number of series cells ``Ns``
Returns
-------
numeric
rough estimate of open circuit voltage [V]
Notes
-----
Calculating the open circuit voltage, :math:`V_{oc}`, of an ideal device
with infinite shunt resistance, :math:`R_{sh} \\to \\infty`, and zero
series resistance, :math:`R_s = 0`, yields the following equation [1]. As
an estimate of :math:`V_{oc}` it is useful as an upper bound for the
bisection method.
.. math::
        V_{oc, est} = n N_s V_{th} \\log \\left( \\frac{I_L}{I_0} + 1 \\right)
.. [1] http://www.pveducation.org/pvcdrom/open-circuit-voltage
"""
return nNsVth * np.log(np.asarray(photocurrent) / saturation_current + 1.0)
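if __name__ == '__main__':
    # Illustrative sketch, guarded so importing this module is unaffected.
    # The parameter values below are assumptions chosen to resemble a 60-cell
    # crystalline-silicon module near 25 C, not measured or fitted data.
    voc_demo = estimate_voc(photocurrent=6.0, saturation_current=1e-9,
                            nNsVth=60 * 1.1 * 0.026)
    # roughly 38-39 V here; useful as an upper bound for bracketing solvers
    print('estimated Voc ~ %.2f V' % voc_demo)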
def bishop88(diode_voltage, photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth, d2mutau=0,
NsVbi=np.Inf, breakdown_factor=0., breakdown_voltage=-5.5,
breakdown_exp=3.28, gradients=False):
r"""
Explicit calculation of points on the IV curve described by the single
diode equation. Values are calculated as described in [1]_.
The single diode equation with recombination current and reverse bias
breakdown is
.. math::
I = I_{L} - I_{0} \left (\exp \frac{V_{d}}{nN_{s}V_{th}} - 1 \right )
- \frac{V_{d}}{R_{sh}}
- \frac{I_{L} \frac{d^{2}}{\mu \tau}}{N_{s} V_{bi} - V_{d}}
- a \frac{V_{d}}{R_{sh}} \left (1 - \frac{V_{d}}{V_{br}} \right )^{-m}
The input `diode_voltage` must be :math:`V + I R_{s}`.
.. warning::
* Usage of ``d2mutau`` is required with PVSyst
coefficients for cadmium-telluride (CdTe) and amorphous-silicon
(a:Si) PV modules only.
* Do not use ``d2mutau`` with CEC coefficients.
Parameters
----------
diode_voltage : numeric
diode voltage :math:`V_d` [V]
photocurrent : numeric
photo-generated current :math:`I_{L}` [A]
saturation_current : numeric
diode reverse saturation current :math:`I_{0}` [A]
resistance_series : numeric
series resistance :math:`R_{s}` [ohms]
resistance_shunt: numeric
shunt resistance :math:`R_{sh}` [ohms]
nNsVth : numeric
product of thermal voltage :math:`V_{th}` [V], diode ideality factor
:math:`n`, and number of series cells :math:`N_{s}` [V]
d2mutau : numeric, default 0
PVsyst parameter for cadmium-telluride (CdTe) and amorphous-silicon
(a-Si) modules that accounts for recombination current in the
intrinsic layer. The value is the ratio of intrinsic layer thickness
squared :math:`d^2` to the diffusion length of charge carriers
:math:`\mu \tau`. [V]
NsVbi : numeric, default np.inf
PVsyst parameter for cadmium-telluride (CdTe) and amorphous-silicon
(a-Si) modules that is the product of the PV module number of series
cells :math:`N_{s}` and the builtin voltage :math:`V_{bi}` of the
intrinsic layer. [V].
breakdown_factor : numeric, default 0
fraction of ohmic current involved in avalanche breakdown :math:`a`.
Default of 0 excludes the reverse bias term from the model. [unitless]
breakdown_voltage : numeric, default -5.5
reverse breakdown voltage of the photovoltaic junction :math:`V_{br}`
[V]
breakdown_exp : numeric, default 3.28
avalanche breakdown exponent :math:`m` [unitless]
gradients : bool
False returns only I, V, and P. True also returns gradients
Returns
-------
tuple
currents [A], voltages [V], power [W], and optionally
:math:`\frac{dI}{dV_d}`, :math:`\frac{dV}{dV_d}`,
:math:`\frac{dI}{dV}`, :math:`\frac{dP}{dV}`, and
:math:`\frac{d^2 P}{dV dV_d}`
Notes
-----
The PVSyst thin-film recombination losses parameters ``d2mutau`` and
``NsVbi`` should only be applied to cadmium-telluride (CdTe) and amorphous-
silicon (a-Si) PV modules, [2]_, [3]_. The builtin voltage :math:`V_{bi}`
should account for all junctions. For example: tandem and triple junction
cells would have builtin voltages of 1.8[V] and 2.7[V] respectively, based
    on the default of 0.9[V] for a single junction. If the module has more
    than one parallel sub-string of cells, ``NsVbi`` should count only the
    series cells in a single sub-string.
References
----------
.. [1] "Computer simulation of the effects of electrical mismatches in
       photovoltaic cell interconnection circuits" JW Bishop, Solar Cells (1988)
:doi:`10.1016/0379-6787(88)90059-2`
.. [2] "Improved equivalent circuit and Analytical Model for Amorphous
Silicon Solar Cells and Modules." J. Mertens, et al., IEEE Transactions
on Electron Devices, Vol 45, No 2, Feb 1998.
:doi:`10.1109/16.658676`
.. [3] "Performance assessment of a simulation model for PV modules of any
available technology", André Mermoud and Thibault Lejeune, 25th EUPVSEC,
2010
:doi:`10.4229/25thEUPVSEC2010-4BV.1.114`
"""
# calculate recombination loss current where d2mutau > 0
is_recomb = d2mutau > 0 # True where there is thin-film recombination loss
v_recomb = np.where(is_recomb, NsVbi - diode_voltage, np.inf)
i_recomb = np.where(is_recomb, photocurrent * d2mutau / v_recomb, 0)
# calculate temporary values to simplify calculations
v_star = diode_voltage / nNsVth # non-dimensional diode voltage
g_sh = 1.0 / resistance_shunt # conductance
if breakdown_factor > 0: # reverse bias is considered
brk_term = 1 - diode_voltage / breakdown_voltage
brk_pwr = np.power(brk_term, -breakdown_exp)
i_breakdown = breakdown_factor * diode_voltage * g_sh * brk_pwr
else:
i_breakdown = 0.
i = (photocurrent - saturation_current * np.expm1(v_star) # noqa: W503
- diode_voltage * g_sh - i_recomb - i_breakdown) # noqa: W503
v = diode_voltage - i * resistance_series
retval = (i, v, i*v)
if gradients:
# calculate recombination loss current gradients where d2mutau > 0
grad_i_recomb = np.where(is_recomb, i_recomb / v_recomb, 0)
grad_2i_recomb = np.where(is_recomb, 2 * grad_i_recomb / v_recomb, 0)
g_diode = saturation_current * np.exp(v_star) / nNsVth # conductance
if breakdown_factor > 0: # reverse bias is considered
brk_pwr_1 = np.power(brk_term, -breakdown_exp - 1)
brk_pwr_2 = np.power(brk_term, -breakdown_exp - 2)
brk_fctr = breakdown_factor * g_sh
grad_i_brk = brk_fctr * (brk_pwr + diode_voltage *
-breakdown_exp * brk_pwr_1)
grad2i_brk = (brk_fctr * -breakdown_exp # noqa: W503
* (2 * brk_pwr_1 + diode_voltage # noqa: W503
* (-breakdown_exp - 1) * brk_pwr_2)) # noqa: W503
else:
grad_i_brk = 0.
grad2i_brk = 0.
grad_i = -g_diode - g_sh - grad_i_recomb - grad_i_brk # di/dvd
grad_v = 1.0 - grad_i * resistance_series # dv/dvd
# dp/dv = d(iv)/dv = v * di/dv + i
grad = grad_i / grad_v # di/dv
grad_p = v * grad + i # dp/dv
        grad2i = -g_diode / nNsVth - grad_2i_recomb - grad2i_brk # d2i/dvd2
        grad2v = -grad2i * resistance_series # d2v/dvd2
grad2p = (
grad_v * grad + v * (grad2i/grad_v - grad_i*grad2v/grad_v**2)
+ grad_i
) # d2p/dv/dvd
retval += (grad_i, grad_v, grad, grad_p, grad2p)
return retval
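if __name__ == '__main__':
    # Illustrative sketch (guarded demo). The single-diode parameters are
    # assumptions, not a fitted module: IL=6 A, I0=1e-9 A, Rs=0.2 ohm,
    # Rsh=300 ohm, nNsVth for ~60 series cells. Sweep the diode voltage from
    # 0 V to the estimated Voc and evaluate explicit IV points with bishop88.
    il, i0, rs, rsh, nnsvth = 6.0, 1e-9, 0.2, 300.0, 60 * 1.1 * 0.026
    vd_demo = np.linspace(0., estimate_voc(il, i0, nnsvth), 5)
    i_demo, v_demo, p_demo = bishop88(vd_demo, il, i0, rs, rsh, nnsvth)
    print('I [A]: %s' % i_demo)
    print('V [V]: %s' % v_demo)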
def bishop88_i_from_v(voltage, photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth,
d2mutau=0, NsVbi=np.Inf, breakdown_factor=0.,
breakdown_voltage=-5.5, breakdown_exp=3.28,
method='newton'):
"""
Find current given any voltage.
Parameters
----------
voltage : numeric
voltage (V) in volts [V]
photocurrent : numeric
photogenerated current (Iph or IL) [A]
saturation_current : numeric
diode dark or saturation current (Io or Isat) [A]
resistance_series : numeric
series resistance (Rs) in [Ohm]
resistance_shunt : numeric
shunt resistance (Rsh) [Ohm]
nNsVth : numeric
product of diode ideality factor (n), number of series cells (Ns), and
thermal voltage (Vth = k_b * T / q_e) in volts [V]
d2mutau : numeric, default 0
PVsyst parameter for cadmium-telluride (CdTe) and amorphous-silicon
(a-Si) modules that accounts for recombination current in the
intrinsic layer. The value is the ratio of intrinsic layer thickness
squared :math:`d^2` to the diffusion length of charge carriers
:math:`\\mu \\tau`. [V]
NsVbi : numeric, default np.inf
PVsyst parameter for cadmium-telluride (CdTe) and amorphous-silicon
(a-Si) modules that is the product of the PV module number of series
cells ``Ns`` and the builtin voltage ``Vbi`` of the intrinsic layer.
[V].
breakdown_factor : numeric, default 0
fraction of ohmic current involved in avalanche breakdown :math:`a`.
Default of 0 excludes the reverse bias term from the model. [unitless]
breakdown_voltage : numeric, default -5.5
reverse breakdown voltage of the photovoltaic junction :math:`V_{br}`
[V]
breakdown_exp : numeric, default 3.28
avalanche breakdown exponent :math:`m` [unitless]
method : str, default 'newton'
        Either ``'newton'`` or ``'brentq'``. ``method`` must be ``'newton'``
if ``breakdown_factor`` is not 0.
Returns
-------
current : numeric
current (I) at the specified voltage (V). [A]
"""
# collect args
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, d2mutau, NsVbi,
breakdown_factor, breakdown_voltage, breakdown_exp)
def fv(x, v, *a):
# calculate voltage residual given diode voltage "x"
return bishop88(x, *a)[1] - v
if method.lower() == 'brentq':
# first bound the search using voc
voc_est = estimate_voc(photocurrent, saturation_current, nNsVth)
        # brentq only works with scalar inputs, so we need a setup function
# and np.vectorize to repeatedly call the optimizer with the right
# arguments for possible array input
def vd_from_brent(voc, v, iph, isat, rs, rsh, gamma, d2mutau, NsVbi,
breakdown_factor, breakdown_voltage, breakdown_exp):
return brentq(fv, 0.0, voc,
args=(v, iph, isat, rs, rsh, gamma, d2mutau, NsVbi,
breakdown_factor, breakdown_voltage,
breakdown_exp))
vd_from_brent_vectorized = np.vectorize(vd_from_brent)
vd = vd_from_brent_vectorized(voc_est, voltage, *args)
elif method.lower() == 'newton':
# make sure all args are numpy arrays if max size > 1
# if voltage is an array, then make a copy to use for initial guess, v0
args, v0 = _prepare_newton_inputs((voltage,), args, voltage)
vd = newton(func=lambda x, *a: fv(x, voltage, *a), x0=v0,
fprime=lambda x, *a: bishop88(x, *a, gradients=True)[4],
args=args)
else:
raise NotImplementedError("Method '%s' isn't implemented" % method)
return bishop88(vd, *args)[0]
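if __name__ == '__main__':
    # Illustrative sketch (guarded demo; same assumed parameters as above).
    # The bounded 'brentq' solver is used so this demo only relies on
    # functions already defined at this point in the file.
    i_at_20v = bishop88_i_from_v(20.0, 6.0, 1e-9, 0.2, 300.0,
                                 60 * 1.1 * 0.026, method='brentq')
    print('I(V=20 V) ~ %.3f A' % i_at_20v)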
def bishop88_v_from_i(current, photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth,
d2mutau=0, NsVbi=np.Inf, breakdown_factor=0.,
breakdown_voltage=-5.5, breakdown_exp=3.28,
method='newton'):
"""
Find voltage given any current.
Parameters
----------
current : numeric
current (I) in amperes [A]
photocurrent : numeric
photogenerated current (Iph or IL) [A]
saturation_current : numeric
diode dark or saturation current (Io or Isat) [A]
resistance_series : numeric
series resistance (Rs) in [Ohm]
resistance_shunt : numeric
shunt resistance (Rsh) [Ohm]
nNsVth : numeric
product of diode ideality factor (n), number of series cells (Ns), and
thermal voltage (Vth = k_b * T / q_e) in volts [V]
d2mutau : numeric, default 0
PVsyst parameter for cadmium-telluride (CdTe) and amorphous-silicon
(a-Si) modules that accounts for recombination current in the
intrinsic layer. The value is the ratio of intrinsic layer thickness
squared :math:`d^2` to the diffusion length of charge carriers
:math:`\\mu \\tau`. [V]
NsVbi : numeric, default np.inf
PVsyst parameter for cadmium-telluride (CdTe) and amorphous-silicon
(a-Si) modules that is the product of the PV module number of series
cells ``Ns`` and the builtin voltage ``Vbi`` of the intrinsic layer.
[V].
breakdown_factor : numeric, default 0
fraction of ohmic current involved in avalanche breakdown :math:`a`.
Default of 0 excludes the reverse bias term from the model. [unitless]
breakdown_voltage : numeric, default -5.5
reverse breakdown voltage of the photovoltaic junction :math:`V_{br}`
[V]
breakdown_exp : numeric, default 3.28
avalanche breakdown exponent :math:`m` [unitless]
method : str, default 'newton'
        Either ``'newton'`` or ``'brentq'``. ``method`` must be ``'newton'``
if ``breakdown_factor`` is not 0.
Returns
-------
voltage : numeric
voltage (V) at the specified current (I) in volts [V]
"""
# collect args
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, d2mutau, NsVbi, breakdown_factor,
breakdown_voltage, breakdown_exp)
# first bound the search using voc
voc_est = estimate_voc(photocurrent, saturation_current, nNsVth)
def fi(x, i, *a):
# calculate current residual given diode voltage "x"
return bishop88(x, *a)[0] - i
if method.lower() == 'brentq':
        # brentq only works with scalar inputs, so we need a setup function
# and np.vectorize to repeatedly call the optimizer with the right
# arguments for possible array input
def vd_from_brent(voc, i, iph, isat, rs, rsh, gamma, d2mutau, NsVbi,
breakdown_factor, breakdown_voltage, breakdown_exp):
return brentq(fi, 0.0, voc,
args=(i, iph, isat, rs, rsh, gamma, d2mutau, NsVbi,
breakdown_factor, breakdown_voltage,
breakdown_exp))
vd_from_brent_vectorized = np.vectorize(vd_from_brent)
vd = vd_from_brent_vectorized(voc_est, current, *args)
elif method.lower() == 'newton':
# make sure all args are numpy arrays if max size > 1
# if voc_est is an array, then make a copy to use for initial guess, v0
args, v0 = _prepare_newton_inputs((current,), args, voc_est)
vd = newton(func=lambda x, *a: fi(x, current, *a), x0=v0,
fprime=lambda x, *a: bishop88(x, *a, gradients=True)[3],
args=args)
else:
raise NotImplementedError("Method '%s' isn't implemented" % method)
return bishop88(vd, *args)[1]
def bishop88_mpp(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, d2mutau=0, NsVbi=np.Inf,
breakdown_factor=0., breakdown_voltage=-5.5,
breakdown_exp=3.28, method='newton'):
"""
Find max power point.
Parameters
----------
photocurrent : numeric
photogenerated current (Iph or IL) [A]
saturation_current : numeric
diode dark or saturation current (Io or Isat) [A]
resistance_series : numeric
series resistance (Rs) in [Ohm]
resistance_shunt : numeric
shunt resistance (Rsh) [Ohm]
nNsVth : numeric
product of diode ideality factor (n), number of series cells (Ns), and
thermal voltage (Vth = k_b * T / q_e) in volts [V]
d2mutau : numeric, default 0
PVsyst parameter for cadmium-telluride (CdTe) and amorphous-silicon
(a-Si) modules that accounts for recombination current in the
intrinsic layer. The value is the ratio of intrinsic layer thickness
squared :math:`d^2` to the diffusion length of charge carriers
:math:`\\mu \\tau`. [V]
NsVbi : numeric, default np.inf
PVsyst parameter for cadmium-telluride (CdTe) and amorphous-silicon
(a-Si) modules that is the product of the PV module number of series
cells ``Ns`` and the builtin voltage ``Vbi`` of the intrinsic layer.
[V].
breakdown_factor : numeric, default 0
fraction of ohmic current involved in avalanche breakdown :math:`a`.
Default of 0 excludes the reverse bias term from the model. [unitless]
breakdown_voltage : numeric, default -5.5
reverse breakdown voltage of the photovoltaic junction :math:`V_{br}`
[V]
breakdown_exp : numeric, default 3.28
avalanche breakdown exponent :math:`m` [unitless]
method : str, default 'newton'
        Either ``'newton'`` or ``'brentq'``. ``method`` must be ``'newton'``
if ``breakdown_factor`` is not 0.
Returns
-------
    tuple
max power current ``i_mp`` [A], max power voltage ``v_mp`` [V], and
max power ``p_mp`` [W]
"""
# collect args
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, d2mutau, NsVbi, breakdown_factor,
breakdown_voltage, breakdown_exp)
# first bound the search using voc
voc_est = estimate_voc(photocurrent, saturation_current, nNsVth)
def fmpp(x, *a):
return bishop88(x, *a, gradients=True)[6]
if method.lower() == 'brentq':
# break out arguments for numpy.vectorize to handle broadcasting
vec_fun = np.vectorize(
lambda voc, iph, isat, rs, rsh, gamma, d2mutau, NsVbi, vbr_a, vbr,
vbr_exp: brentq(fmpp, 0.0, voc,
args=(iph, isat, rs, rsh, gamma, d2mutau, NsVbi,
vbr_a, vbr, vbr_exp))
)
vd = vec_fun(voc_est, *args)
elif method.lower() == 'newton':
# make sure all args are numpy arrays if max size > 1
# if voc_est is an array, then make a copy to use for initial guess, v0
args, v0 = _prepare_newton_inputs((), args, voc_est)
vd = newton(
func=fmpp, x0=v0,
fprime=lambda x, *a: bishop88(x, *a, gradients=True)[7], args=args
)
else:
raise NotImplementedError("Method '%s' isn't implemented" % method)
return bishop88(vd, *args)
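if __name__ == '__main__':
    # Illustrative sketch (guarded demo; assumed parameters as above). Locate
    # the maximum power point with the bounded 'brentq' solver, which relies
    # only on functions already defined at this point in the file.
    i_mp, v_mp, p_mp = bishop88_mpp(6.0, 1e-9, 0.2, 300.0, 60 * 1.1 * 0.026,
                                    method='brentq')
    print('i_mp=%.3f A, v_mp=%.3f V, p_mp=%.3f W' % (i_mp, v_mp, p_mp))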
def _get_size_and_shape(args):
# find the right size and shape for returns
size, shape = 0, None # 0 or None both mean scalar
for arg in args:
try:
this_shape = arg.shape # try to get shape
except AttributeError:
this_shape = None
try:
this_size = len(arg) # try to get the size
except TypeError:
this_size = 0
else:
this_size = arg.size # if it has shape then it also has size
if shape is None:
shape = this_shape # set the shape if None
# update size and shape
if this_size > size:
size = this_size
if this_shape is not None:
shape = this_shape
return size, shape
def _prepare_newton_inputs(i_or_v_tup, args, v0):
# broadcast arguments for newton method
# the first argument should be a tuple, eg: (i,), (v,) or ()
size, shape = _get_size_and_shape(i_or_v_tup + args)
if size > 1:
args = [np.asarray(arg) for arg in args]
# newton uses initial guess for the output shape
# copy v0 to a new array and broadcast it to the shape of max size
if shape is not None:
v0 = np.broadcast_to(v0, shape).copy()
return args, v0
def _lambertw_v_from_i(resistance_shunt, resistance_series, nNsVth, current,
saturation_current, photocurrent):
# Record if inputs were all scalar
output_is_scalar = all(map(np.isscalar,
[resistance_shunt, resistance_series, nNsVth,
current, saturation_current, photocurrent]))
# This transforms Gsh=1/Rsh, including ideal Rsh=np.inf into Gsh=0., which
# is generally more numerically stable
conductance_shunt = 1. / resistance_shunt
# Ensure that we are working with read-only views of numpy arrays
# Turns Series into arrays so that we don't have to worry about
# multidimensional broadcasting failing
Gsh, Rs, a, I, I0, IL = \
np.broadcast_arrays(conductance_shunt, resistance_series, nNsVth,
current, saturation_current, photocurrent)
    # Initialize output V (I might not be float64)
V = np.full_like(I, np.nan, dtype=np.float64)
# Determine indices where 0 < Gsh requires implicit model solution
idx_p = 0. < Gsh
# Determine indices where 0 = Gsh allows explicit model solution
idx_z = 0. == Gsh
# Explicit solutions where Gsh=0
if np.any(idx_z):
V[idx_z] = a[idx_z] * np.log1p((IL[idx_z] - I[idx_z]) / I0[idx_z]) - \
I[idx_z] * Rs[idx_z]
# Only compute using LambertW if there are cases with Gsh>0
if np.any(idx_p):
# LambertW argument, cannot be float128, may overflow to np.inf
# overflow is explicitly handled below, so ignore warnings here
with np.errstate(over='ignore'):
argW = (I0[idx_p] / (Gsh[idx_p] * a[idx_p]) *
np.exp((-I[idx_p] + IL[idx_p] + I0[idx_p]) /
(Gsh[idx_p] * a[idx_p])))
# lambertw typically returns complex value with zero imaginary part
# may overflow to np.inf
lambertwterm = lambertw(argW).real
# Record indices where lambertw input overflowed output
idx_inf = np.logical_not(np.isfinite(lambertwterm))
# Only re-compute LambertW if it overflowed
if np.any(idx_inf):
# Calculate using log(argW) in case argW is really big
logargW = (np.log(I0[idx_p]) - np.log(Gsh[idx_p]) -
np.log(a[idx_p]) +
(-I[idx_p] + IL[idx_p] + I0[idx_p]) /
(Gsh[idx_p] * a[idx_p]))[idx_inf]
# Three iterations of Newton-Raphson method to solve
# w+log(w)=logargW. The initial guess is w=logargW. Where direct
# evaluation (above) results in NaN from overflow, 3 iterations
# of Newton's method gives approximately 8 digits of precision.
w = logargW
for _ in range(0, 3):
w = w * (1. - np.log(w) + logargW) / (1. + w)
lambertwterm[idx_inf] = w
# Eqn. 3 in Jain and Kapoor, 2004
# V = -I*(Rs + Rsh) + IL*Rsh - a*lambertwterm + I0*Rsh
# Recast in terms of Gsh=1/Rsh for better numerical stability.
V[idx_p] = (IL[idx_p] + I0[idx_p] - I[idx_p]) / Gsh[idx_p] - \
I[idx_p] * Rs[idx_p] - a[idx_p] * lambertwterm
if output_is_scalar:
return V.item()
else:
return V
def _lambertw_i_from_v(resistance_shunt, resistance_series, nNsVth, voltage,
saturation_current, photocurrent):
# Record if inputs were all scalar
output_is_scalar = all(map(np.isscalar,
[resistance_shunt, resistance_series, nNsVth,
voltage, saturation_current, photocurrent]))
# This transforms Gsh=1/Rsh, including ideal Rsh=np.inf into Gsh=0., which
# is generally more numerically stable
conductance_shunt = 1. / resistance_shunt
# Ensure that we are working with read-only views of numpy arrays
# Turns Series into arrays so that we don't have to worry about
# multidimensional broadcasting failing
Gsh, Rs, a, V, I0, IL = \
np.broadcast_arrays(conductance_shunt, resistance_series, nNsVth,
voltage, saturation_current, photocurrent)
    # Initialize output I (V might not be float64)
I = np.full_like(V, np.nan, dtype=np.float64) # noqa: E741, N806
# Determine indices where 0 < Rs requires implicit model solution
idx_p = 0. < Rs
# Determine indices where 0 = Rs allows explicit model solution
idx_z = 0. == Rs
# Explicit solutions where Rs=0
if np.any(idx_z):
I[idx_z] = IL[idx_z] - I0[idx_z] * np.expm1(V[idx_z] / a[idx_z]) - \
Gsh[idx_z] * V[idx_z]
# Only compute using LambertW if there are cases with Rs>0
# Does NOT handle possibility of overflow, github issue 298
if np.any(idx_p):
# LambertW argument, cannot be float128, may overflow to np.inf
argW = Rs[idx_p] * I0[idx_p] / (
a[idx_p] * (Rs[idx_p] * Gsh[idx_p] + 1.)) * \
np.exp((Rs[idx_p] * (IL[idx_p] + I0[idx_p]) + V[idx_p]) /
(a[idx_p] * (Rs[idx_p] * Gsh[idx_p] + 1.)))
# lambertw typically returns complex value with zero imaginary part
# may overflow to np.inf
lambertwterm = lambertw(argW).real
# Eqn. 2 in Jain and Kapoor, 2004
# I = -V/(Rs + Rsh) - (a/Rs)*lambertwterm + Rsh*(IL + I0)/(Rs + Rsh)
# Recast in terms of Gsh=1/Rsh for better numerical stability.
I[idx_p] = (IL[idx_p] + I0[idx_p] - V[idx_p] * Gsh[idx_p]) / \
(Rs[idx_p] * Gsh[idx_p] + 1.) - (
a[idx_p] / Rs[idx_p]) * lambertwterm
if output_is_scalar:
return I.item()
else:
return I
def _lambertw(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None):
# Compute short circuit current
i_sc = _lambertw_i_from_v(resistance_shunt, resistance_series, nNsVth, 0.,
saturation_current, photocurrent)
# Compute open circuit voltage
v_oc = _lambertw_v_from_i(resistance_shunt, resistance_series, nNsVth, 0.,
saturation_current, photocurrent)
params = {'r_sh': resistance_shunt,
'r_s': resistance_series,
'nNsVth': nNsVth,
'i_0': saturation_current,
'i_l': photocurrent}
# Find the voltage, v_mp, where the power is maximized.
    # Search between 0 and v_oc * 1.14 using a golden section search
p_mp, v_mp = _golden_sect_DataFrame(params, 0., v_oc * 1.14,
_pwr_optfcn)
# Find Imp using Lambert W
i_mp = _lambertw_i_from_v(resistance_shunt, resistance_series, nNsVth,
v_mp, saturation_current, photocurrent)
# Find Ix and Ixx using Lambert W
i_x = _lambertw_i_from_v(resistance_shunt, resistance_series, nNsVth,
0.5 * v_oc, saturation_current, photocurrent)
i_xx = _lambertw_i_from_v(resistance_shunt, resistance_series, nNsVth,
0.5 * (v_oc + v_mp), saturation_current,
photocurrent)
out = (i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx)
# create ivcurve
if ivcurve_pnts:
ivcurve_v = (np.asarray(v_oc)[..., np.newaxis] *
np.linspace(0, 1, ivcurve_pnts))
ivcurve_i = _lambertw_i_from_v(resistance_shunt, resistance_series,
nNsVth, ivcurve_v.T, saturation_current,
photocurrent).T
out += (ivcurve_i, ivcurve_v)
return out
def _pwr_optfcn(df, loc):
'''
Function to find power from ``i_from_v``.
'''
I = _lambertw_i_from_v(df['r_sh'], df['r_s'], # noqa: E741, N806
df['nNsVth'], df[loc], df['i_0'], df['i_l'])
return I * df[loc]
| bsd-3-clause |
mspkvp/MiningOpinionTweets | src/lda_without_tf_idf_politics.py | 1 | 3990 | from __future__ import print_function
from time import time
import csv
import sys
import os
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import lda
import logging
logging.basicConfig(filename='lda_analyser.log', level=logging.DEBUG)
entities = ['passos_coelho',
'jose_socrates',
'antonio_costa',
'paulo_portas',
'cavaco_silva',
'barack_obama',
'vladimir_putin',
'donald_trump',
'catarina_martins',
'jeronimo_sousa',
'marcelo_rebelo_sousa',
'partido_socialista',
'partido_social_democrata',
'partido_popular',
'bloco_esquerda',
'partido_comunista'
]
if not os.path.exists("results"):
os.makedirs("results")
for n_topics in [10, 20, 50, 100]:
n_features = 10000
n_top_words = int(sys.argv[1]) + 1
corpus = []
topics_write_file = csv.writer(open("results/lda_topics_{}topics_{}words_{}.csv".format(n_topics,
n_top_words - 1,
"sports"), "wb"),
delimiter="\t", quotechar='|', quoting=csv.QUOTE_MINIMAL)
write_file = csv.writer(open("results/lda_topics_{}topics_{}words_mapping_{}.csv".format(n_topics,
n_top_words - 1,
"sports"), "wb"),
delimiter="\t", quotechar='|', quoting=csv.QUOTE_MINIMAL)
def print_top_words(model, doc_topic, feature_names, n_top_words, dictionary):
for i, topic_dist in enumerate(model):
topic_words = np.array(feature_names)[np.argsort(topic_dist)][:-n_top_words:-1]
#write_file.write('Topic {}: {}\n'.format(i, ' '.join(topic_words)))
topic_row = [str(i)]
topic_row.extend(topic_words)
topics_write_file.writerow(topic_row)
for i in range(len(corpus)):
document_row = [dictionary[i][0], dictionary[i][1]]
document_row.append(doc_topic[i].argmax())
#document_row.append(corpus[i])
write_file.writerow(document_row)
entity_day_dict = dict()
# read all files and store their contents on a dictionary
for i in os.listdir(os.getcwd() + "/filtered_tweets"):
for filename in os.listdir(os.getcwd() + "/filtered_tweets" + "/" + i):
if(filename.split(".")[0] in entities):
entity_day_dict[i+" "+filename] = open(os.getcwd() + "/filtered_tweets" + "/" + i + "/" + filename, 'r').read()
entity_day_key_index = dict()
i = 0
for key in entity_day_dict:
entity_day_key_index[i] = key.split(" ")
corpus.append(entity_day_dict[key])
i += 1
# Use tf (raw term count) features for LDA.
logging.info("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(corpus)
logging.info("done in %0.3fs." % (time() - t0))
logging.info("Fitting LDA models with tf")
model = lda.LDA(n_topics=n_topics, n_iter=1500, random_state=1)
#LatentDirichletAllocation(n_topics=n_topics, max_iter=5, learning_method='online', #learning_offset=50., random_state=0)
t0 = time()
model.fit(tf)
logging.info("done in %0.3fs." % (time() - t0))
topic_word = model.topic_word_
doc_topic = model.doc_topic_
logging.info("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(topic_word, doc_topic, tf_feature_names, n_top_words, entity_day_key_index) | mit |
rudischilder/MAV_TU_Delft_gr10 | sw/airborne/test/math/compare_utm_enu.py | 77 | 2714 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import sys
import os
PPRZ_SRC = os.getenv("PAPARAZZI_SRC", "../../../..")
sys.path.append(PPRZ_SRC + "/sw/lib/python")
from pprz_math.geodetic import *
from pprz_math.algebra import DoubleRMat, DoubleEulers, DoubleVect3
from math import radians, degrees, tan
import matplotlib.pyplot as plt
import numpy as np
# Origin at ENAC
UTM_EAST0 = 377349 # in m
UTM_NORTH0 = 4824583 # in m
UTM_ZONE0 = 31
ALT0 = 147.000 # in m
utm_origin = UtmCoor_d(north=UTM_NORTH0, east=UTM_EAST0, alt=ALT0, zone=UTM_ZONE0)
print("origin %s" % utm_origin)
lla_origin = utm_origin.to_lla()
ecef_origin = lla_origin.to_ecef()
ltp_origin = ecef_origin.to_ltp_def()
print(ltp_origin)
# convergence angle to "true north" is approx 1 deg here
earth_radius = 6378137.0
n = 0.9996 * earth_radius
UTM_DELTA_EAST = 500000.
dist_to_meridian = utm_origin.east - UTM_DELTA_EAST
conv = dist_to_meridian / n * tan(lla_origin.lat)
# or (middle meridian of UTM zone 31 is at 3deg)
#conv = atan(tan(lla_origin.lon - radians(3))*sin(lla_origin.lat))
print("approx. convergence angle (north error compared to meridian): %f deg" % degrees(conv))
# Rotation matrix to correct for "true north"
R = DoubleEulers(psi=-conv).to_rmat()
# calculate ENU coordinates for 100 points in 100m distance
nb_points = 100
dist_points = 100
enu_res = np.zeros((nb_points, 2))
enu_res_c = np.zeros((nb_points, 2))
utm_res = np.zeros((nb_points, 2))
for i in range(0, nb_points):
utm = UtmCoor_d()
utm.north = i * dist_points + utm_origin.north
    utm.east = i * dist_points + utm_origin.east
utm.alt = utm_origin.alt
utm.zone = utm_origin.zone
#print(utm)
utm_res[i, 0] = utm.east - utm_origin.east
utm_res[i, 1] = utm.north - utm_origin.north
lla = utm.to_lla()
#print(lla)
ecef = lla.to_ecef()
enu = ecef.to_enu(ltp_origin)
enu_res[i, 0] = enu.x
enu_res[i, 1] = enu.y
enu_c = R * DoubleVect3(enu.x, enu.y, enu.z)
enu_res_c[i, 0] = enu_c.x
enu_res_c[i, 1] = enu_c.y
#print(enu)
dist = np.linalg.norm(utm_res, axis=1)
error = np.linalg.norm(utm_res - enu_res, axis=1)
error_c = np.linalg.norm(utm_res - enu_res_c, axis=1)
plt.figure(1)
plt.subplot(311)
plt.title("utm vs. enu")
plt.plot(enu_res[:, 0], enu_res[:, 1], 'g', label="ENU")
plt.plot(utm_res[:, 0], utm_res[:, 1], 'r', label="UTM")
plt.ylabel("y/north [m]")
plt.xlabel("x/east [m]")
plt.legend(loc='upper left')
plt.subplot(312)
plt.plot(dist, error, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error [m]")
plt.subplot(313)
plt.plot(dist, error_c, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error with north fix [m]")
plt.show()
| gpl-2.0 |
magic2du/contact_matrix | Contact_maps/mnist_psuedo_ipython_dl_ppi/code/DL_Stacked_Model_Mnist_Psuedo_01_22_01_2015.py | 1 | 26174 |
# coding: utf-8
# In[1]:
# this part imports libs and loads the data
import sys
sys.path.append('../../../libs/')
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pandas as pd
import pickle
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
import sklearn
import scipy.stats as ss
import cPickle
import gzip
import os
import time
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import os.path
import IO_class
from IO_class import FileOperator
from sklearn import cross_validation
import sklearn
import numpy as np
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pandas as pd
import pdb, PIL
import pickle
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from sklearn import preprocessing
import sklearn
import scipy.stats as ss
from sklearn.svm import LinearSVC
import random
from DL_libs import *
from itertools import izip #new
import math
from sklearn.svm import SVC
# In[2]:
# set settings for this script
settings = {}
settings['with_auc_score'] = False
settings['reduce_ratio'] = 1
settings['SVM'] = 1
settings['SVM_RBF'] = 1
settings['SVM_POLY'] = 1
settings['DL'] = 1
settings['Log'] = 1
settings['SAE_Log'] = 1
settings['SAE_SVM'] = 1
settings['SAE_SVM_RBF'] = 1
settings['SAE_SVM_POLY'] = 0
settings['DL_S'] = 1
settings['SAE_S_SVM'] = 1
settings['SAE_S_SVM_RBF'] = 1
settings['SAE_S_SVM_POLY'] = 0
settings['number_iterations'] = 10
settings['finetune_lr'] = 0.1
settings['batch_size'] = 30
settings['pretraining_interations'] = 50001#10000
settings['pretrain_lr'] = 0.001
#settings['training_epochs'] = 300 #300
settings['training_interations'] = 50001 #300
settings['hidden_layers_sizes'] = [200, 200]
settings['corruption_levels'] = [0, 0]
settings['number_of_training'] = [10000]#[1000, 2500, 5000, 7500, 10000] # use all examples
settings['test_set_from_test'] = True
import logging
import time
current_date = time.strftime("%m_%d_%Y")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logname = 'log_DL_handwritten_digits' + current_date + '.log'
handler = logging.FileHandler(logname)
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
#logger.debug('This message should go to the log file')
for key, value in settings.items():
logger.info(key +': '+ str(value))
# In[3]:
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
X_train,y_train = train_set
X_valid,y_valid = valid_set
X_total=np.vstack((X_train, X_valid))
X_total = np.array(X_total, dtype= theano.config.floatX)
print 'sample size', X_total.shape
y_total = np.concatenate([y_train, y_valid])
# In[5]:
################## generate data from training set###################
array_A =[]
array_B =[]
for i in range(100000):
array_A.append(np.random.random_integers(0, 59999))
array_B.append(np.random.random_integers(0, 59999))
pos_index = []
neg_index = []
for index in xrange(100000):
if y_total[array_A[index]] - y_total[array_B[index]] == 1:
pos_index.append(index)
else:
neg_index.append(index)
print 'number of positive examples', len(pos_index)
selected_neg_index= neg_index[ : len(pos_index)]
array_A = np.array(array_A)
array_B = np.array(array_B)
index_for_positive_image_A = array_A[pos_index]
index_for_positive_image_B = array_B[pos_index]
index_for_neg_image_A = array_A[selected_neg_index]
index_for_neg_image_B = array_B[selected_neg_index]
X_pos_A = X_total[index_for_positive_image_A]
X_pos_B = X_total[index_for_positive_image_B]
X_pos_whole = np.hstack((X_pos_A,X_pos_B))
X_neg_A = X_total[index_for_neg_image_A]
X_neg_B = X_total[index_for_neg_image_B]
X_neg_whole = np.hstack((X_neg_A, X_neg_B))
print X_pos_A.shape, X_pos_B.shape, X_pos_whole.shape
print X_neg_A.shape, X_neg_B.shape, X_neg_whole.shape
X_whole = np.vstack((X_pos_whole, X_neg_whole))
print X_whole.shape
y_pos = np.ones(X_pos_whole.shape[0])
y_neg = np.zeros(X_neg_whole.shape[0])
y_whole = np.concatenate([y_pos,y_neg])
print y_whole
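# Illustrative sanity check (runs inline with the script): the positive class
# pairs an image whose label is exactly one greater than its partner's,
# e.g. labels (3, 2) -> 1, while (3, 5) or (4, 4) -> 0.
for k in range(3):
    print 'positive pair labels:', y_total[index_for_positive_image_A[k]], y_total[index_for_positive_image_B[k]]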
# In[7]:
#pylab.imshow(imageB.reshape(28, 28), cmap="Greys")
# In[8]:
def saveAsCsv(with_auc_score, fname, score_dict, arguments): #new
newfile = False
if os.path.isfile('report_' + fname + '.csv'):
pass
else:
newfile = True
csvfile = open('report_' + fname + '.csv', 'a+')
writer = csv.writer(csvfile)
if newfile == True:
writer.writerow(['no.', 'number_of_training', 'method', 'isTest']+ score_dict.keys()) #, 'AUC'])
for arg in arguments:
writer.writerow([i for i in arg])
csvfile.close()
def run_models(settings = None):
analysis_scr = []
with_auc_score = settings['with_auc_score']
for subset_no in xrange(1,settings['number_iterations']+1):
print("Subset:", subset_no)
################## generate data ###################
array_A =[]
array_B =[]
for i in range(100000):
array_A.append(np.random.random_integers(0, 59999))
array_B.append(np.random.random_integers(0, 59999))
pos_index = []
neg_index = []
for index in xrange(100000):
if y_total[array_A[index]] - y_total[array_B[index]] == 1:
pos_index.append(index)
else:
neg_index.append(index)
print 'number of positive examples', len(pos_index)
selected_neg_index= neg_index[ : len(pos_index)]
array_A = np.array(array_A)
array_B = np.array(array_B)
index_for_positive_image_A = array_A[pos_index]
index_for_positive_image_B = array_B[pos_index]
index_for_neg_image_A = array_A[selected_neg_index]
index_for_neg_image_B = array_B[selected_neg_index]
X_pos_A = X_total[index_for_positive_image_A]
X_pos_B = X_total[index_for_positive_image_B]
X_pos_whole = np.hstack((X_pos_A,X_pos_B))
X_neg_A = X_total[index_for_neg_image_A]
X_neg_B = X_total[index_for_neg_image_B]
X_neg_whole = np.hstack((X_neg_A, X_neg_B))
print X_pos_A.shape, X_pos_B.shape, X_pos_whole.shape
print X_neg_A.shape, X_neg_B.shape, X_neg_whole.shape
X_whole = np.vstack((X_pos_whole, X_neg_whole))
print X_whole.shape
y_pos = np.ones(X_pos_whole.shape[0])
y_neg = np.zeros(X_neg_whole.shape[0])
y_whole = np.concatenate([y_pos,y_neg])
print y_whole.shape
x_train_pre_validation, x_test, y_train_pre_validation, y_test = train_test_split(X_whole,y_whole, test_size=0.2, random_state=211)
for number_of_training in settings['number_of_training']:
'''
x_train, x_validation, y_train, y_validation = train_test_split(x_train_pre_validation[:number_of_training],
y_train_pre_validation[:number_of_training],\
test_size=0.2, random_state=21)
'''
x_train, x_validation, y_train, y_validation = train_test_split(x_train_pre_validation[:],
y_train_pre_validation[:],\
test_size=0.4, random_state=21)
print x_train.shape, y_train.shape, x_validation.shape, \
y_validation.shape, x_test.shape, y_test.shape
x_train_minmax, x_validation_minmax, x_test_minmax = x_train, x_validation, x_test
train_X_reduced = x_train
train_y_reduced = y_train
test_X = x_test
test_y = y_test
###original data###
################ end of data ####################
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
if settings['SVM']:
print "SVM"
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, y_train)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append(( subset_no,number_of_training, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_RBF']:
print "SVM_RBF"
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(scaled_train_X, y_train)
predicted_test_y = L1_SVC_RBF_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_POLY']:
print "SVM_POLY"
L1_SVC_POLY_Selector = SVC(C=1, kernel='poly').fit(scaled_train_X, train_y_reduced)
predicted_test_y = L1_SVC_POLY_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_POLY_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['Log']:
print "Log"
log_clf_l2 = sklearn.linear_model.LogisticRegression(C=1, penalty='l2')
log_clf_l2.fit(scaled_train_X, train_y_reduced)
predicted_test_y = log_clf_l2.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no,number_of_training, 'Log', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = log_clf_l2.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'Log', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
# direct deep learning
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = cal_epochs(settings['training_interations'], x_train_minmax, batch_size = batch_size)
hidden_layers_sizes = settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
if settings['DL']:
print "direct deep learning"
sda = trainSda(x_train_minmax, y_train,
x_validation_minmax, y_validation,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
test_predicted = sda.predict(x_test_minmax)
isTest = True; #new
analysis_scr.append((subset_no,number_of_training, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
training_predicted = sda.predict(x_train_minmax)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
####transformed original data####
x = train_X_reduced
a_MAE_original = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_original.transform(train_X_reduced)
new_x_test_minmax_A = a_MAE_original.transform(x_test_minmax)
standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_A)
new_x_train_scaled = standard_scaler.transform(new_x_train_minmax_A)
new_x_test_scaled = standard_scaler.transform(new_x_test_minmax_A)
new_x_train_combo = np.hstack((scaled_train_X, new_x_train_scaled))
new_x_test_combo = np.hstack((scaled_test_X, new_x_test_scaled))
if settings['SAE_SVM']:
# SAE_SVM
print 'SAE followed by SVM'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_Log']:
print 'SAE followed by Log'
log_clf_l2 = sklearn.linear_model.LogisticRegression(C=1, penalty='l2')
log_clf_l2.fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = log_clf_l2.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((subset_no,number_of_training, 'SAE_Log', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = log_clf_l2.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SAE_Log', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_RBF']:
# SAE_SVM
print 'SAE followed by SVM RBF'
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_POLY']:
# SAE_SVM
print 'SAE followed by SVM POLY'
L1_SVC_RBF_Selector = SVC(C=1, kernel='poly').fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'SAE_SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
#### separated transformed data ####
y_test = test_y
print 'deep learning using split network'
# get the new representation for A set. first 784-D
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes = [x/2 for x in hidden_layers_sizes], corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes = [x/2 for x in hidden_layers_sizes], corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_whole)
new_x_train_minmax_whole_scaled = standard_scaler.transform(new_x_train_minmax_whole)
new_x_test_minmax_whole_scaled = standard_scaler.transform(new_x_test_minmax_whole)
if settings['DL_S']:
# deep learning using split network
sda_transformed = trainSda(new_x_train_minmax_whole, y_train,
new_x_validationt_minmax_whole, y_validation ,
new_x_test_minmax_whole, y_test,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
predicted_test_y = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'DL_S', isTest) + tuple(performance_score(y_test, predicted_test_y, with_auc_score).values()))
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
isTest = False; #new
analysis_scr.append((subset_no,number_of_training, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
if settings['SAE_S_SVM']:
print 'SAE_S followed by SVM'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append(( subset_no, number_of_training,'SAE_S_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append(( subset_no,number_of_training, 'SAE_S_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
if settings['SAE_S_SVM_RBF']:
print 'SAE S followed by SVM RBF'
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training, 'SAE_S_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
if settings['SAE_S_SVM_POLY']:
# SAE_SVM
print 'SAE S followed by SVM POLY'
L1_SVC_RBF_Selector = SVC(C=1, kernel='poly').fit(new_x_train_minmax_whole_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_whole_scaled)
isTest = True; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_whole_scaled)
isTest = False; #new
analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values()))
report_name = 'DL_handwritten_digits' + '_size_'.join(map(str, hidden_layers_sizes)) + \
'_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + \
'_' + str(settings['pretraining_interations']) + '_' + current_date
saveAsCsv(with_auc_score, report_name, performance_score(test_y, predicted_test_y, with_auc_score), analysis_scr)
return sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr
# In[9]:
sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr = run_models(settings)
# In[48]:
# save objects
sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date +'sda.pickle', 'wb') as handle:
pickle.dump(sda, handle)
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date + 'a_MAE_original.pickle', 'wb') as handle:
pickle.dump(a_MAE_original, handle)
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date + 'a_MAE_A.pickle', 'wb') as handle:
pickle.dump(a_MAE_A, handle)
with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date + 'a_MAE_B.pickle', 'wb') as handle:
pickle.dump(a_MAE_B, handle)
# detach and close the log file handlers so the log file is flushed
for handler_to_close in logger.handlers[:]:
    logger.removeHandler(handler_to_close)
    handler_to_close.flush()
    handler_to_close.close()
# In[ ]:
# In[31]:
'''
weights_map_to_input_space = []
StackedNNobject = sda
image_dimension_x = 28*2
image_dimension_y = 28
if isinstance(StackedNNobject, SdA) or isinstance(StackedNNobject, MultipleAEs):
weights_product = StackedNNobject.dA_layers[0].W.get_value(borrow=True)
image = PIL.Image.fromarray(tile_raster_images(
X=weights_product.T,
img_shape=(image_dimension_x, image_dimension_y), tile_shape=(10, 10),
tile_spacing=(1, 1)))
sample_image_path = 'hidden_0_layer_weights.png'
image.save(sample_image_path)
weights_map_to_input_space.append(weights_product)
for i_layer in range(1, len(StackedNNobject.dA_layers)):
i_weigths = StackedNNobject.dA_layers[i_layer].W.get_value(borrow=True)
weights_product = np.dot(weights_product, i_weigths)
weights_map_to_input_space.append(weights_product)
image = PIL.Image.fromarray(tile_raster_images(
X=weights_product.T,
img_shape=(image_dimension_x, image_dimension_y), tile_shape=(10, 10),
tile_spacing=(1, 1)))
sample_image_path = 'hidden_'+ str(i_layer)+ '_layer_weights.png'
image.save(sample_image_path)
'''
# In[18]:
| gpl-2.0 |
cmoutard/mne-python | mne/parallel.py | 3 | 5045 | """Parallel util function
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: Simplified BSD
from .externals.six import string_types
import logging
import os
from . import get_config
from .utils import logger, verbose
from .fixes import _get_args
if 'MNE_FORCE_SERIAL' in os.environ:
_force_serial = True
else:
_force_serial = None
@verbose
def parallel_func(func, n_jobs, verbose=None, max_nbytes='auto'):
"""Return parallel instance with delayed function
Util function to use joblib only if available
Parameters
----------
func: callable
A function
n_jobs: int
Number of jobs to run in parallel
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
INFO or DEBUG will print parallel status, others will not.
max_nbytes : int, str, or None
Threshold on the minimum size of arrays passed to the workers that
        triggers automated memory mapping. Can be an int in Bytes,
or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays. Use 'auto' to
use the value set using mne.set_memmap_min_size.
Returns
-------
parallel: instance of joblib.Parallel or list
The parallel object
my_func: callable
func if not parallel or delayed(func)
n_jobs: int
Number of jobs >= 0
"""
# for a single job, we don't need joblib
if n_jobs == 1:
n_jobs = 1
my_func = func
parallel = list
return parallel, my_func, n_jobs
try:
from joblib import Parallel, delayed
except ImportError:
try:
from sklearn.externals.joblib import Parallel, delayed
except ImportError:
logger.warning('joblib not installed. Cannot run in parallel.')
n_jobs = 1
my_func = func
parallel = list
return parallel, my_func, n_jobs
# check if joblib is recent enough to support memmaping
p_args = _get_args(Parallel.__init__)
joblib_mmap = ('temp_folder' in p_args and 'max_nbytes' in p_args)
cache_dir = get_config('MNE_CACHE_DIR', None)
if isinstance(max_nbytes, string_types) and max_nbytes == 'auto':
max_nbytes = get_config('MNE_MEMMAP_MIN_SIZE', None)
if max_nbytes is not None:
if not joblib_mmap and cache_dir is not None:
logger.warning('"MNE_CACHE_DIR" is set but a newer version of '
'joblib is needed to use the memmapping pool.')
if joblib_mmap and cache_dir is None:
            logger.info('joblib supports the memmapping pool but "MNE_CACHE_DIR" '
'is not set in MNE-Python config. To enable it, use, '
'e.g., mne.set_cache_dir(\'/tmp/shm\'). This will '
'store temporary files under /dev/shm and can result '
'in large memory savings.')
# create keyword arguments for Parallel
kwargs = {'verbose': 5 if logger.level <= logging.INFO else 0}
if joblib_mmap:
if cache_dir is None:
max_nbytes = None # disable memmaping
kwargs['temp_folder'] = cache_dir
kwargs['max_nbytes'] = max_nbytes
n_jobs = check_n_jobs(n_jobs)
parallel = Parallel(n_jobs, **kwargs)
my_func = delayed(func)
return parallel, my_func, n_jobs
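if __name__ == '__main__':
    # Illustrative sketch (guarded; run with ``python -m mne.parallel`` so the
    # relative imports resolve). With n_jobs=1, or when joblib is missing,
    # ``parallel`` is simply ``list`` and ``my_func`` is the function itself.
    def _demo_square(x):
        return x * x
    parallel, p_square, _n_jobs = parallel_func(_demo_square, 1)
    print(parallel(p_square(k) for k in range(5)))  # -> [0, 1, 4, 9, 16]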
def check_n_jobs(n_jobs, allow_cuda=False):
"""Check n_jobs in particular for negative values
Parameters
----------
n_jobs : int
The number of jobs.
allow_cuda : bool
Allow n_jobs to be 'cuda'. Default: False.
Returns
-------
n_jobs : int
The checked number of jobs. Always positive (or 'cuda' if
        applicable).
"""
if not isinstance(n_jobs, int):
if not allow_cuda:
raise ValueError('n_jobs must be an integer')
elif not isinstance(n_jobs, string_types) or n_jobs != 'cuda':
raise ValueError('n_jobs must be an integer, or "cuda"')
# else, we have n_jobs='cuda' and this is okay, so do nothing
elif _force_serial:
n_jobs = 1
logger.info('... MNE_FORCE_SERIAL set. Processing in forced '
'serial mode.')
elif n_jobs <= 0:
try:
import multiprocessing
n_cores = multiprocessing.cpu_count()
n_jobs = min(n_cores + n_jobs + 1, n_cores)
if n_jobs <= 0:
raise ValueError('If n_jobs has a negative value it must not '
'be less than the number of CPUs present. '
'You\'ve got %s CPUs' % n_cores)
except ImportError:
# only warn if they tried to use something other than 1 job
if n_jobs != 1:
logger.warning('multiprocessing not installed. Cannot run in '
'parallel.')
n_jobs = 1
return n_jobs
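if __name__ == '__main__':
    # Illustrative sketch (guarded): negative values count back from one more
    # than the CPU total, so -1 requests all available cores, -2 all but one.
    print('check_n_jobs(1)  -> %d' % check_n_jobs(1))
    print('check_n_jobs(-1) -> %d' % check_n_jobs(-1))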
| bsd-3-clause |
dssg/wikienergy | disaggregator/weather.py | 1 | 13379 | """
.. module:: weather
:platform: Unix
:synopsis: Contains utilities for obtaining weather data and performing
temperature normalization. Also includes utilities for converting
temperatures to heating/cooling degree days.
.. moduleauthor:: Phil Ngo <ngo.phil@gmail.com>
.. moduleauthor:: Miguel Perez <miguel.a.perez4@gmail.com>
.. moduleauthor:: Stephen Suffian <stephen.suffian@gmail.com>
.. moduleauthor:: Sabina Tomkins <sabina.tomkins@gmail.com>
"""
import urllib2
import json
from datetime import datetime, timedelta, date
import collections
import pandas as pd
import numpy as np
import os
import utils
import ftplib
import StringIO
import gzip
def degree_day_regression(df, x_opt='both'):
'''
Function that runs the weather normalization regression on energy use data
df: dataframe that includes
use per day (upd)
heating degree days per day (hddpd)
cooling degree days per day (cddpd)
x_opt: options for the regression function
'hdd': run regression with just heating degree days
'cdd': run regression with just cooling degree days
        'both' (default): run regression with both heating and cooling degree days
'''
if x_opt == 'hdd':
covar = {'HDD': df.hdd_per_day}
results = pd.ols(y=df.use_per_day, x = covar)
return pd.DataFrame([[results.beta[1], results.std_err[1],
results.beta[0], results.std_err[0],
results.r2, results.r2_adj, results.nobs ]],
columns = ['intercept', 'intercept_std_err',
'HDD', 'HDD_std_err',
'R2', 'R2_adj','N_reads'])
elif x_opt == 'cdd':
covar = {'CDD': df.cdd_per_day}
results = pd.ols(y=df.use_per_day, x = covar)
return pd.DataFrame([[results.beta[1], results.std_err[1],
results.beta[0], results.std_err[0],
results.r2, results.r2_adj, results.nobs]],
columns = ['intercept', 'intercept_std_err',
'CDD', 'CDD_std_err',
'R2', 'R2_adj','N_reads'])
elif x_opt == 'both':
covar = {'CDD': df.cdd_per_day, 'HDD': df.hdd_per_day}
results = pd.ols(y=df.use_per_day, x = covar)
return pd.DataFrame([[results.beta[2], results.std_err[2],
results.beta[0], results.std_err[0],
results.beta[1], results.std_err[1],
results.r2, results.r2_adj, results.nobs]],
columns = ['intercept', 'intercept_std_err',
'CDD', 'CDD_std_err',
'HDD','HDD_std_err',
'R2', 'R2_adj','N_reads'])
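# Illustrative sketch, not part of the original module: the minimal frame
# shape expected by degree_day_regression. Column names are the ones the
# function actually accesses (use_per_day, hdd_per_day, cdd_per_day); the
# numbers below are made up.
def _degree_day_regression_example():
    daily = pd.DataFrame({'use_per_day': [30.1, 28.4, 35.0, 33.2],
                          'hdd_per_day': [12.0, 10.5, 0.0, 1.5],
                          'cdd_per_day': [0.0, 0.0, 6.2, 4.0]})
    return degree_day_regression(daily, x_opt='both')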
def get_hdd(ref_temp,df):
'''
Adds a column for heating degree days (converted from temp (F)).
'''
df['hdd']=ref_temp-df.temps
df['hdd'].loc[df.hdd<0]=0
df['hdd_cum']=df.hdd.cumsum()
return df
def get_cdd(ref_temp,df):
'''
    Adds a column for cooling degree days (converted from temp (F)).
'''
df['cdd']=df.temps-ref_temp
df['cdd'].loc[df.cdd<0]=0
df['cdd_cum']=df.cdd.cumsum()
return df
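# Illustrative sketch, not part of the original module: deriving HDD/CDD
# columns from a frame that has a ``temps`` column in degrees F. The 65 F
# reference temperature is a conventional choice assumed here, not a module
# default.
def _degree_day_columns_example(ref_temp=65.0):
    df = pd.DataFrame({'temps': [40.0, 55.0, 72.0, 90.0]})
    df = get_hdd(ref_temp, df)
    df = get_cdd(ref_temp, df)
    return df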
def get_weather_data_as_df_from_zipcode(api_key,zipcode,start_date,end_date):
"""
Return a dataframe indexed by time containing hourly weather data.
Requires Weather underground api key.
"""
query_results = get_weather_data(api_key,"","",start_date,end_date,zipcode=zipcode)
temp_temps = pd.read_json(query_results)
temp_temps = _combine_date_time_and_index(temp_temps)
return _remove_low_outliers_df(temp_temps,'temp')
def get_weather_data_as_df(api_key,city,state,start_date,end_date):
"""
Return a dataframe indexed by time containing hourly weather data.
Requires Weather underground api key.
"""
query_results = get_weather_data(api_key,city,state,start_date,end_date)
temp_temps = pd.read_json(query_results)
temp_temps = _combine_date_time_and_index(temp_temps)
return _remove_low_outliers_df(temp_temps,'temp')
def get_weather_data(api_key,city,state,start_date,end_date,zipcode=None):
'''
Returns a json string given a city, state, and desired date (YYYYMMDD)
'''
if(start_date is not None and end_date is not None):
#format our date structure to pass to our http request
date_format = "%Y%m%d"
objects_list = []
print 'in weather function'
#count from start_date to end_date
num_days = (end_date - start_date).days
dates = start_date + timedelta(days=num_days)
formatted_dates = datetime.strftime(dates, date_format)
#create query which will iterate through desired weather period
if zipcode:
query = 'http://api.wunderground.com/api/'+ api_key +\
'/history_' + formatted_dates + '/q/' + zipcode + '.json'
else:
# use state and city
city=city.replace(" ","%20")
query = 'http://api.wunderground.com/api/'+ api_key +\
'/history_' + formatted_dates + '/q/' + state + '/' + city + '.json'
print "Weather query: {}".format(query)
#iterate through the number of days and query the api. dump json results every time
f = urllib2.urlopen(query)
#read query as a json string
json_string = f.read()
#parse/load json string
parsed_json = json.loads(json_string)
#Iterate through each json object and append it to an ordered dictionary
for i in parsed_json['history']['observations']:
d = collections.OrderedDict()
d['date'] = i['date']['mon'] + '/' + i['date']['mday'] + '/' + i['date']['year']
d['time'] = i['date']['pretty'][0:8]
d['temp'] = i['tempi']
d['conds'] = i['conds']
d['wdire'] = i['wdire']
d['wdird'] = i['wdird']
d['hail'] = i['hail']
d['thunder'] = i['thunder']
d['pressurei'] = i['pressurei']
d['snow'] = i['snow']
d['pressurem'] = i['pressurem']
d['fog'] = i['fog']
d['tornado'] = i['tornado']
d['hum'] = i['hum']
d['tempi'] = i['tempi']
d['tempm'] = i['tempm']
d['dewptm'] = i['dewptm']
d['dewpti'] = i['dewpti']
d['rain'] = i['rain']
d['visim'] = i['visi']
d['wspdi'] = i['wspdi']
d['wspdm'] = i['wspdm']
objects_list.append(d)
#dump the dictionary into a json object
j = json.dumps(objects_list)
#append our json object to a list for every day and return its data
return j
#If we just need the data for ONE day (pass None for end_date):
if(end_date is None):
start_date_str = datetime.strftime(start_date, date_format)
if zipcode:
query = 'http://api.wunderground.com/api/'+ api_key +\
'/history_' + start_date_str + '/q/' + zipcode + '.json'
else:
query = 'http://api.wunderground.com/api/'+ api_key +\
'/history_' + start_date_str + '/q/' + state + '/' + city + '.json'
f = urllib2.urlopen(query)
json_string = f.read()
parsed_json = json.loads(json_string)
objects_list = []
for i in parsed_json['history']['observations']:
d = collections.OrderedDict()
d['date'] = i['date']['mon'] + '/' + i['date']['mday'] + '/' + i['date']['year']
d['time'] = i['date']['pretty'][0:8]
d['temp'] = i['tempi']
d['conds'] = i['conds']
d['wdire'] = i['wdire']
d['wdird'] = i['wdird']
d['hail'] = i['hail']
d['thunder'] = i['thunder']
d['pressurei'] = i['pressurei']
d['snow'] = i['snow']
d['pressurem'] = i['pressurem']
d['fog'] = i['fog']
d['tornado'] = i['tornado']
d['hum'] = i['hum']
d['tempi'] = i['tempi']
d['tempm'] = i['tempm']
d['dewptm'] = i['dewptm']
d['dewpti'] = i['dewpti']
d['rain'] = i['rain']
d['visim'] = i['visi']
d['wspdi'] = i['wspdi']
d['wspdm'] = i['wspdm']
objects_list.append(d)
j = json.dumps(objects_list)
return j
class GSODWeatherSource:
def __init__(self,station_id,start_year,end_year):
if len(station_id) == 6:
# given station id is the six digit code, so need to get full name
gsod_station_index_filename = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(utils.__file__))),
'resources',
'GSOD-ISD_station_index.json')
with open(gsod_station_index_filename,'r') as f:
station_index = json.load(f)
# take first station in list
potential_station_ids = station_index[station_id]
else:
# otherwise, just use the given id
potential_station_ids = [station_id]
self._data = {}
ftp = ftplib.FTP("ftp.ncdc.noaa.gov")
ftp.login()
data = []
for year in xrange(start_year,end_year + 1):
string = StringIO.StringIO()
# not every station will be available in every year, so use the
# first one that works
for station_id in potential_station_ids:
try:
ftp.retrbinary('RETR /pub/data/gsod/{year}/{station_id}-{year}.op.gz'.format(station_id=station_id,year=year),string.write)
break
except (IOError,ftplib.error_perm):
pass
string.seek(0)
f = gzip.GzipFile(fileobj=string)
self._add_file(f)
string.close()
f.close()
ftp.quit()
def _add_file(self,f):
for line in f.readlines()[1:]:
columns=line.split()
self._data[columns[2]] = float(columns[3])
def get_weather_range(self,start,end):
temps = []
for days in range((end - start).days):
dt = start + timedelta(days=days)
temps.append(self._data.get(dt.strftime("%Y%m%d"),float("nan")))
return temps
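# Illustrative sketch, not part of the original module: fetching a span of
# daily GSOD temperatures. '722590-03927' is a hypothetical 'STN-WBAN'
# station identifier; constructing the source downloads the yearly files
# over FTP, so this needs network access.
def _gsod_example():
    source = GSODWeatherSource('722590-03927', 2012, 2013)
    return source.get_weather_range(date(2012, 6, 1), date(2012, 6, 8))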
def weather_normalize(trace,temperature,set_point):
'''
Returns a weather-normalized trace
'''
pass
def get_station_id_from_zip_code(zip_code,google_api_key,solar_api_key):
'''
Returns a station id given a zip code.
'''
[lat,lng]=get_lat_lng_from_zip_code(zip_code,google_api_key)
station_id=get_station_id_from_lat_lng(lat,lng,solar_api_key)
return station_id
def get_station_id_from_lat_lng(lat,lng,solar_api_key):
'''
Returns a station id given a lat long.
'''
f = urllib2.urlopen('http://developer.nrel.gov/api/solar/data_query/v1.json?api_key='+solar_api_key+'&lat='+str(lat)+'&lon='+str(lng))
json_string = f.read()
parsed_json = json.loads(json_string)
station_id_unicode=parsed_json['outputs']['tmy3']['id']
station_id=int(str.split(str(station_id_unicode),'-')[1])
return station_id
def get_lat_lng_from_zip_code(zip_code,google_api_key):
'''
Returns a lat long given a zip code.
'''
zip_code=zip_code.replace(' ','+')
zip_code=zip_code.replace(',','%2C')
f = urllib2.urlopen('https://maps.googleapis.com/maps/api/geocode/json?address='+zip_code+'&key='+google_api_key)
json_string = f.read()
parsed_json_lat_lng = json.loads(json_string)
lat=parsed_json_lat_lng['results'][0]['geometry']['location']['lat']
lng=parsed_json_lat_lng['results'][0]['geometry']['location']['lng']
return [lat,lng]
def _index_df_by_date(df):
df['date'] = pd.to_datetime(df['date'])
df.set_index('date', inplace=True)
df.index.snap() # snap to nearest frequency
def _combine_date_time_and_index(temp_df):
for i,date in enumerate(temp_df['date']):
hour_min=temp_df['time'][i].split(':')
hour=hour_min[0]
min_ampm=hour_min[1].split(' ')
minute=min_ampm[0]
        if('PM' in min_ampm[1]):
            hour=int(hour)
            if(hour != 12):  # 1-11 PM shift by 12 hours; 12 PM stays noon
                hour=hour+12
        elif('AM' in min_ampm[1] and int(hour) == 12):
            hour=0  # 12 AM is midnight
temp_df['date'][i]=date.replace(hour=int(hour),minute=int(minute))
_index_df_by_date(temp_df)
temp_df=temp_df.resample('H',how='mean')
return temp_df
def _remove_low_outliers_df(df,column_name):
'''
This removes weather outliers below -40 degrees. This is due to
inaccuracies in the weather API. This function requires the indexes
to be datetimes across a consistent time interval. It uses this time
interval to replace the outlier with its nearest neighbor.
'''
threshold = -40
outliers=df[column_name][(df[column_name] < threshold)].index
time_delta=df[column_name].index[1]-df[column_name].index[0]
offset=time_delta.seconds+time_delta.days*3600*24
a=0
for a,i in enumerate(outliers):
try: df[column_name][i]= df[column_name][i-pd.DateOffset(seconds=offset)]
except KeyError: df[column_name][i]= df[column_name][i+pd.DateOffset(seconds=offset)]
return df
| mit |
mjgrav2001/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 272 | 6972 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of the calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# dont return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| mit |
whn09/tensorflow | tensorflow/python/estimator/inputs/inputs.py | 94 | 1290 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility methods to create simple input_fns."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long
from tensorflow.python.estimator.inputs.numpy_io import numpy_input_fn
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long
_allowed_symbols = [
'numpy_input_fn',
'pandas_input_fn'
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
| apache-2.0 |
nirum/jetpack | jetpack/animation.py | 2 | 2567 | """Animation."""
from matplotlib import animation
import matplotlib.pyplot as plt
import numpy as np
from moviepy.editor import VideoClip
from moviepy.video.io.bindings import mplfig_to_npimage
from .chart_utils import plotwrapper, noticks
__all__ = ['save_movie', 'play', 'save_frames']
def save_movie(make_frame, duration, filename, fps=20):
"""Writes an animation to disk."""
anim = VideoClip(make_frame, duration=duration)
if filename.endswith('.gif'):
anim.write_gif(filename, fps=fps)
elif filename.endswith('.mp4'):
anim.write_videofile(filename, fps=fps)
else:
raise ValueError(f'Invalid file type for {filename}. Must be .gif or .mp4')
return anim
@plotwrapper
def play(frames, repeat=True, fps=15, cmap='seismic_r', clim=None, **kwargs):
"""Plays the stack of frames as a movie"""
fig = kwargs['fig']
ax = kwargs['ax']
img = ax.imshow(frames[0], aspect='equal')
noticks(ax=ax)
# Set up the colors
img.set_cmap(cmap)
img.set_interpolation('nearest')
if clim is None:
maxval = np.max(np.abs(frames))
img.set_clim([-maxval, maxval])
else:
img.set_clim(clim)
# Animation function (called sequentially)
def animate(i):
# ax.set_title('Frame {0:#d}'.format(i + 1))
img.set_data(frames[i])
# Call the animator
dt = 1000 / fps
anim = animation.FuncAnimation(fig,
animate,
np.arange(frames.shape[0]),
interval=dt,
repeat=repeat)
return anim
@plotwrapper
def save_frames(frames,
filename,
cmap='seismic_r',
T=None,
clim=None,
fps=15,
figsize=None,
**kwargs):
"""Saves the stack of frames as a movie"""
fig = kwargs['fig']
ax = kwargs['ax']
# total length
if T is None:
T = frames.shape[0]
X = frames.copy()
img = ax.imshow(X[0])
ax.axis('off')
ax.set_aspect('equal')
ax.set_xlim(0, X.shape[1])
ax.set_ylim(0, X.shape[2])
ax.set_xticks([])
ax.set_yticks([])
# Set up the colors
img.set_cmap(cmap)
img.set_interpolation('nearest')
if clim is None:
maxval = np.max(np.abs(X))
img.set_clim([-maxval, maxval])
else:
img.set_clim(clim)
plt.show()
plt.draw()
dt = 1 / fps
def animate(t):
i = np.mod(int(t / dt), T)
# ax.set_title(f't={i*0.01:2.2f} s')
img.set_data(X[i])
return mplfig_to_npimage(fig)
save_movie(animate, T * dt, filename, fps=fps)
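# Illustrative sketch, not part of the original module: playing and saving a
# random (time, height, width) stack of frames. The array contents and the
# output filename are placeholders.
def _animation_example():
    frames = np.random.randn(20, 32, 32)
    anim = play(frames, fps=10)
    save_frames(frames, 'example_movie.mp4', fps=10)
    return anim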
| mit |
hainm/statsmodels | statsmodels/sandbox/tools/try_mctools.py | 34 | 1944 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 30 15:20:45 2011
@author: josef
"""
from statsmodels.compat.python import lrange
import numpy as np
from scipy import stats
from statsmodels.sandbox.tools.mctools import StatTestMC
from statsmodels.sandbox.stats.diagnostic import (
acorr_ljungbox, unitroot_adf)
def normalnoisesim(nobs=500, loc=0.0):
return (loc+np.random.randn(nobs))
def lb(x):
s,p = acorr_ljungbox(x, lags=4)
return np.r_[s, p]
mc1 = StatTestMC(normalnoisesim, lb)
mc1.run(5000, statindices=lrange(4))
print(mc1.summary_quantiles([1,2,3], stats.chi2([2,3,4]).ppf,
varnames=['lag 1', 'lag 2', 'lag 3'],
title='acorr_ljungbox'))
print('\n\n')
frac = [0.01, 0.025, 0.05, 0.1, 0.975]
crit = stats.chi2([2,3,4]).ppf(np.atleast_2d(frac).T)
print(mc1.summary_cdf([1,2,3], frac, crit,
varnames=['lag 1', 'lag 2', 'lag 3'],
title='acorr_ljungbox'))
print(mc1.cdf(crit, [1,2,3])[1])
#----------------------
def randwalksim(nobs=500, drift=0.0):
return (drift+np.random.randn(nobs)).cumsum()
def adf20(x):
return unitroot_adf(x, 2, trendorder=0, autolag=None)
print(adf20(np.random.randn(100)))
mc2 = StatTestMC(randwalksim, adf20)
mc2.run(10000, statindices=[0,1])
frac = [0.01, 0.05, 0.1]
#bug
crit = np.array([-3.4996365338407074, -2.8918307730370025, -2.5829283377617176])[:,None]
print(mc2.summary_cdf([0], frac, crit,
varnames=['adf'],
title='adf'))
#bug
#crit2 = np.column_stack((crit, frac))
#print mc2.summary_cdf([0, 1], frac, crit,
# varnames=['adf'],
# title='adf')
print(mc2.quantiles([0]))
print(mc2.cdf(crit, [0]))
doplot=1
if doplot:
import matplotlib.pyplot as plt
mc1.plot_hist([3],stats.chi2([4]).pdf)
plt.title('acorr_ljungbox - MC versus chi2')
plt.show()
| bsd-3-clause |
bencebecsy/galaxy-priors | make_glade_histograms.py | 1 | 4595 | #!/usr/bin/python
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
DPI=300
infile=sys.argv[1]
glade = np.load(infile)
#filter
filt = range(glade.shape[0])
#print glade.shape[0]
#columns:(0) PGC number (1) GWGC name (2) HyperLEDA name (3) 2MASS name (4) SDSS-DR12 name
# (5) flag1 Q/G (6) RA [deg] (7) dec [deg] (8) dist [Mpc] (9) dist_err [Mpc]
# (10) z (11) apparent B mag (12) B_err (13) apparent J mag (14) J_err (15) apparent H mag
# (16) H_err (17) apparent K mag (18) K_err (19) flag2 0: no measured dist or z 1: measured z 2: measured dist
# (20) flag3 0: measured B mag 1: ML estimated B mag (21) B_err_min (22) B_err_max
# (23) flag4 0: z from measured dist 1: ML estimated z (24) z_err_min (25) z_err_max
ra = glade[filt,6].astype(float)
dec = glade[filt,7].astype(float)
dist = glade[filt,8].astype(float)
b = glade[filt,11].astype(float)
j = glade[filt,13].astype(float)
h = glade[filt,15].astype(float)
k = glade[filt,17].astype(float)
print dist.dtype
#################################
#DISTANCE
#################################
dist = dist[~np.isnan(dist)]
print dist.size
plt.figure(0)
plt.hist(dist, bins=100)
plt.title("Mean=%.3f, RMS=%.3f, Number of points=%d"%(np.mean(dist), np.sqrt(np.mean(dist**2)), dist.size))
plt.xlabel("Distance [Mpc]")
plt.ylabel("#")
plt.gca().set_yscale('log', nonposy='clip')
plt.savefig('glade_hist_dist_linlog.png', dpi=DPI)
#log-log plot
plt.figure(7)
plt.hist(dist, bins=np.logspace(0 ,np.log10(np.amax(dist)), 100))
plt.title("Mean=%.3f, RMS=%.3f, Number of points=%d"%(np.mean(dist), np.sqrt(np.mean(dist**2)), dist.size))
plt.xlabel("Distance [Mpc]")
plt.ylabel("#")
plt.gca().set_xscale('log')
plt.gca().set_yscale('log', nonposy='clip')
plt.savefig('glade_hist_dist_loglog.png', dpi=DPI)
plt.figure(8)
plt.hist(dist, bins=100, range=(dist.min(),100.0))
plt.title("Mean=%.3f, RMS=%.3f, Number of points=%d"%(np.mean(dist), np.sqrt(np.mean(dist**2)), dist.size))
plt.xlabel("Distance [Mpc]")
plt.ylabel("#")
plt.gca().set_yscale('log', nonposy='clip')
plt.savefig('glade_hist_dist_linlog_zoom.png', dpi=DPI)
#################################
#RA
#################################
ra = ra[~np.isnan(ra)]
print ra.size
plt.figure(1)
plt.hist(ra, bins=100)
plt.title("Mean=%.3f, RMS=%.3f, Number of points=%d"%(np.mean(ra), np.sqrt(np.mean(ra**2)), ra.size))
plt.xlabel("RA [deg]")
plt.ylabel("#")
#plt.gca().set_yscale('log')
plt.savefig('glade_hist_ra.png', dpi=DPI)
#################################
#DEC
#################################
dec = dec[~np.isnan(dec)]
print dec.size
plt.figure(2)
plt.hist(dec, bins=100)
plt.title("Mean=%.3f, RMS=%.3f, Number of points=%d"%(np.mean(dec), np.sqrt(np.mean(dec**2)), dec.size))
plt.xlabel("Dec [deg]")
plt.ylabel("#")
plt.gca().set_yscale('log', nonposy='clip')
plt.savefig('glade_hist_dec.png', dpi=DPI)
#################################
#B MAGNITUDE (lambda_middle = 365 nm)
#################################
b = b[~np.isnan(b)]
print b.size
plt.figure(3)
plt.hist(b, bins=100)
plt.title("Mean=%.3f, RMS=%.3f, Number of points=%d"%(np.mean(b), np.sqrt(np.mean(b**2)), b.size))
plt.xlabel("Apparent B magnitude")
plt.ylabel("#")
plt.gca().set_yscale('log', nonposy='clip')
plt.savefig('glade_hist_b.png', dpi=DPI)
#################################
#J MAGNITUDE (lambda_middle = 1220 nm)
#################################
j = j[~np.isnan(j)]
print j.size
plt.figure(4)
plt.hist(j, bins=100)
plt.title("Mean=%.3f, RMS=%.3f, Number of points=%d"%(np.mean(j), np.sqrt(np.mean(j**2)), j.size))
plt.xlabel("Apparent J magnitude")
plt.ylabel("#")
plt.gca().set_yscale('log', nonposy='clip')
plt.savefig('glade_hist_j.png', dpi=DPI)
#################################
#H MAGNITUDE (lambda_middle = 1630 nm)
#################################
h = h[~np.isnan(h)]
print h.size
plt.figure(5)
plt.hist(h, bins=100)
plt.title("Mean=%.3f, RMS=%.3f, Number of points=%d"%(np.mean(h), np.sqrt(np.mean(h**2)), h.size))
plt.xlabel("Apparent H magnitude")
plt.ylabel("#")
plt.gca().set_yscale('log', nonposy='clip')
plt.savefig('glade_hist_h.png', dpi=DPI)
#################################
#K MAGNITUDE (lambda_middle = 2190 nm)
#################################
k = k[~np.isnan(k)]
print k.size
plt.figure(6)
plt.hist(k, bins=100)
plt.title("Mean=%.3f, RMS=%.3f, Number of points=%d"%(np.mean(k), np.sqrt(np.mean(k**2)), k.size))
plt.xlabel("Apparent K magnitude")
plt.ylabel("#")
plt.gca().set_yscale('log', nonposy='clip')
plt.savefig('glade_hist_k.png', dpi=DPI)
| mit |
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/site-packages/dask/dataframe/hyperloglog.py | 7 | 2515 | # -*- coding: utf-8 -*-
u"""Implementation of HyperLogLog
This implements the HyperLogLog algorithm for cardinality estimation, found
in
Philippe Flajolet, Éric Fusy, Olivier Gandouet and Frédéric Meunier.
"HyperLogLog: the analysis of a near-optimal cardinality estimation
algorithm". 2007 Conference on Analysis of Algorithms. Nice, France
(2007)
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from .hashing import hash_pandas_object
def compute_first_bit(a):
"Compute the position of the first nonzero bit for each int in an array."
# TODO: consider making this less memory-hungry
bits = np.bitwise_and.outer(a, 1 << np.arange(32))
bits = bits.cumsum(axis=1).astype(np.bool)
return 33 - bits.sum(axis=1)
def compute_hll_array(obj, b):
# b is the number of bits
if not 8 <= b <= 16:
raise ValueError('b should be between 8 and 16')
num_bits_discarded = 32 - b
m = 1 << b
# Get an array of the hashes
hashes = hash_pandas_object(obj, index=False)
if isinstance(hashes, pd.Series):
hashes = hashes._values
hashes = hashes.astype(np.uint32)
# Of the first b bits, which is the first nonzero?
j = hashes >> num_bits_discarded
first_bit = compute_first_bit(hashes)
# Pandas can do the max aggregation
df = pd.DataFrame({'j': j, 'first_bit': first_bit})
series = df.groupby('j').max()['first_bit']
# Return a dense array so we can concat them and get a result
# that is easy to deal with
return series.reindex(np.arange(m), fill_value=0).values.astype(np.uint8)
def reduce_state(Ms, b):
m = 1 << b
# We concatenated all of the states, now we need to get the max
# value for each j in both
Ms = Ms.reshape((len(Ms) // m), m)
return Ms.max(axis=0)
def estimate_count(Ms, b):
m = 1 << b
# Combine one last time
M = reduce_state(Ms, b)
# Estimate cardinality, no adjustments
alpha = 0.7213 / (1 + 1.079 / m)
E = alpha * m / (2.0 ** -M.astype('f8')).sum() * m
# ^^^^ starts as unsigned, need a signed type for
# negation operator to do something useful
# Apply adjustments for small / big cardinalities, if applicable
if E < 2.5 * m:
V = (M == 0).sum()
if V:
return m * np.log(m / V)
if E > 2**32 / 30.0:
return -2**32 * np.log1p(-E / 2**32)
return E
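# Illustrative sketch, not part of the original module: estimating the number
# of distinct values in a pandas Series with the helpers above. ``b`` trades
# memory (2**b registers) against accuracy; a single register array can be
# passed straight to estimate_count because reduce_state handles the
# concatenated case.
def _hll_example(b=12):
    s = pd.Series(np.random.randint(0, 50000, size=200000))
    M = compute_hll_array(s, b)
    return estimate_count(M, b)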
| gpl-3.0 |
endolith/scikit-image | doc/ext/notebook_doc.py | 44 | 3042 | __all__ = ['python_to_notebook', 'Notebook']
import json
import copy
import warnings
# Skeleton notebook in JSON format
skeleton_nb = """{
"metadata": {
"name":""
},
"nbformat": 3,
"nbformat_minor": 0,
"worksheets": [
{
"cells": [
{
"cell_type": "code",
"collapsed": false,
"input": [
"%matplotlib inline"
],
"language": "python",
"metadata": {},
"outputs": []
}
],
"metadata": {}
}
]
}"""
class Notebook(object):
"""
Notebook object for building an IPython notebook cell-by-cell.
"""
def __init__(self):
# cell type code
self.cell_code = {
'cell_type': 'code',
'collapsed': False,
'input': [
'# Code Goes Here'
],
'language': 'python',
'metadata': {},
'outputs': []
}
# cell type markdown
self.cell_md = {
'cell_type': 'markdown',
'metadata': {},
'source': [
'Markdown Goes Here'
]
}
self.template = json.loads(skeleton_nb)
self.cell_type = {'input': self.cell_code, 'source': self.cell_md}
self.valuetype_to_celltype = {'code': 'input', 'markdown': 'source'}
def add_cell(self, value, cell_type='code'):
"""Add a notebook cell.
Parameters
----------
value : str
Cell content.
cell_type : {'code', 'markdown'}
Type of content (default is 'code').
"""
if cell_type in ['markdown', 'code']:
key = self.valuetype_to_celltype[cell_type]
cells = self.template['worksheets'][0]['cells']
cells.append(copy.deepcopy(self.cell_type[key]))
# assign value to the last cell
cells[-1][key] = value
else:
warnings.warn('Ignoring unsupported cell type (%s)' % cell_type)
def json(self):
"""Return a JSON representation of the notebook.
Returns
-------
str
JSON notebook.
"""
return json.dumps(self.template, indent=2)
def test_notebook_basic():
nb = Notebook()
assert(json.loads(nb.json()) == json.loads(skeleton_nb))
def test_notebook_add():
nb = Notebook()
str1 = 'hello world'
str2 = 'f = lambda x: x * x'
nb.add_cell(str1, cell_type='markdown')
nb.add_cell(str2, cell_type='code')
d = json.loads(nb.json())
cells = d['worksheets'][0]['cells']
values = [c['input'] if c['cell_type'] == 'code' else c['source']
for c in cells]
assert values[1] == str1
assert values[2] == str2
assert cells[1]['cell_type'] == 'markdown'
assert cells[2]['cell_type'] == 'code'
if __name__ == "__main__":
import numpy.testing as npt
npt.run_module_suite()
| bsd-3-clause |
rnowling/aranyani | setup.py | 2 | 1136 | """
Copyright 2020 Ronald J. Nowling
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from setuptools import setup
setup(name="asaph",
version=2.0,
description="SNP analysis",
author="Ronald J. Nowling",
author_email="rnowling@gmail.com",
license="Apache License, Version 2.0",
zip_safe=False,
packages=["asaph"],
python_requires=">=3.6",
install_requires = ["numpy>=0.19.1", "scipy>=0.19.1", "matplotlib", "seaborn", "sklearn", "joblib", "pandas"],
scripts=["bin/asaph_pca", "bin/asaph_query",
"bin/asaph_detect_and_localize", "bin/asaph_genotype", "bin/asaph_generate_data"])
| apache-2.0 |
heli522/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
aje/POT | examples/plot_otda_classes.py | 2 | 4352 | # -*- coding: utf-8 -*-
"""
========================
OT for domain adaptation
========================
This example introduces a domain adaptation problem in a 2D setting and the 4 OTDA
approaches currently supported in POT.
"""
# Authors: Remi Flamary <remi.flamary@unice.fr>
# Stanislas Chambon <stan.chambon@gmail.com>
#
# License: MIT License
import matplotlib.pylab as pl
import ot
##############################################################################
# Generate data
# -------------
n_source_samples = 150
n_target_samples = 150
Xs, ys = ot.datasets.get_data_classif('3gauss', n_source_samples)
Xt, yt = ot.datasets.get_data_classif('3gauss2', n_target_samples)
##############################################################################
# Instantiate the different transport algorithms and fit them
# -----------------------------------------------------------
# EMD Transport
ot_emd = ot.da.EMDTransport()
ot_emd.fit(Xs=Xs, Xt=Xt)
# Sinkhorn Transport
ot_sinkhorn = ot.da.SinkhornTransport(reg_e=1e-1)
ot_sinkhorn.fit(Xs=Xs, Xt=Xt)
# Sinkhorn Transport with Group lasso regularization
ot_lpl1 = ot.da.SinkhornLpl1Transport(reg_e=1e-1, reg_cl=1e0)
ot_lpl1.fit(Xs=Xs, ys=ys, Xt=Xt)
# Sinkhorn Transport with Group lasso regularization l1l2
ot_l1l2 = ot.da.SinkhornL1l2Transport(reg_e=1e-1, reg_cl=2e0, max_iter=20,
verbose=True)
ot_l1l2.fit(Xs=Xs, ys=ys, Xt=Xt)
# transport source samples onto target samples
transp_Xs_emd = ot_emd.transform(Xs=Xs)
transp_Xs_sinkhorn = ot_sinkhorn.transform(Xs=Xs)
transp_Xs_lpl1 = ot_lpl1.transform(Xs=Xs)
transp_Xs_l1l2 = ot_l1l2.transform(Xs=Xs)
##############################################################################
# Fig 1 : plots source and target samples
# ---------------------------------------
pl.figure(1, figsize=(10, 5))
pl.subplot(1, 2, 1)
pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')
pl.xticks([])
pl.yticks([])
pl.legend(loc=0)
pl.title('Source samples')
pl.subplot(1, 2, 2)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')
pl.xticks([])
pl.yticks([])
pl.legend(loc=0)
pl.title('Target samples')
pl.tight_layout()
##############################################################################
# Fig 2 : plot optimal couplings and transported samples
# ------------------------------------------------------
param_img = {'interpolation': 'nearest', 'cmap': 'spectral'}
pl.figure(2, figsize=(15, 8))
pl.subplot(2, 4, 1)
pl.imshow(ot_emd.coupling_, **param_img)
pl.xticks([])
pl.yticks([])
pl.title('Optimal coupling\nEMDTransport')
pl.subplot(2, 4, 2)
pl.imshow(ot_sinkhorn.coupling_, **param_img)
pl.xticks([])
pl.yticks([])
pl.title('Optimal coupling\nSinkhornTransport')
pl.subplot(2, 4, 3)
pl.imshow(ot_lpl1.coupling_, **param_img)
pl.xticks([])
pl.yticks([])
pl.title('Optimal coupling\nSinkhornLpl1Transport')
pl.subplot(2, 4, 4)
pl.imshow(ot_l1l2.coupling_, **param_img)
pl.xticks([])
pl.yticks([])
pl.title('Optimal coupling\nSinkhornL1l2Transport')
pl.subplot(2, 4, 5)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=0.3)
pl.scatter(transp_Xs_emd[:, 0], transp_Xs_emd[:, 1], c=ys,
marker='+', label='Transp samples', s=30)
pl.xticks([])
pl.yticks([])
pl.title('Transported samples\nEmdTransport')
pl.legend(loc="lower left")
pl.subplot(2, 4, 6)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=0.3)
pl.scatter(transp_Xs_sinkhorn[:, 0], transp_Xs_sinkhorn[:, 1], c=ys,
marker='+', label='Transp samples', s=30)
pl.xticks([])
pl.yticks([])
pl.title('Transported samples\nSinkhornTransport')
pl.subplot(2, 4, 7)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=0.3)
pl.scatter(transp_Xs_lpl1[:, 0], transp_Xs_lpl1[:, 1], c=ys,
marker='+', label='Transp samples', s=30)
pl.xticks([])
pl.yticks([])
pl.title('Transported samples\nSinkhornLpl1Transport')
pl.subplot(2, 4, 8)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=0.3)
pl.scatter(transp_Xs_l1l2[:, 0], transp_Xs_l1l2[:, 1], c=ys,
marker='+', label='Transp samples', s=30)
pl.xticks([])
pl.yticks([])
pl.title('Transported samples\nSinkhornL1l2Transport')
pl.tight_layout()
pl.show()
| mit |
huanzhang12/LightGBM | tests/python_package_test/test_engine.py | 1 | 19707 | # coding: utf-8
# pylint: skip-file
import copy
import math
import os
import unittest
import lightgbm as lgb
import random
import numpy as np
from sklearn.datasets import (load_boston, load_breast_cancer, load_digits,
load_iris, load_svmlight_file)
from sklearn.metrics import log_loss, mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split, TimeSeriesSplit
try:
import pandas as pd
IS_PANDAS_INSTALLED = True
except ImportError:
IS_PANDAS_INSTALLED = False
try:
import cPickle as pickle
except ImportError:
import pickle
def multi_logloss(y_true, y_pred):
return np.mean([-math.log(y_pred[i][y]) for i, y in enumerate(y_true)])
class TestEngine(unittest.TestCase):
def test_binary(self):
X, y = load_breast_cancer(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'binary',
'metric': 'binary_logloss',
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=50,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
ret = log_loss(y_test, gbm.predict(X_test))
self.assertLess(ret, 0.15)
self.assertAlmostEqual(evals_result['valid_0']['binary_logloss'][-1], ret, places=5)
def test_rf(self):
X, y = load_breast_cancer(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'boosting_type': 'rf',
'objective': 'binary',
'bagging_freq': 1,
'bagging_fraction': 0.5,
'feature_fraction': 0.5,
'num_leaves': 50,
'metric': 'binary_logloss',
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=50,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
ret = log_loss(y_test, gbm.predict(X_test))
self.assertLess(ret, 0.25)
self.assertAlmostEqual(evals_result['valid_0']['binary_logloss'][-1], ret, places=5)
    def test_regression(self):
X, y = load_boston(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'metric': 'l2',
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=50,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
ret = mean_squared_error(y_test, gbm.predict(X_test))
self.assertLess(ret, 16)
self.assertAlmostEqual(evals_result['valid_0']['l2'][-1], ret, places=5)
def test_missing_value_handle(self):
X_train = np.zeros((1000, 1))
y_train = np.zeros(1000)
trues = random.sample(range(1000), 200)
for idx in trues:
X_train[idx, 0] = np.nan
y_train[idx] = 1
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_train, y_train)
params = {
'metric': 'l2',
'verbose': -1,
'boost_from_average': False
}
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=20,
valid_sets=lgb_eval,
verbose_eval=True,
evals_result=evals_result)
ret = mean_squared_error(y_train, gbm.predict(X_train))
self.assertLess(ret, 0.005)
self.assertAlmostEqual(evals_result['valid_0']['l2'][-1], ret, places=5)
def test_missing_value_handle_na(self):
x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
y = [1, 1, 1, 1, 0, 0, 0, 0, 1]
X_train = np.array(x).reshape(len(x), 1)
y_train = np.array(y)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_train, y_train)
params = {
'objective': 'binary',
'metric': 'auc',
'verbose': -1,
'boost_from_average': False,
'min_data': 1,
'num_leaves': 2,
'learning_rate': 1,
'min_data_in_bin': 1,
'zero_as_missing': False
}
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=1,
valid_sets=lgb_eval,
verbose_eval=True,
evals_result=evals_result)
pred = gbm.predict(X_train)
self.assertAlmostEqual(pred[-1], pred[0], places=5)
def test_missing_value_handle_zero(self):
x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
y = [0, 1, 1, 1, 0, 0, 0, 0, 0]
X_train = np.array(x).reshape(len(x), 1)
y_train = np.array(y)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_train, y_train)
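        # zero_as_missing=True: both 0 and NaN are treated as missing, so their predictions should coincide.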
params = {
'objective': 'binary',
'metric': 'auc',
'verbose': -1,
'boost_from_average': False,
'min_data': 1,
'num_leaves': 2,
'learning_rate': 1,
'min_data_in_bin': 1,
'zero_as_missing': True
}
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=1,
valid_sets=lgb_eval,
verbose_eval=True,
evals_result=evals_result)
pred = gbm.predict(X_train)
self.assertAlmostEqual(pred[-1], pred[-2], places=5)
self.assertAlmostEqual(pred[-1], pred[0], places=5)
def test_missing_value_handle_none(self):
x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
y = [0, 1, 1, 1, 0, 0, 0, 0, 0]
X_train = np.array(x).reshape(len(x), 1)
y_train = np.array(y)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_train, y_train)
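        # use_missing=False: no special missing-value handling, so NaN should land in the same bin as the smallest values.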
params = {
'objective': 'binary',
'metric': 'auc',
'verbose': -1,
'boost_from_average': False,
'min_data': 1,
'num_leaves': 2,
'learning_rate': 1,
'min_data_in_bin': 1,
'use_missing': False
}
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=1,
valid_sets=lgb_eval,
verbose_eval=True,
evals_result=evals_result)
pred = gbm.predict(X_train)
self.assertAlmostEqual(pred[0], pred[1], places=5)
self.assertAlmostEqual(pred[-1], pred[0], places=5)
def test_multiclass(self):
X, y = load_digits(10, True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'multiclass',
'metric': 'multi_logloss',
'num_class': 10,
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train, params=params)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=50,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
ret = multi_logloss(y_test, gbm.predict(X_test))
self.assertLess(ret, 0.2)
self.assertAlmostEqual(evals_result['valid_0']['multi_logloss'][-1], ret, places=5)
def test_multiclass_prediction_early_stopping(self):
X, y = load_digits(10, True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'multiclass',
'metric': 'multi_logloss',
'num_class': 10,
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train, params=params)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=50,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result)
pred_parameter = {"pred_early_stop": True, "pred_early_stop_freq": 5, "pred_early_stop_margin": 1.5}
ret = multi_logloss(y_test, gbm.predict(X_test, pred_parameter=pred_parameter))
self.assertLess(ret, 0.8)
self.assertGreater(ret, 0.5) # loss will be higher than when evaluating the full model
pred_parameter = {"pred_early_stop": True, "pred_early_stop_freq": 5, "pred_early_stop_margin": 5.5}
ret = multi_logloss(y_test, gbm.predict(X_test, pred_parameter=pred_parameter))
self.assertLess(ret, 0.2)
def test_early_stopping(self):
X, y = load_breast_cancer(True)
params = {
'objective': 'binary',
'metric': 'binary_logloss',
'verbose': -1
}
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
valid_set_name = 'valid_set'
# no early stopping
gbm = lgb.train(params, lgb_train,
num_boost_round=10,
valid_sets=lgb_eval,
valid_names=valid_set_name,
verbose_eval=False,
early_stopping_rounds=5)
self.assertEqual(gbm.best_iteration, 0)
self.assertIn(valid_set_name, gbm.best_score)
self.assertIn('binary_logloss', gbm.best_score[valid_set_name])
# early stopping occurs
gbm = lgb.train(params, lgb_train,
valid_sets=lgb_eval,
valid_names=valid_set_name,
verbose_eval=False,
early_stopping_rounds=5)
self.assertLessEqual(gbm.best_iteration, 100)
self.assertIn(valid_set_name, gbm.best_score)
self.assertIn('binary_logloss', gbm.best_score[valid_set_name])
def test_continue_train_and_dump_model(self):
X, y = load_boston(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'regression',
'metric': 'l1',
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train, free_raw_data=False)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, free_raw_data=False)
init_gbm = lgb.train(params, lgb_train, num_boost_round=20)
model_name = 'model.txt'
init_gbm.save_model(model_name)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=30,
valid_sets=lgb_eval,
verbose_eval=False,
# test custom eval metrics
feval=(lambda p, d: ('mae', mean_absolute_error(p, d.get_label()), False)),
evals_result=evals_result,
init_model='model.txt')
ret = mean_absolute_error(y_test, gbm.predict(X_test))
self.assertLess(ret, 3.5)
self.assertAlmostEqual(evals_result['valid_0']['l1'][-1], ret, places=5)
for l1, mae in zip(evals_result['valid_0']['l1'], evals_result['valid_0']['mae']):
self.assertAlmostEqual(l1, mae, places=5)
# test dump model
self.assertIn('tree_info', gbm.dump_model())
self.assertIsInstance(gbm.feature_importance(), np.ndarray)
os.remove(model_name)
def test_continue_train_multiclass(self):
X, y = load_iris(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
params = {
'objective': 'multiclass',
'metric': 'multi_logloss',
'num_class': 3,
'verbose': -1
}
lgb_train = lgb.Dataset(X_train, y_train, params=params, free_raw_data=False)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params, free_raw_data=False)
init_gbm = lgb.train(params, lgb_train, num_boost_round=20)
evals_result = {}
gbm = lgb.train(params, lgb_train,
num_boost_round=30,
valid_sets=lgb_eval,
verbose_eval=False,
evals_result=evals_result,
init_model=init_gbm)
ret = multi_logloss(y_test, gbm.predict(X_test))
self.assertLess(ret, 1.5)
self.assertAlmostEqual(evals_result['valid_0']['multi_logloss'][-1], ret, places=5)
def test_cv(self):
X, y = load_boston(True)
X_train, _, y_train, _ = train_test_split(X, y, test_size=0.1, random_state=42)
params = {'verbose': -1}
lgb_train = lgb.Dataset(X_train, y_train)
# shuffle = False, override metric in params
params_with_metric = {'metric': 'l2', 'verbose': -1}
lgb.cv(params_with_metric, lgb_train, num_boost_round=10, nfold=3, shuffle=False,
metrics='l1', verbose_eval=False)
# shuffle = True, callbacks
lgb.cv(params, lgb_train, num_boost_round=10, nfold=3, shuffle=True,
metrics='l1', verbose_eval=False,
callbacks=[lgb.reset_parameter(learning_rate=lambda i: 0.1 - 0.001 * i)])
# self defined folds
tss = TimeSeriesSplit(3)
folds = tss.split(X_train)
lgb.cv(params_with_metric, lgb_train, num_boost_round=10, folds=folds, verbose_eval=False)
# lambdarank
X_train, y_train = load_svmlight_file(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../examples/lambdarank/rank.train'))
q_train = np.loadtxt(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../examples/lambdarank/rank.train.query'))
params_lambdarank = {'objective': 'lambdarank', 'verbose': -1}
lgb_train = lgb.Dataset(X_train, y_train, group=q_train)
lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3, metrics='l2', verbose_eval=False)
def test_feature_name(self):
X, y = load_boston(True)
X_train, _, y_train, _ = train_test_split(X, y, test_size=0.1, random_state=42)
params = {'verbose': -1}
lgb_train = lgb.Dataset(X_train, y_train)
feature_names = ['f_' + str(i) for i in range(13)]
gbm = lgb.train(params, lgb_train, num_boost_round=5, feature_name=feature_names)
self.assertListEqual(feature_names, gbm.feature_name())
# test feature_names with whitespaces
feature_names_with_space = ['f ' + str(i) for i in range(13)]
gbm = lgb.train(params, lgb_train, num_boost_round=5, feature_name=feature_names_with_space)
self.assertListEqual(feature_names, gbm.feature_name())
def test_save_load_copy_pickle(self):
def test_template(init_model=None, return_model=False):
X, y = load_boston(True)
params = {
'objective': 'regression',
'metric': 'l2',
'verbose': -1
}
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
lgb_train = lgb.Dataset(X_train, y_train)
gbm_template = lgb.train(params, lgb_train, num_boost_round=10, init_model=init_model)
return gbm_template if return_model else mean_squared_error(y_test, gbm_template.predict(X_test))
gbm = test_template(return_model=True)
ret_origin = test_template(init_model=gbm)
other_ret = []
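        # Retrain from the same base model restored via file, Booster, copy, deepcopy and pickle; all results should match the original.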
gbm.save_model('lgb.model')
other_ret.append(test_template(init_model='lgb.model'))
gbm_load = lgb.Booster(model_file='lgb.model')
other_ret.append(test_template(init_model=gbm_load))
other_ret.append(test_template(init_model=copy.copy(gbm)))
other_ret.append(test_template(init_model=copy.deepcopy(gbm)))
with open('lgb.pkl', 'wb') as f:
pickle.dump(gbm, f)
with open('lgb.pkl', 'rb') as f:
gbm_pickle = pickle.load(f)
other_ret.append(test_template(init_model=gbm_pickle))
gbm_pickles = pickle.loads(pickle.dumps(gbm))
other_ret.append(test_template(init_model=gbm_pickles))
for ret in other_ret:
self.assertAlmostEqual(ret_origin, ret, places=5)
@unittest.skipIf(not IS_PANDAS_INSTALLED, 'pandas not installed')
def test_pandas_categorical(self):
X = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'c', 'd'] * 75), # str
"B": np.random.permutation([1, 2, 3] * 100), # int
"C": np.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60), # float
"D": np.random.permutation([True, False] * 150)}) # bool
y = np.random.permutation([0, 1] * 150)
X_test = pd.DataFrame({"A": np.random.permutation(['a', 'b', 'e'] * 20),
"B": np.random.permutation([1, 3] * 30),
"C": np.random.permutation([0.1, -0.1, 0.2, 0.2] * 15),
"D": np.random.permutation([True, False] * 30)})
for col in ["A", "B", "C", "D"]:
X[col] = X[col].astype('category')
X_test[col] = X_test[col].astype('category')
params = {
'objective': 'binary',
'metric': 'binary_logloss',
'verbose': -1
}
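        # Auto-detected, index-based, name-based and explicit categorical feature specifications should all give identical predictions.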
lgb_train = lgb.Dataset(X, y)
gbm0 = lgb.train(params, lgb_train, num_boost_round=10, verbose_eval=False)
pred0 = list(gbm0.predict(X_test))
lgb_train = lgb.Dataset(X, y)
gbm1 = lgb.train(params, lgb_train, num_boost_round=10, verbose_eval=False,
categorical_feature=[0])
pred1 = list(gbm1.predict(X_test))
lgb_train = lgb.Dataset(X, y)
gbm2 = lgb.train(params, lgb_train, num_boost_round=10, verbose_eval=False,
categorical_feature=['A'])
pred2 = list(gbm2.predict(X_test))
lgb_train = lgb.Dataset(X, y)
gbm3 = lgb.train(params, lgb_train, num_boost_round=10, verbose_eval=False,
categorical_feature=['A', 'B', 'C', 'D'])
pred3 = list(gbm3.predict(X_test))
lgb_train = lgb.Dataset(X, y)
gbm3.save_model('categorical.model')
gbm4 = lgb.Booster(model_file='categorical.model')
pred4 = list(gbm4.predict(X_test))
np.testing.assert_almost_equal(pred0, pred1)
np.testing.assert_almost_equal(pred0, pred2)
np.testing.assert_almost_equal(pred0, pred3)
np.testing.assert_almost_equal(pred0, pred4)
| mit |
maheshakya/scikit-learn | sklearn/linear_model/tests/test_ransac.py | 40 | 12814 | import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from scipy import sparse
from sklearn.utils.testing import assert_less
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros((100, 1)))
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
    # Multi-output target values (3 columns)
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
    # e = 0%, min_samples = 2
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
    # Degenerate stop probability values (0 and 1), min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
if __name__ == "__main__":
np.testing.run_module_suite()
| bsd-3-clause |
anassinator/beethoven | src/web/frequencyToNotesExample.py | 1 | 2431 | from numpy import *
from matplotlib import pyplot as plt
from time import sleep
def frange(x, y, jump):
    while x < y:
        yield x
        x += jump
frequencyMin = 65.406391325149658
frequencyMax = 3520
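# Build one second of synthetic sine "displacement" signals, sampled every 0.125 ms, for the lowest and highest test frequencies.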
disMin = [50.*sin(2.*frequencyMin*pi*(x/1000.))+50. for x in frange(0.,1000.,0.125)]
disMax = [50.*sin(2.*frequencyMax*pi*(x/1000.))+50. for x in frange(0.,1000.,0.125)]
freMin = absolute(fft.fft(disMin)[:4001])
freMax = absolute(fft.fft(disMax)[:4001])
recording = True
start = 0
while recording:
goodFreMin = []
goodFreMax = []
frequencies = [65.40639132514966, 69.29565774421802, 73.41619197935191, 77.78174593052023, 82.4068892282175, 87.30705785825099, 92.4986056779086, 97.99885899543733, 103.8261743949863, 110.0, 116.54094037952248, 123.47082531403103, 130.8127826502993, 138.59131548843604, 146.8323839587038, 155.56349186104046, 164.813778456435, 174.61411571650194, 184.9972113558172, 195.99771799087466, 207.65234878997256, 220.0, 233.08188075904496, 246.94165062806206, 261.6255653005986, 277.1826309768721, 293.6647679174076, 311.1269837220809, 329.6275569128699, 349.2282314330039, 369.9944227116344, 391.99543598174927, 415.3046975799451, 440.0, 466.1637615180899, 493.8833012561241, 523.2511306011972, 554.3652619537442, 587.3295358348151, 622.2539674441618, 659.2551138257398, 698.4564628660078, 739.9888454232688, 783.9908719634985, 830.6093951598903, 880.0, 932.3275230361799, 987.766602512248, 1046.5022612023945, 1108.7305239074883, 1174.65907166963, 1244.5079348883237, 1318.5102276514797, 1396.9129257320155, 1479.9776908465376, 1567.981743926997, 1661.2187903197805, 1760.0, 1864.6550460723593, 1975.533205024496, 2093.004522404789, 2217.4610478149766, 2349.31814333926, 2489.0158697766474, 2637.020455302959, 2793.825851464031, 2959.955381693075, 3135.9634878539937, 3322.437580639561, 3520.0, 3729.3100921447185]
for x in range(1,4001):
if freMin[x] > 20000:
goodFreMin.append((x,freMin[x]))
if freMax[x] > 20000:
goodFreMax.append((x,freMax[x]))
goodMin = []
goodMax = []
for x in range(0,len(frequencies)):
pos = [i for i, v in enumerate(goodFreMin) if v[0]-0.5 <= frequencies[x] and v[0]+0.5 >= frequencies[x]]
if pos:
goodMin.append(x+1)
for x in range(0,len(frequencies)):
pos = [i for i, v in enumerate(goodFreMax) if v[0]-0.5 <= frequencies[x] and v[0]+0.5 >= frequencies[x]]
if pos:
goodMax.append(x+1)
print goodMin
print goodMax
recording = False
| mit |
tensorflow/models | official/nlp/tasks/sentence_prediction.py | 1 | 11656 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sentence prediction (classification) task."""
from typing import List, Union, Optional
from absl import logging
import dataclasses
import numpy as np
import orbit
from scipy import stats
from sklearn import metrics as sklearn_metrics
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.configs import encoders
from official.nlp.data import data_loader_factory
from official.nlp.modeling import models
from official.nlp.tasks import utils
METRIC_TYPES = frozenset(
['accuracy', 'matthews_corrcoef', 'pearson_spearman_corr'])
@dataclasses.dataclass
class ModelConfig(base_config.Config):
"""A classifier/regressor configuration."""
num_classes: int = 0
use_encoder_pooler: bool = False
encoder: encoders.EncoderConfig = encoders.EncoderConfig()
@dataclasses.dataclass
class SentencePredictionConfig(cfg.TaskConfig):
"""The model config."""
# At most one of `init_checkpoint` and `hub_module_url` can
# be specified.
init_checkpoint: str = ''
init_cls_pooler: bool = False
hub_module_url: str = ''
metric_type: str = 'accuracy'
# Defines the concrete model config at instantiation time.
model: ModelConfig = ModelConfig()
train_data: cfg.DataConfig = cfg.DataConfig()
validation_data: cfg.DataConfig = cfg.DataConfig()
@task_factory.register_task_cls(SentencePredictionConfig)
class SentencePredictionTask(base_task.Task):
"""Task object for sentence_prediction."""
def __init__(self, params: cfg.TaskConfig, logging_dir=None, name=None):
super().__init__(params, logging_dir, name=name)
if params.metric_type not in METRIC_TYPES:
raise ValueError('Invalid metric_type: {}'.format(params.metric_type))
self.metric_type = params.metric_type
if hasattr(params.train_data, 'label_field'):
self.label_field = params.train_data.label_field
else:
self.label_field = 'label_ids'
def build_model(self):
if self.task_config.hub_module_url and self.task_config.init_checkpoint:
raise ValueError('At most one of `hub_module_url` and '
'`init_checkpoint` can be specified.')
if self.task_config.hub_module_url:
encoder_network = utils.get_encoder_from_hub(
self.task_config.hub_module_url)
else:
encoder_network = encoders.build_encoder(self.task_config.model.encoder)
encoder_cfg = self.task_config.model.encoder.get()
if self.task_config.model.encoder.type == 'xlnet':
return models.XLNetClassifier(
network=encoder_network,
num_classes=self.task_config.model.num_classes,
initializer=tf.keras.initializers.RandomNormal(
stddev=encoder_cfg.initializer_range))
else:
return models.BertClassifier(
network=encoder_network,
num_classes=self.task_config.model.num_classes,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
use_encoder_pooler=self.task_config.model.use_encoder_pooler)
def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
label_ids = labels[self.label_field]
if self.task_config.model.num_classes == 1:
loss = tf.keras.losses.mean_squared_error(label_ids, model_outputs)
else:
loss = tf.keras.losses.sparse_categorical_crossentropy(
label_ids, tf.cast(model_outputs, tf.float32), from_logits=True)
if aux_losses:
loss += tf.add_n(aux_losses)
return tf_utils.safe_mean(loss)
def build_inputs(self, params, input_context=None):
"""Returns tf.data.Dataset for sentence_prediction task."""
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
x = dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids)
if self.task_config.model.num_classes == 1:
y = tf.zeros((1,), dtype=tf.float32)
else:
y = tf.zeros((1, 1), dtype=tf.int32)
x[self.label_field] = y
return x
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
return data_loader_factory.get_data_loader(params).load(input_context)
def build_metrics(self, training=None):
del training
if self.task_config.model.num_classes == 1:
metrics = [tf.keras.metrics.MeanSquaredError()]
else:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='cls_accuracy')
]
return metrics
def process_metrics(self, metrics, labels, model_outputs):
for metric in metrics:
metric.update_state(labels[self.label_field], model_outputs)
def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
compiled_metrics.update_state(labels[self.label_field], model_outputs)
def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
if self.metric_type == 'accuracy':
return super(SentencePredictionTask,
self).validation_step(inputs, model, metrics)
features, labels = inputs, inputs
outputs = self.inference_step(features, model)
loss = self.build_losses(
labels=labels, model_outputs=outputs, aux_losses=model.losses)
logs = {self.loss: loss}
if self.metric_type == 'matthews_corrcoef':
logs.update({
'sentence_prediction': # Ensure one prediction along batch dimension.
tf.expand_dims(tf.math.argmax(outputs, axis=1), axis=1),
'labels':
labels[self.label_field],
})
if self.metric_type == 'pearson_spearman_corr':
logs.update({
'sentence_prediction': outputs,
'labels': labels[self.label_field],
})
return logs
def aggregate_logs(self, state=None, step_outputs=None):
if self.metric_type == 'accuracy':
return None
if state is None:
state = {'sentence_prediction': [], 'labels': []}
state['sentence_prediction'].append(
np.concatenate([v.numpy() for v in step_outputs['sentence_prediction']],
axis=0))
state['labels'].append(
np.concatenate([v.numpy() for v in step_outputs['labels']], axis=0))
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
if self.metric_type == 'accuracy':
return None
elif self.metric_type == 'matthews_corrcoef':
preds = np.concatenate(aggregated_logs['sentence_prediction'], axis=0)
preds = np.reshape(preds, -1)
labels = np.concatenate(aggregated_logs['labels'], axis=0)
labels = np.reshape(labels, -1)
return {
self.metric_type: sklearn_metrics.matthews_corrcoef(preds, labels)
}
elif self.metric_type == 'pearson_spearman_corr':
preds = np.concatenate(aggregated_logs['sentence_prediction'], axis=0)
preds = np.reshape(preds, -1)
labels = np.concatenate(aggregated_logs['labels'], axis=0)
labels = np.reshape(labels, -1)
pearson_corr = stats.pearsonr(preds, labels)[0]
spearman_corr = stats.spearmanr(preds, labels)[0]
corr_metric = (pearson_corr + spearman_corr) / 2
return {self.metric_type: corr_metric}
def initialize(self, model):
"""Load a pretrained checkpoint (if exists) and then train from iter 0."""
ckpt_dir_or_file = self.task_config.init_checkpoint
if not ckpt_dir_or_file:
return
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
pretrain2finetune_mapping = {
'encoder': model.checkpoint_items['encoder'],
}
if self.task_config.init_cls_pooler:
# This option is valid when use_encoder_pooler is false.
pretrain2finetune_mapping[
'next_sentence.pooler_dense'] = model.checkpoint_items[
'sentence_prediction.pooler_dense']
ckpt = tf.train.Checkpoint(**pretrain2finetune_mapping)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def predict(task: SentencePredictionTask,
params: cfg.DataConfig,
model: tf.keras.Model,
params_aug: Optional[cfg.DataConfig] = None,
test_time_aug_wgt: float = 0.3) -> List[Union[int, float]]:
"""Predicts on the input data.
Args:
task: A `SentencePredictionTask` object.
params: A `cfg.DataConfig` object.
model: A keras.Model.
params_aug: A `cfg.DataConfig` object for augmented data.
test_time_aug_wgt: Test time augmentation weight. The prediction score will
use (1. - test_time_aug_wgt) original prediction plus test_time_aug_wgt
augmented prediction.
Returns:
A list of predictions with length of `num_examples`. For regression task,
each element in the list is the predicted score; for classification task,
each element is the predicted class id.
"""
def predict_step(inputs):
"""Replicated prediction calculation."""
x = inputs
example_id = x.pop('example_id')
outputs = task.inference_step(x, model)
return dict(example_id=example_id, predictions=outputs)
def aggregate_fn(state, outputs):
"""Concatenates model's outputs."""
if state is None:
state = []
for per_replica_example_id, per_replica_batch_predictions in zip(
outputs['example_id'], outputs['predictions']):
state.extend(zip(per_replica_example_id, per_replica_batch_predictions))
return state
dataset = orbit.utils.make_distributed_dataset(tf.distribute.get_strategy(),
task.build_inputs, params)
outputs = utils.predict(predict_step, aggregate_fn, dataset)
# When running on TPU POD, the order of output cannot be maintained,
# so we need to sort by example_id.
outputs = sorted(outputs, key=lambda x: x[0])
is_regression = task.task_config.model.num_classes == 1
if params_aug is not None:
dataset_aug = orbit.utils.make_distributed_dataset(
tf.distribute.get_strategy(), task.build_inputs, params_aug)
outputs_aug = utils.predict(predict_step, aggregate_fn, dataset_aug)
outputs_aug = sorted(outputs_aug, key=lambda x: x[0])
if is_regression:
return [(1. - test_time_aug_wgt) * x[1] + test_time_aug_wgt * y[1]
for x, y in zip(outputs, outputs_aug)]
else:
return [
tf.argmax(
(1. - test_time_aug_wgt) * x[1] + test_time_aug_wgt * y[1],
axis=-1) for x, y in zip(outputs, outputs_aug)
]
if is_regression:
return [x[1] for x in outputs]
else:
return [tf.argmax(x[1], axis=-1) for x in outputs]
| apache-2.0 |
ozak/folium | examples/choropleth_states.py | 12 | 1111 | '''
Choropleth map of US states
'''
import folium
import pandas as pd
state_geo = r'us-states.json'
state_unemployment = r'US_Unemployment_Oct2012.csv'
state_data = pd.read_csv(state_unemployment)
# Let Folium determine the scale
states = folium.Map(location=[48, -102], zoom_start=3)
states.geo_json(geo_path=state_geo, data=state_data,
columns=['State', 'Unemployment'],
key_on='feature.id',
fill_color='YlGn', fill_opacity=0.7, line_opacity=0.2,
legend_name='Unemployment Rate (%)')
states.create_map(path='us_state_map.html')
# Let's define our own scale and change the line opacity
states2 = folium.Map(location=[48, -102], zoom_start=3)
states2.geo_json(geo_path=state_geo, data=state_data,
columns=['State', 'Unemployment'],
threshold_scale=[5, 6, 7, 8, 9, 10],
key_on='feature.id',
fill_color='BuPu', fill_opacity=0.7, line_opacity=0.5,
legend_name='Unemployment Rate (%)',
reset=True)
states2.create_map(path='us_state_map_2.html')
| mit |
mpld3/mpld3 | mpld3/test_plots/test_hist.py | 2 | 11306 | """Plot to test polygons"""
import matplotlib.pyplot as plt
import numpy as np
import mpld3
random_data = [1.6088185574147464,
-0.6865771485141146,
-1.6136369485782012,
-0.3392658435669958,
1.4387492600538156,
-1.7432591997998272,
-1.1082471324814325,
1.6589479820353064,
-0.13049658094505667,
1.2576658123386033,
1.7963480416316446,
1.1430405041422613,
-0.4472323843738978,
-0.6550491457750972,
0.9587514144130824,
-0.3395959055304727,
2.1602167699502393,
-1.295552001830939,
-0.8089544706632718,
-1.059898488934211,
-1.49840844285432,
-0.28948812041181315,
1.288458860066516,
-0.045776284512724794,
-0.17603684344978587,
0.32877358888432034,
0.541536214412118,
-0.24433774008617837,
0.601868139626899,
-0.28906442847885255,
-1.1151998316860108,
1.8356164778710642,
-0.7103164540816693,
1.3015278551091776,
1.3298491664708991,
0.039883192975611916,
-1.0335566237806555,
-0.6252207577843318,
1.3298538422237947,
-0.4617915164308597,
0.09126263830080214,
-0.48477949570239454,
0.26803781904185003,
-0.20182850148656825,
0.7972021848483254,
-0.8282312719003075,
1.3641371074485997,
0.24341631560423083,
1.3115542891128071,
0.06872710320774854,
-0.01672072283950251,
1.4422119012100003,
0.2528618829661779,
0.9616468054908228,
0.986887707014175,
3.0258489983553383,
-1.6816251031482299,
0.2950440044644165,
-1.8763902132612298,
0.7624976458741685,
-0.037227217772358524,
-0.25902776727215787,
-0.5417080779000882,
0.04284672907498912,
-0.13718254382313286,
0.3569275300145882,
-0.10662334822352819,
-0.20642815589262306,
0.5450968108182405,
-0.062190347435352254,
-0.5304976410890131,
0.11496067466178328,
-1.0368841824181443,
0.2503567167261422,
-0.6341715118680747,
1.25978863474973,
2.0435613495865774,
0.7410644160060791,
0.2528301815581853,
-1.538978431967695,
-0.2206925312874959,
0.29577840638693786,
-0.8990147300635852,
1.6909167851741114,
-0.10736814113290882,
1.8031628709576693,
-0.5003026573281728,
1.1236234943942045,
-0.47104101684459104,
-0.1923028862063637,
-1.8800925078391912,
-0.42312802123630855,
0.038490251876292195,
-1.0867893036291791,
0.0743810621308829,
-0.47909429145664206,
0.373368117914869,
1.534135149819737,
0.5245494022834717,
-0.6984090785788215,
1.4427674444634822,
-2.4922813329332545,
-1.055953770682888,
1.878609211373456,
-0.5908328730082637,
-1.857048057759057,
1.4786935759559536,
-1.28181489277262,
0.5157675445982315,
1.7195808917236108,
-0.38440912454122145,
0.8797574085810836,
1.676239682331843,
-0.45240799723213676,
0.2802772191700505,
0.3309554099198398,
0.38338570346598083,
-0.5034539695574349,
0.37627553203847464,
0.8519091363726424,
0.5383015788727779,
1.1096325663839426,
-0.12052436986058586,
1.3140245276665212,
-1.6530681149038304,
-0.25888841120633477,
-0.16350037559227912,
1.8474504533549003,
2.1263366425570833,
-2.4710734105428376,
0.8718448474019873,
0.033821899566860276,
0.8085927980513511,
1.2601667298400134,
-0.0996093564508275,
-0.11628042414484877,
-0.30729165471241415,
-1.0341851401416844,
-0.1504103660115286,
2.350913034096008,
1.3196851342214893,
-1.1347512591503675,
-0.8734170695785559,
-0.7056234969058184,
-0.9103649939672405,
-0.2002480157061158,
-0.10831954230789262,
-0.007850307426785403,
1.6674772351633886,
-0.9856132076907798,
-1.0434180080976176,
-0.7030186659655316,
1.2277115608023585,
0.898005768601088,
1.6274643029829878,
-0.08320385964386205,
-1.0356424576358394,
-1.9123345323284744,
-0.4955321187542757,
0.4408707747503287,
-0.5249733215919139,
-0.10939488654798794,
0.13553860026767425,
-0.044305821603251534,
-0.7159385332949207,
0.1337325665608888,
0.4342004517486429,
0.9866633375956508,
-0.4653819475223896,
0.5295495127235367,
0.3247402423321501,
-0.172493356502056,
-0.2537971923947709,
-1.1923561470207291,
-1.377995450737569,
0.3296828119945463,
1.140535300240797,
-1.819560409414942,
-0.6162187522864669,
-0.18242365258955792,
1.239049703542898,
0.8643784466658591,
-0.08538549494480388,
0.5881499817461358,
-0.057619619123778186,
-1.2707376565079977,
-1.3977605070083239,
0.09574346340872995,
0.23884692218285297,
0.4029036841565875,
-0.9400128968346682,
0.42326857325407236,
0.5648516210728396,
-1.3651144362578458,
0.898288264619132,
0.4468229076264855,
0.30232587578398423,
0.19062463725075862,
-1.5821141982099332,
0.3138327015600902,
0.9040160345128291,
-0.7917206940604362,
1.1607614234184414,
0.5766896888158412,
-1.043476800149341,
-0.8052738529239508,
-0.7951554215524248,
0.2232526689122744,
1.0188389616353848,
0.46285935979550724,
0.7530817165084904,
0.5807926089444349,
0.951763854573985,
0.7779599385764743,
0.9504812904388322,
-0.7516979999357363,
-0.464067808534713,
-0.16380932224811862,
-0.6864196976240141,
0.2455737023517738,
-0.7348409615713362,
0.05758310026884851,
0.553063059550217,
0.048544227898844196,
-1.0605120659188905,
-1.8920278387522278,
-0.658061996299685,
1.9990327483568218,
1.6828958494813993,
-0.21219138503074944,
2.028293015011859,
0.25073190254352673,
-1.0099002205136993,
-0.989634542442095,
-1.8160059730296367,
1.0035962608820848,
0.28918067305350814,
0.8361827598492787,
0.4089949457053751,
0.7185549408083635,
2.0949185611609504,
0.3971691690456786,
0.03746589256624114,
-1.0529324976132892,
0.27839584072377,
0.33365799579104055,
0.6964644795032722,
0.2714256509423176,
1.8044818571556243,
-0.014691244797071097,
-1.7387991143638268,
0.9914453084472065,
0.1180319411371459,
1.0937264804224316,
0.9364065911889146,
0.2519024816944673,
0.4021939664482127,
-0.5028089868006651,
0.0388962526649951,
1.2030710009668146,
0.3840680927370874,
1.313243414710453,
-1.3349241399871241,
-0.8300588871468806,
-2.03421982770625,
0.32842532365118315,
-1.89901585610299,
0.32970239917356436,
0.8533773496260463,
1.5693173384939607,
-0.16435860939222388,
2.017424616530559,
-0.8717492192875496,
-0.9205396786640737,
-0.27997075405423666,
0.8765383654317493,
-1.4259702572640618,
-1.8306588867848146,
-0.5533842251456949,
-0.21735511572955551,
0.6382620822372181,
0.2697140871204187,
-0.31404218568267667,
0.9316811764590066,
-2.0667451860587622,
1.697252913678298,
1.1140315605316327,
-0.009728699147934545,
0.44189013768413343,
-2.091495320531901,
-1.2308916385480955,
1.03026426888392,
0.6627516925501729,
0.817194063857429,
-0.09226755901979303,
-0.7362235501925207,
0.04861987725869331,
-1.0870115812073784,
-0.2775709188139612,
-0.29904174027582114,
-0.24527232361588672,
0.04773573775114175,
-1.0677960401047661,
0.8530438179721761,
0.2897513718951045,
1.6199955149540348,
1.5104979796348013,
-0.41309856833836495,
-0.5872239669415509,
-0.7130500893351722,
-0.3365322853411775,
1.489694182097857,
0.7557971239310557,
0.03497335646263285,
0.16339919779924367,
-1.3428139079259165,
0.023618745777960616,
0.33455741995750904,
-0.2773459662635286,
-1.2584353309363554,
-0.35231067826871987,
-0.46865158640324983,
1.3217355404117228,
0.8399869535160688,
-1.2162406398696064,
-0.6350983093634716,
-0.4321460762899319,
-0.08527475307077167,
-0.45399095073088236,
-0.8177666488623411,
0.5418295821038207,
-0.6897257208335155,
1.9658831505381047,
-0.5284782606870327,
-0.10594382890096236,
0.24217314486549776,
-0.5460335643043203,
-1.3520718886259466,
-0.15218616373826915,
-0.1805423354466487,
0.8415580222953368,
-0.879646055763199,
-2.714005962761264,
-0.9585563813890874,
0.2388829833649452,
-0.0018540203771739466,
0.644323760922803,
-1.1416656024255565,
0.36059299815837736,
-0.3405770278729513,
-0.3060403596539795,
-0.17120569365180208,
-0.8850411936131686,
0.4314129582862788,
0.5069769759851513,
0.19882895186884772,
0.540960911738796,
0.7150512811896927,
0.21539364433513875,
-0.14880865724659603,
-0.970486940943654,
1.1256175047352606,
-0.12226332002538869,
-1.205309174534235,
-0.3261500143680117,
-1.0554032200494499,
-0.2573819201081508,
-0.390669301965708,
0.21584409681938665,
0.6619000008876321,
0.6672312131593522,
2.614110705304245,
2.3277918581365675,
-0.13865785865596422,
1.6795442426292522,
-0.5908374267513135,
-1.909847525232184,
1.3472993801655453,
-0.2745189218380143,
-2.3547825709264467,
-0.7955575743254125,
0.7923489976326644,
0.29674299018855055,
0.1035247640421333,
-0.9888059458106297,
-1.8395994751705467,
0.12825918570249015,
-0.19293516184610582,
-1.3489737673445084,
1.2327432621197973,
0.2221064625924095,
0.7610779332844465,
-1.0239691289648312,
-0.1565823811347759,
1.4533257293286792,
-0.11013059558922982,
-0.5155256072913103,
-0.3205426002771927,
0.3539596160876571,
-0.9638065147736249,
0.09279011027491435,
-0.7071397022232381,
1.5997327256021119,
-1.584187374648889,
-1.1156709280409551,
0.4340149441755118,
-0.7083424606262801,
1.030159256692143,
-0.03902523555766703,
0.31686738030088063,
-1.468287742606628,
-0.4249646873881102,
-2.132444540031783,
1.3759071574719794,
0.5604989649036469,
-0.6391100435482325,
-0.37883211235704667,
0.27634895124008235,
0.44186696190782637,
-1.1996295740986185,
-0.6313435996243602,
0.15195351822228517,
-1.0084179828289148,
-0.28009382621337053,
-1.2916745021370748,
0.8031447665256342,
1.5448416345166864,
0.5650167050374918,
-0.22692657044538106,
0.4657098245292046,
-0.731737283783585,
0.29015243544801783,
-0.2568729575658686,
0.6462038763805821,
0.03227524011079943,
-0.11612118017364606,
-0.6868112224881517,
-0.2647807973589498,
0.9670076443564106,
1.4042294708834777,
0.46222355841059154,
-0.16896499680869345,
0.5194292707657132,
0.049028237544197155,
0.23541854435222753,
1.5963045842316512,
-0.8835656730136358,
-1.5303883709287394,
2.14927118430476,
0.651015648751183,
-0.38864585624570913,
0.737489494433733,
0.1453528158913322,
-1.598180855169015,
0.5275094033382759,
1.0127561937365395,
-0.3933309736771058,
-1.4863368389917533,
-0.9483466608061892,
-0.9887848826467983,
-0.4844687388626192,
0.6588653188263609,
-1.9217465388124388,
-1.8233868910754438,
-0.5060394534743602,
0.08339289479665324,
1.6073503691251432,
-1.512588432746404,
-0.8384147514383815,
-1.4945981086734483,
1.001356338889699,
-1.4193317466315716,
-0.9826214973907532,
-1.0318404168530542,
-0.7855313870405117,
0.019212819799928733,
-0.3921471487430196,
1.1804152033180966,
-0.4999154374050257,
-0.4554909566262925,
0.1749698807335615,
2.0540590495754274,
1.2606061405374105,
-1.7699196258937016,
-0.6398880181586967,
0.24074296988586688,
1.6366265129160817,
-0.11216944021389891,
0.05076596701734716,
-1.1415712976136028,
-1.1648288165948886,
0.45647427435363913,
0.09807293341608687,
-0.3118362702922066,
0.10678521064476658,
-0.038455686391581,
-0.22007985721261505,
-0.5635347991217103,
0.2941046121794234,
0.31455015119383994]
def create_plot():
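    """Plot a 30-bin histogram of the random_data samples defined above."""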
fig, ax = plt.subplots()
ax.grid(color='gray')
x = np.array(random_data )
ax.hist(x, 30, fc='blue', alpha=0.5)
ax.xaxis.set_major_locator(plt.NullLocator())
return fig
def test_hist():
fig = create_plot()
html = mpld3.fig_to_html(fig)
plt.close(fig)
if __name__ == "__main__":
mpld3.show(create_plot())
| bsd-3-clause |
jlegendary/opencog | opencog/python/spatiotemporal/temporal_events/composition/emperical_distribution.py | 34 | 6615 | import csv
import numpy
from spatiotemporal.temporal_events.relation_formulas import TemporalRelation
from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium, generate_random_events
from spatiotemporal.time_intervals import TimeInterval
__author__ = 'keyvan'
def trim_float(float_object, no_digits=12):
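    """Truncate a float to ``no_digits`` decimal digits to suppress floating-point noise in comparisons."""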
return int(float_object * 10 ** no_digits) / float(10 ** no_digits)
def overlaps(bounds_1, bounds_2):
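    """Return True if the two (start, end) bounds overlap or share an endpoint."""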
a_1, b_1 = bounds_1
a_2, b_2 = bounds_2
a_1, b_1, a_2, b_2 = trim_float(a_1), trim_float(b_1), trim_float(a_2), trim_float(b_2)
return a_1 < a_2 < b_1 or a_1 < b_2 < b_1 or a_2 < a_1 < b_2 or a_2 < b_1 < b_2 or a_1 == a_2 or b_1 == b_2
def generate_random_relations(size=1000):
relations = []
A = TemporalEventTrapezium(1000, 1008, 1002, 1004)
B_as = TimeInterval(991, 1008, size)
for B_a in B_as:
B = TemporalEventTrapezium(B_a, B_a + 9, B_a + 3, B_a + 8)
relations.append((A * B).to_list())
return relations
def generate_random_relations_file(size=20):
from datetime import datetime
from spatiotemporal.time_intervals import TimeInterval
csv_writer = csv.writer(open('relations.csv~', 'w'))
year_2010 = TimeInterval(datetime(2010, 1, 1), datetime(2011, 1, 1))
i = size
specifications = [None, None]
while i >= 0:
for j in xrange(2):
a = year_2010.random_time()
beg = year_2010.random_time(start=a)
end = year_2010.random_time(start=beg) #(start=a)
b = year_2010.random_time(start=end) #(start=max(end, beg))
specifications[j] = (a, b, beg, end)
a_beg, a_end = (specifications[0][0], specifications[0][2]), (specifications[0][3], specifications[0][1])
b_beg, b_end = (specifications[1][0], specifications[1][2]), (specifications[1][3], specifications[1][1])
valid = False
for bounds_1, bounds_2 in [
(a_beg, b_beg), (a_beg, b_end), (a_end, b_beg), (a_end, b_end)
]:
if overlaps(bounds_1, bounds_2):
valid = True
break
if not valid:
continue
event_1, event_2 = TemporalEventTrapezium(*specifications[0]), TemporalEventTrapezium(*specifications[1])
csv_writer.writerow((event_1 * event_2).to_list())
percentage = (size - i + 1) / float(size) * 100
if (size - i + 1) % 10**3 == 0:
print '%' + str(int(percentage))
i -= 1
def read_data(size=1000):
csv_reader = csv.reader(open('relations.csv~', 'r'))
relations = []
i = size
ps, ms, os = [], [], []
for row in csv_reader:
p, m, o = row[0:3]
p, m, o = float(p), float(m), float(o)
if i < 0:
break
        ps.append(p)
        ms.append(m)
        os.append(o)
        relations.append(TemporalRelation.from_list(row))  # keep the full relations so callers such as learn_all() can reuse them
        i -= 1
from matplotlib import pylab as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(ps, ms, os)
ax.set_xlabel('p')
ax.set_ylabel('m')
ax.set_zlabel('o')
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_zlim(0, 1)
    plt.show()
    return relations
def classify(size=10000, iterable=None):
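    """Group relations from relations.csv~ (or ``iterable``) by relation type and print per-class counts."""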
csv_reader = iterable
if iterable is None:
csv_reader = csv.reader(open('relations.csv~', 'r'))
classes = {}
for i, row in enumerate(csv_reader):
if i > size - 1:
print 'number of classes:', len(classes)
for class_type in classes:
print classes[class_type][0].type, len(classes[class_type])
return classes
relation = TemporalRelation.from_list(row)
if relation.type not in classes:
classes[relation.type] = [relation]
else:
classes[relation.type].append(relation)
print 'number of classes:', len(classes)
for class_type in classes:
print classes[class_type][0].type, len(classes[class_type])
return classes
def learn(size=10000):
classes = classify(size)
relations = classes['DSOMP']
size = len(relations)
train_size = size - size / 4
train_data = relations[0:train_size]
test_data = relations[train_size:]
train_x, train_y = [], []
for relation in train_data:
train_x.append([relation['O'], relation['M']])
train_y.append(relation['P'])
train_x = numpy.array(train_x)
train_y = numpy.array(train_y).T
from sklearn.linear_model import Lasso, Ridge, ElasticNet, LinearRegression, LassoLars, BayesianRidge, ElasticNetCV, SGDRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from random import randrange
clf = KNeighborsRegressor(8)#alpha=0.000001)
clf.fit(train_x, train_y)
test_x, test_y = [], []
for relation in test_data:
test_x.append([relation['O'], relation['M']])
test_y.append(relation['P'])
print '\n', '///////// tests ////////'
for i in xrange(5):
print 'learning', clf.predict(train_x[i])
print 'actual', train_y[i], '\n-------------\n'
print '***************************'
for i in xrange(5):
print 'learning', clf.predict(test_x[i])
print 'actual', test_y[i], '\n-------------\n'
def learn_all(size=10000):
relations = read_data(size)
size = len(relations)
train_size = size - size / 4
train_data = relations[0:train_size]
test_data = relations[train_size:]
train_x, train_y = [], []
for relation in train_data:
train_x.append(numpy.array([relation['F']]))
train_y.append(numpy.array([relation['o']]))
train_x = numpy.array(train_x)
train_y = numpy.array(train_y)
from sklearn.linear_model import Lasso, Ridge, ElasticNet, LinearRegression, LassoLars, BayesianRidge, ElasticNetCV, SGDRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from random import randrange
clf = KNeighborsRegressor()#alpha=0.000001)
clf.fit(train_x, train_y)
test_x, test_y = [], []
for relation in test_data:
test_x.append(numpy.array([relation['F']]))
test_y.append(numpy.array([relation['o']]))
print '\n', '///////// tests ////////'
for i in xrange(5):
print 'F:', train_x[i]
print 'learning', clf.predict(train_x[i])
print 'actual', train_y[i], '\n-------------\n'
print '***************************'
for i in xrange(5):
print 'F:', test_x[i]
print 'learning', clf.predict(test_x[i])
print 'actual', test_y[i], '\n-------------\n'
| agpl-3.0 |
alperyeg/elephant | doc/conf.py | 2 | 11515 | # -*- coding: utf-8 -*-
#
# Elephant documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 5 17:11:26 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, '..')
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.imgmath',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'sphinxcontrib.bibtex',
'matplotlib.sphinxext.plot_directive',
'numpydoc',
'nbsphinx',
'sphinx_tabs.tabs',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Elephant'
authors = u'Elephant authors and contributors'
copyright = u"2014-{this_year}, {authors}".format(this_year=date.today().year,
authors=authors)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
root_dir = os.path.dirname(os.path.dirname(__file__))
with open(os.path.join(root_dir, 'elephant', 'VERSION')) as version_file:
# The full version, including alpha/beta/rc tags.
release = version_file.read().strip()
# The short X.Y version.
version = '.'.join(release.split('.')[:-1])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
'_build',
'**.ipynb_checkpoints',
'maintainers_guide.rst', # should not be visible for users
]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Only execute Jupyter notebooks that have no evaluated cells
nbsphinx_execute = 'auto'
# Kernel to use for execution
nbsphinx_kernel_name = 'python3'
# Cancel compile on errors in notebooks
nbsphinx_allow_errors = False
# Required to automatically create a summary page for each function listed in
# the autosummary fields of each module.
autosummary_generate = True
# don't overwrite our custom toctree/*.rst
autosummary_generate_overwrite = False
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
html_theme_options = {
'font_family': 'Arial',
'page_width': '1200px', # default is 940
'sidebar_width': '280px', # default is 220
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'images/elephant_logo_sidebar.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'images/elephant_favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'elephantdoc'
# Suppress spurious numpydoc warnings
# see here https://github.com/phn/pytpm/issues/3#issuecomment-12133978
numpydoc_show_class_members = False
# Workaround for the Alabaster theme: without this, no space is rendered
# between a citation reference and the citation text
# https://github.com/sphinx-doc/sphinx/issues/6705#issuecomment-536197438
html4_writer = True
# -- Options for LaTeX output --------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'elephant.tex', u'Elephant Documentation',
authors, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'elephant', u'Elephant Documentation',
[authors], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index',
'Elephant',
u'Elephant Documentation',
authors,
'Elephant',
'Elephant is a package for the analysis of neurophysiology data.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = authors
epub_publisher = authors
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# Use more reliable mathjax source
mathjax_path = 'https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
# Remove the copyright notice from docstrings:
def process_docstring_remove_copyright(app, what, name, obj, options, lines):
copyright_line = None
for i, line in enumerate(lines):
if line.startswith(':copyright:'):
copyright_line = i
break
    if copyright_line is not None:  # index 0 is a valid position for the copyright line
while len(lines) > copyright_line:
lines.pop()
def setup(app):
app.connect('autodoc-process-docstring',
process_docstring_remove_copyright)
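# For example, a docstring whose last lines are ":copyright: ..." (and anything
# following it) will have that tail stripped from the generated API docs by the
# hook registered above.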
| bsd-3-clause |
jat255/hyperspyUI | tests/test_backend.py | 2 | 1095 |
import matplotlib
matplotlib.use('module://hyperspyui.mdi_mpl_backend')
matplotlib.interactive(True)
from hyperspyui.mdi_mpl_backend import (
connect_on_new_figure, disconnect_on_new_figure,
connect_on_destroy, disconnect_on_destroy)
import matplotlib.pyplot as plt
def test_new_figure(qapp):
plt.figure()
plt.plot(range(10))
def test_new_figure_callback(qapp):
call_count = 0
def trigger(figure):
nonlocal call_count
assert figure
call_count += 1
connect_on_new_figure(trigger)
plt.figure()
plt.plot(range(10))
plt.close()
disconnect_on_new_figure(trigger)
plt.figure()
plt.plot(range(10))
plt.close()
assert call_count == 1
def test_destroyed_figure_callback(qapp):
call_count = 0
def trigger(figure):
nonlocal call_count
assert figure
call_count += 1
connect_on_destroy(trigger)
plt.figure()
plt.plot(range(10))
plt.close()
disconnect_on_destroy(trigger)
plt.figure()
plt.plot(range(10))
plt.close()
assert call_count == 1
| gpl-3.0 |
SamStudio8/scikit-bio | skbio/util/tests/test_testing.py | 7 | 9114 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import os
import itertools
import unittest
import pandas as pd
import numpy as np
import numpy.testing as npt
from skbio import OrdinationResults
from skbio.util import (get_data_path, assert_ordination_results_equal,
assert_data_frame_almost_equal)
from skbio.util._testing import _normalize_signs
class TestGetDataPath(unittest.TestCase):
def test_get_data_path(self):
fn = 'parrot'
path = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(path, 'data', fn)
data_path_2 = get_data_path(fn)
self.assertEqual(data_path_2, data_path)
class TestAssertOrdinationResultsEqual(unittest.TestCase):
def test_assert_ordination_results_equal(self):
minimal1 = OrdinationResults('foo', 'bar', pd.Series([1.0, 2.0]),
pd.DataFrame([[1, 2, 3], [4, 5, 6]]))
# a minimal set of results should be equal to itself
assert_ordination_results_equal(minimal1, minimal1)
# type mismatch
with npt.assert_raises(AssertionError):
assert_ordination_results_equal(minimal1, 'foo')
# numeric values should be checked that they're almost equal
almost_minimal1 = OrdinationResults(
'foo', 'bar',
pd.Series([1.0000001, 1.9999999]),
pd.DataFrame([[1, 2, 3], [4, 5, 6]]))
assert_ordination_results_equal(minimal1, almost_minimal1)
# test each of the optional numeric attributes
for attr in ('features', 'samples', 'biplot_scores',
'sample_constraints'):
# missing optional numeric attribute in one, present in the other
setattr(almost_minimal1, attr, pd.DataFrame([[1, 2], [3, 4]]))
with npt.assert_raises(AssertionError):
assert_ordination_results_equal(minimal1, almost_minimal1)
setattr(almost_minimal1, attr, None)
# optional numeric attributes present in both, but not almost equal
setattr(minimal1, attr, pd.DataFrame([[1, 2], [3, 4]]))
setattr(almost_minimal1, attr, pd.DataFrame([[1, 2],
[3.00002, 4]]))
with npt.assert_raises(AssertionError):
assert_ordination_results_equal(minimal1, almost_minimal1)
setattr(minimal1, attr, None)
setattr(almost_minimal1, attr, None)
# optional numeric attributes present in both, and almost equal
setattr(minimal1, attr, pd.DataFrame([[1.0, 2.0], [3.0, 4.0]]))
setattr(almost_minimal1, attr,
pd.DataFrame([[1.0, 2.0], [3.00000002, 4]]))
assert_ordination_results_equal(minimal1, almost_minimal1)
setattr(minimal1, attr, None)
setattr(almost_minimal1, attr, None)
# missing optional numeric attribute in one, present in the other
almost_minimal1.proportion_explained = pd.Series([1, 2, 3])
with npt.assert_raises(AssertionError):
assert_ordination_results_equal(minimal1, almost_minimal1)
almost_minimal1.proportion_explained = None
# optional numeric attributes present in both, but not almost equal
minimal1.proportion_explained = pd.Series([1, 2, 3])
almost_minimal1.proportion_explained = pd.Series([1, 2, 3.00002])
with npt.assert_raises(AssertionError):
assert_ordination_results_equal(minimal1, almost_minimal1)
        minimal1.proportion_explained = None
almost_minimal1.proportion_explained = None
# optional numeric attributes present in both, and almost equal
minimal1.proportion_explained = pd.Series([1, 2, 3])
almost_minimal1.proportion_explained = pd.Series([1, 2, 3.00000002])
assert_ordination_results_equal(minimal1, almost_minimal1)
        minimal1.proportion_explained = None
almost_minimal1.proportion_explained = None
class TestNormalizeSigns(unittest.TestCase):
def test_shapes_and_nonarray_input(self):
with self.assertRaises(ValueError):
_normalize_signs([[1, 2], [3, 5]], [[1, 2]])
def test_works_when_different(self):
"""Taking abs value of everything would lead to false
positives."""
a = np.array([[1, -1],
[2, 2]])
b = np.array([[-1, -1],
[2, 2]])
with self.assertRaises(AssertionError):
npt.assert_equal(*_normalize_signs(a, b))
def test_easy_different(self):
a = np.array([[1, 2],
[3, -1]])
b = np.array([[-1, 2],
[-3, -1]])
npt.assert_equal(*_normalize_signs(a, b))
def test_easy_already_equal(self):
a = np.array([[1, -2],
[3, 1]])
b = a.copy()
npt.assert_equal(*_normalize_signs(a, b))
def test_zeros(self):
a = np.array([[0, 3],
[0, -1]])
b = np.array([[0, -3],
[0, 1]])
npt.assert_equal(*_normalize_signs(a, b))
def test_hard(self):
a = np.array([[0, 1],
[1, 2]])
b = np.array([[0, 1],
[-1, 2]])
npt.assert_equal(*_normalize_signs(a, b))
def test_harder(self):
"""We don't want a value that might be negative due to
floating point inaccuracies to make a call to allclose in the
result to be off."""
a = np.array([[-1e-15, 1],
[5, 2]])
b = np.array([[1e-15, 1],
[5, 2]])
# Clearly a and b would refer to the same "column
# eigenvectors" but a slopppy implementation of
# _normalize_signs could change the sign of column 0 and make a
# comparison fail
npt.assert_almost_equal(*_normalize_signs(a, b))
def test_column_zeros(self):
a = np.array([[0, 1],
[0, 2]])
b = np.array([[0, -1],
[0, -2]])
npt.assert_equal(*_normalize_signs(a, b))
def test_column_almost_zero(self):
a = np.array([[1e-15, 3],
[-2e-14, -6]])
b = np.array([[0, 3],
[-1e-15, -6]])
npt.assert_almost_equal(*_normalize_signs(a, b))
class TestAssertDataFrameAlmostEqual(unittest.TestCase):
def setUp(self):
self.df = pd.DataFrame(
{'foo': [42, 42.0, np.nan, 0],
'bar': ['a', 'b', 'cd', 'e']})
def test_not_equal(self):
unequal_dfs = [
self.df,
# floating point error too large to be "almost equal"
pd.DataFrame({'foo': [42, 42.001, np.nan, 0],
'bar': ['a', 'b', 'cd', 'e']}),
# extra NaN
pd.DataFrame({'foo': [42, np.nan, np.nan, 0],
'bar': ['a', 'b', 'cd', 'e']}),
# different column order
pd.DataFrame(self.df, columns=['foo', 'bar']),
# different index order
pd.DataFrame(self.df, index=np.arange(4)[::-1]),
# different index type
pd.DataFrame(self.df, index=np.arange(4).astype(float)),
# various forms of "empty" DataFrames that are not equivalent
pd.DataFrame(),
pd.DataFrame(index=np.arange(10)),
pd.DataFrame(columns=np.arange(10)),
pd.DataFrame(index=np.arange(10), columns=np.arange(10)),
pd.DataFrame(index=np.arange(9)),
pd.DataFrame(columns=np.arange(9)),
pd.DataFrame(index=np.arange(9), columns=np.arange(9))
]
# each df should compare equal to itself
for df in unequal_dfs:
assert_data_frame_almost_equal(df, df)
# every pair of dfs should not compare equal. use permutations instead
# of combinations to test that comparing df1 to df2 and df2 to df1 are
# both not equal
for df1, df2 in itertools.permutations(unequal_dfs, 2):
with self.assertRaises(AssertionError):
assert_data_frame_almost_equal(df1, df2)
def test_equal(self):
equal_dfs = [
self.df,
# floating point error small enough to be "almost equal"
pd.DataFrame({'foo': [42, 42.00001, np.nan, 0],
'bar': ['a', 'b', 'cd', 'e']})
]
for df in equal_dfs:
assert_data_frame_almost_equal(df, df)
for df1, df2 in itertools.permutations(equal_dfs, 2):
assert_data_frame_almost_equal(df1, df2)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
jreback/pandas | pandas/tests/series/indexing/test_datetime.py | 2 | 18058 | """
Also test support for datetime64[ns] in Series / DataFrame
"""
from datetime import datetime, timedelta
import re
from dateutil.tz import gettz, tzutc
import numpy as np
import pytest
import pytz
from pandas._libs import iNaT, index as libindex
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
NaT,
Series,
Timestamp,
date_range,
period_range,
)
import pandas._testing as tm
def test_fancy_getitem():
dti = date_range(
freq="WOM-1FRI", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)
)
s = Series(np.arange(len(dti)), index=dti)
assert s[48] == 48
assert s["1/2/2009"] == 48
assert s["2009-1-2"] == 48
assert s[datetime(2009, 1, 2)] == 48
assert s[Timestamp(datetime(2009, 1, 2))] == 48
with pytest.raises(KeyError, match=r"^'2009-1-3'$"):
s["2009-1-3"]
tm.assert_series_equal(
s["3/6/2009":"2009-06-05"], s[datetime(2009, 3, 6) : datetime(2009, 6, 5)]
)
def test_fancy_setitem():
dti = date_range(
freq="WOM-1FRI", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)
)
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
assert s[48] == -1
s["1/2/2009"] = -2
assert s[48] == -2
s["1/2/2009":"2009-06-05"] = -3
assert (s[48:54] == -3).all()
def test_slicing_datetimes():
# GH 7523
# unique
df = DataFrame(
np.arange(4.0, dtype="float64"),
index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 3, 4]],
)
result = df.loc[datetime(2001, 1, 1, 10) :]
tm.assert_frame_equal(result, df)
result = df.loc[: datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10) : datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11) :]
expected = df.iloc[1:]
tm.assert_frame_equal(result, expected)
result = df.loc["20010101 11":]
tm.assert_frame_equal(result, expected)
# duplicates
df = DataFrame(
np.arange(5.0, dtype="float64"),
index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 2, 3, 4]],
)
result = df.loc[datetime(2001, 1, 1, 10) :]
tm.assert_frame_equal(result, df)
result = df.loc[: datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10) : datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11) :]
expected = df.iloc[1:]
tm.assert_frame_equal(result, expected)
result = df.loc["20010101 11":]
tm.assert_frame_equal(result, expected)
def test_getitem_setitem_datetime_tz_pytz():
N = 50
# testing with timezone, GH #2785
rng = date_range("1/1/1990", periods=N, freq="H", tz="US/Eastern")
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
tm.assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
tm.assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=pytz.timezone("UTC"))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=pytz.timezone("UTC"))] = ts[4]
tm.assert_series_equal(result, ts)
result = ts.copy()
# comparison dates with datetime MUST be localized!
date = pytz.timezone("US/Central").localize(datetime(1990, 1, 1, 3))
result[date] = 0
result[date] = ts[4]
tm.assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_dateutil():
tz = (
lambda x: tzutc() if x == "UTC" else gettz(x)
) # handle special case for utc in dateutil
N = 50
# testing with timezone, GH #2785
rng = date_range("1/1/1990", periods=N, freq="H", tz="America/New_York")
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
tm.assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
tm.assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz("UTC"))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz("UTC"))] = ts[4]
tm.assert_series_equal(result, ts)
result = ts.copy()
result[datetime(1990, 1, 1, 3, tzinfo=tz("America/Chicago"))] = 0
result[datetime(1990, 1, 1, 3, tzinfo=tz("America/Chicago"))] = ts[4]
tm.assert_series_equal(result, ts)
def test_getitem_setitem_datetimeindex():
N = 50
# testing with timezone, GH #2785
rng = date_range("1/1/1990", periods=N, freq="H", tz="US/Eastern")
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04:00:00"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04:00:00"] = 0
result["1990-01-01 04:00:00"] = ts[4]
tm.assert_series_equal(result, ts)
result = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = 0
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = ts[4:8]
tm.assert_series_equal(result, ts)
lb = "1990-01-01 04:00:00"
rb = "1990-01-01 07:00:00"
# GH#18435 strings get a pass from tzawareness compat
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
lb = "1990-01-01 04:00:00-0500"
rb = "1990-01-01 07:00:00-0500"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
# But we do not give datetimes a pass on tzawareness compat
# TODO: do the same with Timestamps and dt64
msg = "Cannot compare tz-naive and tz-aware datetime-like objects"
naive = datetime(1990, 1, 1, 4)
with tm.assert_produces_warning(FutureWarning):
# GH#36148 will require tzawareness compat
result = ts[naive]
expected = ts[4]
assert result == expected
result = ts.copy()
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result[datetime(1990, 1, 1, 4)] = 0
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result[datetime(1990, 1, 1, 4)] = ts[4]
tm.assert_series_equal(result, ts)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result = ts[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)] = 0
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)] = ts[4:8]
tm.assert_series_equal(result, ts)
lb = datetime(1990, 1, 1, 4)
rb = datetime(1990, 1, 1, 7)
msg = r"Invalid comparison between dtype=datetime64\[ns, US/Eastern\] and datetime"
with pytest.raises(TypeError, match=msg):
# tznaive vs tzaware comparison is invalid
# see GH#18376, GH#18162
ts[(ts.index >= lb) & (ts.index <= rb)]
lb = Timestamp(datetime(1990, 1, 1, 4)).tz_localize(rng.tzinfo)
rb = Timestamp(datetime(1990, 1, 1, 7)).tz_localize(rng.tzinfo)
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result.iloc[4:8] = ts.iloc[4:8]
tm.assert_series_equal(result, ts)
# also test partial date slicing
result = ts["1990-01-02"]
expected = ts[24:48]
tm.assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-02"] = 0
result["1990-01-02"] = ts[24:48]
tm.assert_series_equal(result, ts)
def test_getitem_setitem_periodindex():
N = 50
rng = period_range("1/1/1990", periods=N, freq="H")
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04"] = 0
result["1990-01-01 04"] = ts[4]
tm.assert_series_equal(result, ts)
result = ts["1990-01-01 04":"1990-01-01 07"]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04":"1990-01-01 07"] = 0
result["1990-01-01 04":"1990-01-01 07"] = ts[4:8]
tm.assert_series_equal(result, ts)
lb = "1990-01-01 04"
rb = "1990-01-01 07"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
# GH 2782
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result.iloc[4:8] = ts.iloc[4:8]
tm.assert_series_equal(result, ts)
def test_datetime_indexing():
index = date_range("1/1/2000", "1/7/2000")
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp("1/8/2000")
with pytest.raises(KeyError, match=re.escape(repr(stamp))):
s[stamp]
s[stamp] = 0
assert s[stamp] == 0
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
with pytest.raises(KeyError, match=re.escape(repr(stamp))):
s[stamp]
s[stamp] = 0
assert s[stamp] == 0
"""
test duplicates in time series
"""
@pytest.fixture
def dups():
dates = [
datetime(2000, 1, 2),
datetime(2000, 1, 2),
datetime(2000, 1, 2),
datetime(2000, 1, 3),
datetime(2000, 1, 3),
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 4),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
]
return Series(np.random.randn(len(dates)), index=dates)
def test_constructor(dups):
assert isinstance(dups, Series)
assert isinstance(dups.index, DatetimeIndex)
def test_is_unique_monotonic(dups):
assert not dups.index.is_unique
def test_index_unique(dups):
uniques = dups.index.unique()
expected = DatetimeIndex(
[
datetime(2000, 1, 2),
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
]
)
assert uniques.dtype == "M8[ns]" # sanity
tm.assert_index_equal(uniques, expected)
assert dups.index.nunique() == 4
# #2563
assert isinstance(uniques, DatetimeIndex)
dups_local = dups.index.tz_localize("US/Eastern")
dups_local.name = "foo"
result = dups_local.unique()
expected = DatetimeIndex(expected, name="foo")
expected = expected.tz_localize("US/Eastern")
assert result.tz is not None
assert result.name == "foo"
tm.assert_index_equal(result, expected)
# NaT, note this is excluded
arr = [1370745748 + t for t in range(20)] + [iNaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
arr = [
Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)
] + [NaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
def test_duplicate_dates_indexing(dups):
ts = dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
tm.assert_series_equal(result, expected)
else:
tm.assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
tm.assert_series_equal(cp, expected)
key = datetime(2000, 1, 6)
with pytest.raises(KeyError, match=re.escape(repr(key))):
ts[key]
# new index
ts[datetime(2000, 1, 6)] = 0
assert ts[datetime(2000, 1, 6)] == 0
def test_groupby_average_dup_values(dups):
result = dups.groupby(level=0).mean()
expected = dups.groupby(dups.index).mean()
tm.assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(monkeypatch):
# #1821
monkeypatch.setattr(libindex, "_SIZE_CUTOFF", 1000)
# create large list of non periodic datetime
dates = []
sec = timedelta(seconds=1)
half_sec = timedelta(microseconds=500000)
d = datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4), index=dates, columns=list("ABCD"))
pos = n * 3
timestamp = df.index[pos]
assert timestamp in df.index
# it works!
df.loc[timestamp]
assert len(df.loc[[timestamp]]) > 0
def test_indexing_over_size_cutoff_period_index(monkeypatch):
# GH 27136
monkeypatch.setattr(libindex, "_SIZE_CUTOFF", 1000)
n = 1100
idx = pd.period_range("1/1/2000", freq="T", periods=n)
assert idx._engine.over_size_threshold
s = Series(np.random.randn(len(idx)), index=idx)
pos = n - 1
timestamp = idx[pos]
assert timestamp in s.index
# it works!
s[timestamp]
assert len(s.loc[[timestamp]]) > 0
def test_indexing_unordered():
# GH 2437
rng = date_range(start="2011-01-01", end="2011-01-15")
ts = Series(np.random.rand(len(rng)), index=rng)
ts2 = pd.concat([ts[0:4], ts[-4:], ts[4:-4]])
for t in ts.index:
expected = ts[t]
result = ts2[t]
assert expected == result
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected)
compare(slice("2011-01-01", "2011-01-15"))
with tm.assert_produces_warning(FutureWarning):
compare(slice("2010-12-30", "2011-01-15"))
compare(slice("2011-01-01", "2011-01-16"))
# partial ranges
compare(slice("2011-01-01", "2011-01-6"))
compare(slice("2011-01-06", "2011-01-8"))
compare(slice("2011-01-06", "2011-01-12"))
# single values
result = ts2["2011"].sort_index()
expected = ts["2011"]
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq="M")
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts["2005"]
for t in result.index:
assert t.year == 2005
def test_indexing():
idx = date_range("2001-1-1", periods=20, freq="M")
ts = Series(np.random.rand(len(idx)), index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts["2001"]
expected.name = "A"
df = DataFrame({"A": ts})
with tm.assert_produces_warning(FutureWarning):
# GH#36179 string indexing on rows for DataFrame deprecated
result = df["2001"]["A"]
tm.assert_series_equal(expected, result)
# setting
ts["2001"] = 1
expected = ts["2001"]
expected.name = "A"
df.loc["2001", "A"] = 1
with tm.assert_produces_warning(FutureWarning):
# GH#36179 string indexing on rows for DataFrame deprecated
result = df["2001"]["A"]
tm.assert_series_equal(expected, result)
# GH3546 (not including times on the last day)
idx = date_range(start="2013-05-31 00:00", end="2013-05-31 23:00", freq="H")
ts = Series(range(len(idx)), index=idx)
expected = ts["2013-05"]
tm.assert_series_equal(expected, ts)
idx = date_range(start="2013-05-31 00:00", end="2013-05-31 23:59", freq="S")
ts = Series(range(len(idx)), index=idx)
expected = ts["2013-05"]
tm.assert_series_equal(expected, ts)
idx = [
Timestamp("2013-05-31 00:00"),
Timestamp(datetime(2013, 5, 31, 23, 59, 59, 999999)),
]
ts = Series(range(len(idx)), index=idx)
expected = ts["2013"]
tm.assert_series_equal(expected, ts)
# GH14826, indexing with a seconds resolution string / datetime object
df = DataFrame(
np.random.rand(5, 5),
columns=["open", "high", "low", "close", "volume"],
index=date_range("2012-01-02 18:01:00", periods=5, tz="US/Central", freq="s"),
)
expected = df.loc[[df.index[2]]]
# this is a single date, so will raise
with pytest.raises(KeyError, match=r"^'2012-01-02 18:01:02'$"):
df["2012-01-02 18:01:02"]
msg = r"Timestamp\('2012-01-02 18:01:02-0600', tz='US/Central', freq='S'\)"
with pytest.raises(KeyError, match=msg):
df[df.index[2]]
| bsd-3-clause |
iancze/ScottiePippen | scripts/plot_tracks.py | 1 | 5255 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.ticker import FormatStrFormatter as FSF
from matplotlib.ticker import MultipleLocator
from scipy.interpolate import InterpolatedUnivariateSpline as spline
from grids import DartmouthPMS, PISA, Baraffe15, Seiss
# Function lifted from triangle.py: https://github.com/dfm/triangle.py/
def hist2d(ax, x, y, *args, **kwargs):
"""
Plot a 2-D histogram of samples.
"""
extent = [[x.min(), x.max()], [y.min(), y.max()]]
bins = 50
color = "k"
linewidths = 0.8
cmap = cm.get_cmap("gray")
cmap._init()
cmap._lut[:-3, :-1] = 0.
cmap._lut[:-3, -1] = np.linspace(1, 0, cmap.N)
X = np.linspace(extent[0][0], extent[0][1], bins + 1)
Y = np.linspace(extent[1][0], extent[1][1], bins + 1)
try:
H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=(X, Y))
except ValueError:
raise ValueError("It looks like at least one of your sample columns "
"have no dynamic range. You could try using the "
"`extent` argument.")
# V = 1.0 - np.exp(-0.5 * np.array([1.0, 2.0, 3.0]) ** 2)
V = 1.0 - np.exp(-0.5 * np.array([1.0, 2.0]) ** 2)
#V = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
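    # 1 - exp(-n**2 / 2) is the fraction of a 2-D Gaussian enclosed within n sigma,
    # so the V values above correspond to the 1-sigma and 2-sigma contour levels.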
Hflat = H.flatten()
inds = np.argsort(Hflat)[::-1]
Hflat = Hflat[inds]
sm = np.cumsum(Hflat)
sm /= sm[-1]
for i, v0 in enumerate(V):
try:
V[i] = Hflat[sm <= v0][-1]
except:
V[i] = Hflat[0]
X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
X, Y = X[:-1], Y[:-1]
# Plot the contours
# ax.pcolor(X, Y, H.max() - H.T, cmap=cmap)
ax.contour(X1, Y1, H.T, V, colors=color, linewidths=linewidths)
ax.set_xlim(extent[0])
ax.set_ylim(extent[1])
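# A minimal usage sketch for hist2d (the `samples` array and the output file name
# are illustrative, not part of this script):
#   fig, ax = plt.subplots()
#   hist2d(ax, samples[:, 0], samples[:, 1])
#   fig.savefig("hist2d_example.pdf")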
TR = np.load("eparams_R.npy")
temp = TR[:,0]
radius = TR[:,5]
BARAFFE = np.load("plots/Baraffe15/eparams_emcee.npy")
DARTMOUTH = np.load("plots/DartmouthPMS/eparams_emcee.npy")
PISA_ = np.load("plots/PISA/eparams_emcee.npy")
SIESS = np.load("plots/Seiss/eparams_emcee.npy")
fig,ax = plt.subplots(nrows=2, ncols=4, figsize=(6.5,4.))
# Models are in order: Baraffe, Dartmouth, PISA, SIESS
labels = ["BCAH15", "Dartmouth14", "PISA", "Siess"]
# First row is T - R diagrams
# Second row is M - Age diagrams
def interp(T, R):
interp = spline(T, R, k=5)
Tfine = np.linspace(np.min(T), np.max(T))
Rfine = interp(Tfine)
return (Tfine, Rfine)
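# interp() is an optional smoothing helper; the "tfine, rfine = interp(tt, rr)" calls
# in the loops below are commented out, so the raw track points are plotted directly.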
for i,a in enumerate(ax[0]):
hist2d(a, temp, radius)
a.xaxis.set_major_formatter(FSF("%.0f"))
a.xaxis.set_major_locator(MultipleLocator(500))
a.set_xlim(7100, 5200)
a.annotate(labels[i], (0.05, 0.05), xycoords="axes fraction", size=6, backgroundcolor="w")
if i != 0:
a.xaxis.set_ticks([])
a.yaxis.set_ticks([])
ax[0,0].set_ylabel(r"$R_\ast$ [$R_\odot$]")
ax[0,0].set_xlabel(r"$T_\textrm{eff}$ [K]")
# Break the tracks up into ages
# Plot Baraffe
# Baraffe15
grid = Baraffe15(age_range=[1, 50], mass_range=[0.9, 1.4])
grid.load()
masses = np.arange(1.2, 1.5, 0.1)
Ts = []
Rs = []
for mass in masses:
inds = np.isclose(grid.masses, mass)
tt = grid.temps[inds]
rr = grid.radii[inds]
# tfine, rfine = interp(tt, rr)
Ts.append(tt)
Rs.append(rr)
for T, R in zip(Ts, Rs):
ax[0,0].plot(T, R, "-", color="0.5")
# Plot Dartmouth
grid = DartmouthPMS(age_range=[1, 100], mass_range=[0.5, 2.0])
grid.load()
masses = np.arange(1.2, 1.55, 0.1)
Ts = []
Rs = []
for mass in masses:
inds = np.isclose(grid.masses, mass)
tt = grid.temps[inds]
rr = grid.radii[inds]
# tfine, rfine = interp(tt, rr)
Ts.append(tt)
Rs.append(rr)
for T, R in zip(Ts, Rs):
ax[0,1].plot(T, R, "-", color="0.5")
# Plot PISA
grid = PISA(age_range=[1, 100], mass_range=[0.5, 2.0])
grid.load()
masses = np.arange(1.2, 1.55, 0.1)
Ts = []
Rs = []
for mass in masses:
inds = np.isclose(grid.masses, mass)
tt = grid.temps[inds]
rr = grid.radii[inds]
# tfine, rfine = interp(tt, rr)
Ts.append(tt)
Rs.append(rr)
for T, R in zip(Ts, Rs):
ax[0,2].plot(T, R, "-", color="0.5")
# Plot Siess
grid = Seiss(age_range=[1, 100], mass_range=[0.5, 2.0])
grid.load()
masses = np.arange(1.2, 1.55, 0.1)
Ts = []
Rs = []
for mass in masses:
inds = np.isclose(grid.masses, mass)
tt = grid.temps[inds]
rr = grid.radii[inds]
# tfine, rfine = interp(tt, rr)
Ts.append(tt)
Rs.append(rr)
for T, R in zip(Ts, Rs):
ax[0,3].plot(T, R, "-", color="0.5")
hist2d(ax[1,0], BARAFFE[:,1], BARAFFE[:,0])
hist2d(ax[1,1], DARTMOUTH[:,1], DARTMOUTH[:,0])
hist2d(ax[1,2], PISA_[:,1], PISA_[:,0])
hist2d(ax[1,3], SIESS[:,1], SIESS[:,0])
for i,a in enumerate(ax[1]):
a.set_xlim(1.1, 1.5)
a.set_ylim(10, 25.)
a.axvspan(1.17, 1.31, color="0.8")
a.xaxis.set_major_formatter(FSF("%.1f"))
a.xaxis.set_major_locator(MultipleLocator(0.1))
if i != 0:
a.xaxis.set_ticks([])
a.yaxis.set_ticks([])
ax[1,0].axvline(1.4, color="0.5", linestyle=":")
ax[1,0].set_ylabel(r"$\tau$ [Myr]")
ax[1,0].set_xlabel(r"$M_\ast$ [$M_\odot$]")
fig.subplots_adjust(left=0.1, right=0.9, wspace=0.0, top=0.98, bottom=0.1, hspace=0.3)
fig.savefig("posterior.pdf")
| mit |
zhzhussupovkz/digit-recognizer | digit_recognizer.py | 1 | 1748 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from pandas import read_csv, DataFrame, Series, concat
from sklearn import cross_validation, svm, grid_search
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import accuracy_score, roc_auc_score
import pylab as pl
import matplotlib.pyplot as plt
def get_train_data():
print 'Get train data...'
data = read_csv('./train.csv')
return data
def get_test_data():
print 'Get test data...'
data = read_csv('./test.csv')
return data
# final function: fit a random forest on the full training set and write predictions to CSV
def go():
data = get_train_data()
model_rfc = RandomForestClassifier(n_estimators = 1024, criterion = 'entropy', n_jobs = -1)
print 'Go!!!'
print 'RFC...'
test = get_test_data()
target = data.label
train = data.drop(['label'], axis = 1)
print "..."
result = DataFrame()
model_rfc.fit(train, target)
result['ImageId'] = range(1, len(test) + 1)
result.insert(1,'Label', model_rfc.predict(test))
result.to_csv('./test_rfc_1024.csv', index=False)
def grid_search_test():
data = get_train_data()
target = data.label
train = data.drop(['label'], axis = 1)
model_rfc = RandomForestClassifier()
params = {"n_estimators" : [100, 250, 500, 625], "criterion" : ('entropy', 'gini')}
clf = grid_search.GridSearchCV(model_rfc, params)
clf.fit(train, target)
# summarize the results of the grid search
print(clf.best_score_)
print(clf.best_estimator_.criterion)
print(clf.best_estimator_.n_estimators)
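# grid_search_test() is not invoked below; run it separately to reproduce the
# parameter search before the final go() training run.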
go() | mit |
partizand/gnucashreport | src/gnucashreport/rawdata.py | 1 | 57482 | import gettext
import locale
import os
from copy import copy
from datetime import date, datetime
from decimal import Decimal
import pandas
import numpy
from gnucashreport import utils
from gnucashreport.financial import xirr
from gnucashreport.margins import Margins
from gnucashreport.gnucashbook import GNUCashBook
import gnucashreport.cols as cols
# Marker: the account is excluded from the return (xirr) calculation
MARKER_NO_INVEST = '%no_invest%'
# Marker: the account is included in the return (xirr) calculation
MARKER_INVEST = '%invest%'
class RawData:
"""
    Low-level DataFrame representation of the GnuCash database tables, used for building reports.
    Functions return DataFrames with raw data, without totals or styling.
"""
# # GnuCash account types
# CASH = 'CASH'
# BANK = 'BANK'
# ASSET = 'ASSET'
# STOCK = 'STOCK'
# MUTUAL = 'MUTUAL'
# INCOME = 'INCOME'
# EXPENSE = 'EXPENSE'
# EQUITY = 'EQUITY'
# LIABILITY = 'LIABILITY'
# ROOT = 'ROOT'
# # GNUCash all account assets types
# ALL_ASSET_TYPES = [CASH, BANK, ASSET, STOCK, MUTUAL]
#
# # All account types for calc yield by xirr
# ALL_XIRR_TYPES = [BANK, ASSET, STOCK, MUTUAL, LIABILITY]
# ASSET_XIRR_TYPES = [BANK, ASSET, LIABILITY]
# STOCK_XIRR_TYPES = [STOCK, MUTUAL]
# INCEXP_XIRR_TYPES = [INCOME, EXPENSE]
    # Paths used for generating test data and for tests
dir_pickle = 'V:/test_data'
pickle_prices = 'prices.pkl'
pickle_splits = 'splits.pkl'
pickle_accounts = 'accounts.pkl'
pickle_tr = 'transactions.pkl'
pickle_commodities = 'commodities.pkl'
dir_excel = "v:/tables"
def __init__(self, filename=None):
self.df_accounts = pandas.DataFrame()
self.df_transactions = pandas.DataFrame()
self.df_commodities = pandas.DataFrame()
self.df_splits = pandas.DataFrame()
self.df_prices = pandas.DataFrame()
# self.book_name = None
        # internationalization
self.set_locale()
self.book = None
self.root_account_guid = None
if filename:
self.open_book_file(filename)
self._xirr_info_added = False
@staticmethod
def set_locale():
"""
Set current os locale for gettext
:return:
"""
        # internationalization
if os.name == 'nt':
current_locale, encoding = locale.getdefaultlocale()
os.environ['LANGUAGE'] = current_locale
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
dir_locale = os.path.join(dir_path, 'locale')
gettext.install('gnucashreport', localedir=dir_locale)
# def open_book_file(self, filename, readonly=True, open_if_lock=False,):
def open_book_file(self, filename):
"""
Open GnuCash database file. Autodetect type: sqlite or xml
:param filename:
:param readonly: only for sqlite
:param open_if_lock: only for sqlite
:return:
"""
self.book = GNUCashBook()
self.book.open_file(filename)
self.df_accounts = self.book.df_accounts
self.df_commodities = self.book.df_commodities
self.df_prices = self.book.df_prices
self.df_transactions = self.book.df_transactions
self.df_splits = self.book.df_splits
self.root_account_guid = self.book.root_account_guid
self._after_read()
def __repr__(self):
return 'gcreport {book}'.format(book=self.book)
def _after_read(self):
"""
        Post-processing of the dataframes after the data has been loaded
:return:
"""
        # Minimum and maximum dates in the book
self.min_date = self.df_splits[cols.POST_DATE].min() #.date()
self.max_date = self.df_splits[cols.POST_DATE].max() #.date()
        # Daily prices for every commodity
self.df_prices_days = self._group_prices_by_period(self.min_date, self.max_date, 'D')
        # Convert split amounts into the base (book) currency
self.df_splits = self._currency_calc(self.df_splits,
col_currency_guid=cols.CURRENCY_GUID,
col_rate=cols.RATE_CURRENCY
)
def _add_margins(self, dataframe, margins=None):
"""
Add totals into DataFrame
:param dataframe:
:param margins:
:return: DataFrame with totals
"""
df = dataframe.copy()
if margins:
if margins.total_row:
df = self._add_row_total(df, margins)
if margins.total_col or margins.mean_col:
df = self._add_col_total(df, margins)
return df
def _add_col_total(self, dataframe, margins):
        # List of columns used for the totals/mean calculation
columns = dataframe.columns.tolist()
df_ret = dataframe.copy()
        # Add an empty spacer column
if margins.empty_col:
df_ret[''] = ''
if margins.total_col:
df_ret[margins.total_name] = df_ret[columns].sum(axis=1)
if margins.mean_col:
df_ret[margins.mean_name] = df_ret[columns].mean(axis=1)
return df_ret
def _add_row_total(self, dataframe, margins=None):
total_name = _('Total')
if margins:
total_name = margins.total_name
if isinstance(dataframe.index, pandas.MultiIndex):
df_ret = dataframe.copy()
df_sum = pandas.DataFrame(data=dataframe.sum()).T
# df_sum.reindex()
# Строковые имена колонок индекса
strinames = [str(name) for name in dataframe.index.names]
first = True
for i in strinames:
if first:
df_sum[i] = total_name
first = False
else:
df_sum[i] = ''
df_sum.set_index(strinames, inplace=True)
df_ret = df_ret.append(df_sum)
return df_ret
else:
index = total_name
df_ret = dataframe.copy()
df_ret.loc[index] = dataframe.sum()
return df_ret
def equity_by_period(self, from_date, to_date, period='M', glevel=1, margins = None):
"""
        Equity (assets minus liabilities) for each period.
        Returns a DataFrame.
:param from_date: Start date
:param to_date: Finish date
:param period: "M" for month, "D" for day...
:param glevel: group level
:param margins:
:return: pivot DataFrame
"""
assets_and_liability = copy(GNUCashBook.ALL_ASSET_TYPES)
assets_and_liability.append(GNUCashBook.LIABILITY)
        # Group by period
group_acc = self._balance_group_by_period(from_date=from_date, to_date=to_date, period=period,
account_types=assets_and_liability, drop_null=False)
        # convert into the target currency
group_acc = self._currency_calc(group_acc)
        # Sum up
equity_name = _('Equity')
if margins:
equity_name = margins.equity_name
df = self._sum_all(group_acc, total_name=equity_name, glevel=glevel, inverse=False)
        # Add totals
df = self._add_margins(df, margins)
return df
def balance_on_date(self, on_date, col_value_currency=cols.VALUE_CURRENCY,
account_names=None, account_guids=None, start_balance=False):
"""
        Returns a DataFrame with the balances of the selected accounts at the end of the given day.
        If start_balance=True, the balance at the start of the given day is returned instead
        (accounts with a zero balance produce no row).
        Balance in number of shares: cum_sum
        Balance in the book currency: value_currency
        :param on_date:
        :param account_names:
        :param account_guids:
        :param start_balance: False - balance at the end of the day, True - balance at the start of the day
        :return: DataFrame with the balances
"""
        # Filter by date
if start_balance:
df = (self.df_splits[(self.df_splits[cols.POST_DATE] < on_date)]).copy()
else:
df = (self.df_splits[(self.df_splits[cols.POST_DATE] <= on_date)]).copy()
        # Filter by accounts
if account_names:
df = df[(df[cols.FULLNAME]).isin(account_names)]
if account_guids:
df = df[(df[cols.ACCOUNT_GUID]).isin(account_guids)]
        # Set the index to account_guid
df['guid'] = df.index
df.set_index(cols.ACCOUNT_GUID, inplace=True, drop=False)
        # Drop duplicate index entries, keeping the last split per account
df = df[~df.index.duplicated(keep='last')]
        # cum_sum now holds the account balance on the date (if present)
df[cols.POST_DATE] = numpy.datetime64(on_date)
# df.drop(cols.VALUE, axis=1, inplace=True)
# df.rename(columns={cols.CUM_SUM: cols.VALUE}, inplace=True)
if not df.empty:
df = self._currency_calc(df, col_value=cols.CUM_SUM,
col_value_currency=col_value_currency)
            # Drop zero balances
df = df[df[cols.VALUE_CURRENCY] != 0]
df[cols.DESCRIPTION] = 'Balance on date'
df.set_index('guid', inplace=True)
return df
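    # A minimal usage sketch (the date and account name are illustrative):
    #   raw.balance_on_date(date(2020, 12, 31), account_names=["Assets:Cash"])
    # One row is returned per account with a non-zero balance at the end of that day.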
def yield_calc(self, account_guid=None, account_name=None, account_types=None, from_date=None, to_date=None,
recurse=True, rename_col=True):
"""
        Calculate the annualized return of an account, and of its children when recurse=True.
        Specify either account_name or account_guid.
        If neither account_guid nor account_name is given, the root account is used.
        If to_date is not given, today's date is used.
        You may pass self.max_date to use the date of the last transaction in the GnuCash book.
:param account_guid:
:param account_name:
        :param account_types: list of account types to filter by
:param from_date:
:param to_date:
        :param recurse: also calculate the returns of child accounts
:return: dataframe
"""
self._add_xirr_info()
ar_xirr = self._xirr_child_calc_array(account_guid=account_guid, account_name=account_name,
account_types=account_types,
from_date=from_date, to_date=to_date,
recurse=recurse
)
        # Columns in the required order
df = pandas.DataFrame(ar_xirr, columns=[
# cols.SHORTNAME,
cols.FULLNAME,
cols.YIELD_TOTAL,
cols.YIELD_INCOME,
cols.YIELD_CAPITAL,
cols.YIELD_EXPENSE,
cols.START_DATE,
cols.END_DATE,
cols.DAYS
])
        # Rename columns for display
if rename_col:
df.rename({cols.YIELD_TOTAL: _('Total'),
cols.YIELD_INCOME: _('Cashflow'),
cols.YIELD_CAPITAL: _('Capital'),
cols.YIELD_EXPENSE: _('Expense'),
cols.START_DATE: _('Start date'),
cols.END_DATE: _('End date'),
cols.DAYS: _('Days')
}, inplace=True, axis=1 )
if account_guid:
account_name = self.df_accounts.loc[account_guid, cols.SHORTNAME]
df.sort_values(cols.FULLNAME, inplace=True)
df[cols.FULLNAME] = df[cols.FULLNAME].apply(utils.shift_account_name, args=(account_name,))
df.set_index(cols.FULLNAME, inplace=True, drop=True)
return df
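    # A minimal usage sketch (the file and account names are illustrative, not part
    # of this project):
    #   raw = RawData("assets.gnucash")
    #   df_yield = raw.yield_calc(account_name="Assets:Brokerage", to_date=raw.max_date)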
def _xirr_child_calc_array(self, account_guid=None, account_name=None, account_types=None,
from_date=None, to_date=None, df_all_xirr=None, recurse=True):
"""
        Calculates the return of an account or of several accounts.
        Returns an array of dictionaries.
:param account_guid:
:param account_name:
:param account_types:
:param from_date:
:param to_date:
:param df_all_xirr:
:param recurse:
:return: array of dictionaries with annual return
"""
root_guid = account_guid
        # Resolve the guid of the account whose return is to be calculated
if not root_guid:
if account_name:
root_guid = self._get_account_guid(account_name)
else:
root_guid = self.root_account_guid
        # If account types are not given, use all types relevant for xirr
if not account_types:
account_types = [GNUCashBook.BANK,
GNUCashBook.ASSET,
GNUCashBook.STOCK,
GNUCashBook.MUTUAL,
GNUCashBook.LIABILITY]
        # root_guid now holds the account to start from.
        # Calculate its return and the returns of its descendants.
ar_xirr = []
# Получение списка проводок по которым считается доходность
if df_all_xirr is None:
child_guids = self._get_child_accounts(root_guid, account_types=account_types,
xirr_enable=True, recurse=True)
account_guids = [root_guid] + child_guids
df_all_xirr = self._get_all_for_xirr(account_guids=account_guids, from_date=from_date, to_date=to_date)
        # Calculate the return of the current account
if root_guid != self.root_account_guid:
xirr_root = self._xirr_calc(account_guid=root_guid, account_types=account_types,
df_all_xirr=df_all_xirr)
if xirr_root:
ar_xirr += [xirr_root]
        # Calculate the returns of the child accounts if requested
if recurse:
childs = self._get_child_accounts(account_guid=root_guid, account_types=account_types,
xirr_enable=True, recurse=False)
for child in childs:
sub_xirr = self._xirr_child_calc_array(account_guid=child, account_types=account_types,
df_all_xirr=df_all_xirr, recurse=recurse)
if sub_xirr:
ar_xirr += sub_xirr
return ar_xirr
def _xirr_calc(self, account_guid, account_types, df_all_xirr):
"""
        Returns the total return of the given account, computed from the df_all_xirr table
:param account_guid:
:param account_types:
:param df_all_xirr: table with all xirr values for calculating
:return: dictionary with annual return
"""
child_guids = self._get_child_accounts(account_guid, account_types=account_types,
xirr_enable=True, recurse=True)
account_guids = [account_guid] + child_guids
df_xirr = (df_all_xirr[df_all_xirr[cols.XIRR_ACCOUNT].isin(account_guids)]).copy()
if df_xirr.empty:
return
        # Total return
yield_total = self._xirr_by_dataframe(df_xirr)
        # Return attributable to the cash flow
if not any(df_xirr[cols.ACCOUNT_TYPE].isin([GNUCashBook.INCOME])):
yield_income = Decimal(0)
else:
            # Return excluding the cash flow
df_without_income = df_xirr[df_xirr[cols.ACCOUNT_TYPE] != GNUCashBook.INCOME]
without_income_yeld = self._xirr_by_dataframe(df_without_income)
yield_income = yield_total - without_income_yeld
        # Cost of expenses
if not any(df_xirr[cols.ACCOUNT_TYPE].isin([GNUCashBook.EXPENSE])):
yield_expense = Decimal(0)
else:
            # Return excluding expenses
df_without_expense = df_xirr[df_xirr[cols.ACCOUNT_TYPE] != GNUCashBook.EXPENSE]
yield_without_expense = self._xirr_by_dataframe(df_without_expense)
yield_expense = yield_without_expense - yield_total
itog = {}
round_prec = 4
itog[cols.FULLNAME] = self.df_accounts.loc[account_guid][cols.FULLNAME]
itog[cols.SHORTNAME] = self.df_accounts.loc[account_guid][cols.SHORTNAME]
itog[cols.YIELD_TOTAL] = round(yield_total, round_prec)
itog[cols.YIELD_INCOME] = round(yield_income, round_prec)
itog[cols.YIELD_EXPENSE] = round(yield_expense, round_prec)
itog[cols.YIELD_CAPITAL] = itog[cols.YIELD_TOTAL] - itog[cols.YIELD_INCOME]
itog[cols.START_DATE] = df_xirr[cols.POST_DATE].min().date()
itog[cols.END_DATE] = df_xirr[cols.POST_DATE].max().date()
itog[cols.DAYS] = (itog[cols.END_DATE] - itog[cols.START_DATE]).days
# itog[cols.XIRR_DAYS] = days
return itog
def _xirr_by_dataframe(self, obj, date_field=cols.POST_DATE, value_field=cols.XIRR_VALUE):
"""
        Computes xirr over the values of a dataframe. obj may be a DataFrame or an array of dicts.
:param obj: DataFrame
:param date_field: Name of date column
:param value_field: Name of value column
:return: annual yield
"""
df = pandas.DataFrame(obj, columns=[date_field, value_field])
df[date_field] = pandas.to_datetime(df[date_field]).dt.date
tuples = [tuple(x) for x in df.to_records(index=False)]
a_yield = xirr(tuples)
return a_yield
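    # For example, an outflow of 100 on 2020-01-01 and an inflow of 110 on
    # 2021-01-01 would reach xirr as [(date(2020, 1, 1), -100), (date(2021, 1, 1), 110)]
    # and give an annual return of roughly 0.10 (values are for illustration only).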
def _get_child_accounts(self, account_guid, account_types=None, xirr_enable=None, recurse=True):
"""
        Returns the list of descendant accounts.
        recurse=True  - all descendants
        recurse=False - direct children only
:param account_guid:
:return:
"""
# speed optimization
df = self.df_accounts
        # Filter by account types
if account_types:
df = df[(df[cols.ACCOUNT_TYPE]).isin(account_types)]
        # Filter by xirr_enable
if xirr_enable:
df = df[df[cols.XIRR_ENABLE] == xirr_enable]
df = df[df[cols.PARENT_GUID] == account_guid]
childs = df.index.tolist()
if recurse:
sub_childs = []
for child_account in childs:
sub_childs += self._get_child_accounts(child_account)
childs += sub_childs
return childs
def _get_account_guid(self, fullname):
"""
        Returns the account guid for the given full name, or None if the name is not found
:param fullname:
:return: account guid
"""
idx = self.df_accounts[self.df_accounts[cols.FULLNAME] == fullname].index.tolist()
if idx:
return idx[0]
else:
return None
def _add_xirr_info(self):
if self._xirr_info_added:
return
        # Add the xirr columns to df_splits
self.df_splits[cols.XIRR_ACCOUNT] = ''
self.df_splits[cols.XIRR_VALUE] = ''
        # Get the list of transaction guids
tr_guids = self.df_splits[cols.TRANSACTION_GUID].drop_duplicates().tolist()
        # Set the index to transaction guid
self.df_splits.set_index(cols.TRANSACTION_GUID, inplace=True, append=True, drop=True)
self.df_splits = self.df_splits.swaplevel()
# dataframe_to_excel(self.df_splits, 'splits-after-index')
        # Iterate over all transactions
for tr_guid in tr_guids:
df_tr_splits = self.df_splits.loc[tr_guid]
self._add_xirr_by_transaction(df_tr_splits, tr_guid)
        # Restore the original index
self.df_splits = self.df_splits.swaplevel()
# dataframe_to_excel(self.df_splits, 'splits-swap')
self.df_splits.reset_index(level=1, drop=False, inplace=True)
self._xirr_info_added = True
def _add_xirr_by_transaction(self, df_tr_splits: pandas.DataFrame, tr_guid: str):
"""
        Fills the xirr_account and xirr_value columns in df_splits
        according to the rules described in the accompanying spreadsheet.
        Performance could probably be improved.
        :param df_tr_splits: pandas.DataFrame
            All splits of the transaction
:param tr_guid: transaction guid
:return:
"""
# income and expense types
incexp_types = [GNUCashBook.INCOME, GNUCashBook.EXPENSE]
# income splits
df_incexps = df_tr_splits[df_tr_splits[cols.ACCOUNT_TYPE].isin(incexp_types)]
        # all remaining splits
df_assets = df_tr_splits[~df_tr_splits[cols.ACCOUNT_TYPE].isin(incexp_types)]
        # are there any xirr-enabled accounts
if not any(df_assets[cols.XIRR_ENABLE]):
return
        # Simple two-split transaction
if len(df_tr_splits) == 2:
if (len(df_incexps) == 1) and (len(df_assets) == 1):
                # the asset split already has xirr_enable set;
                # add the asset split only if xirr_enable on the income/expense split is False
ie_xirr_enable = df_incexps.iloc[0][cols.XIRR_ENABLE]
if not ie_xirr_enable:
self._set_xirr_to_splits(tr_guid=tr_guid, df=df_assets)
return
elif len(df_assets) == 2:
                # add all rows with xirr_enable
self._set_xirr_to_splits(tr_guid=tr_guid, df=df_assets)
return
else:
                # Ambiguous case
print("Unknown transaction type for xirr.")
self._print_transaction_info(df_tr_splits, tr_guid)
return
# Multi transaction
# has one stock
df_stocks = df_tr_splits[df_tr_splits[cols.ACCOUNT_TYPE].isin(GNUCashBook.STOCK_XIRR_TYPES)]
len_stocks = len(df_stocks) # number of stock account in transaction
if len_stocks > 0: # transaction has stock accounts
asset_guid = df_stocks.iloc[0][cols.ACCOUNT_GUID] # first stock account
if len_stocks == 2:
asset_guid2 = df_stocks.iloc[1][cols.ACCOUNT_GUID] # second stock account
if asset_guid != asset_guid2:
# unknown transaction
print("Unknown stock transaction with two different stock")
self._print_transaction_info(df_tr_splits, tr_guid)
return
else:
print("Warning! two equal stocks in one transaction. I am calculate xirr, but it is wrong")
self._print_transaction_info(df_tr_splits, tr_guid)
if len_stocks > 2:
# unknown transaction
print("Unknown transaction with more than two stocks")
self._print_transaction_info(df_tr_splits, tr_guid)
return
            # Add all asset splits with xirr_enable = True
self._set_xirr_to_splits(tr_guid=tr_guid, df=df_assets)
            # Determine the account that receives the profit or loss
            # and add all income/expense splits with xirr_enable = True
self._set_xirr_to_splits(tr_guid=tr_guid, df=df_incexps, xirr_account=asset_guid)
return
elif (len(df_assets) == 2) and (len(df_incexps) == 1):
# Тест. Добавление признака такой транзакции
# self.df_splits.loc[tr_guid, 'tr_type'] = 'asset-asset-incexp'
# Нужно добавить все строки asset с xirr_enable = True
self._set_xirr_to_splits(tr_guid=tr_guid, df=df_assets)
# И добавить все расходы/доходы у которых xirr_enable=true
master_guid = self._get_master_asset_guid(df_assets)
self._set_xirr_to_splits(tr_guid=tr_guid, df=df_incexps, xirr_account=master_guid)
return
elif (len(df_assets) == 1) and (len(df_incexps) == 2):
# Нужно добавить все строки asset с xirr_enable = True
self._set_xirr_to_splits(tr_guid=tr_guid, df=df_assets)
# И добавить все расходы/доходы у которых xirr_enable=true
master_guid = df_assets.iloc[0][cols.ACCOUNT_GUID]
self._set_xirr_to_splits(tr_guid=tr_guid, df=df_incexps, xirr_account=master_guid)
return
else:
# Error, unknown stock transaction for xirr
print('Unknown multi transaction for xirr calculate.')
self._print_transaction_info(df_tr_splits, tr_guid)
return
def _print_transaction_info(self, df_tr_splits, tr_guid):
"""
Print transaction info
:param df_tr_splits: pandas.DataFrame
            All splits of the transaction
:param tr_guid: transaction guid
:return:
"""
tr_date = df_tr_splits.iloc[0][cols.POST_DATE]
tr_descr = df_tr_splits.iloc[0][cols.DESCRIPTION]
print('Transaction info: '
'guid={tr_guid}. Date={tr_date}.\n Description={tr_descr}'.format(tr_guid=tr_guid, tr_date=tr_date,
tr_descr=tr_descr))
def _set_xirr_to_splits(self, tr_guid: str, df: pandas.DataFrame, xirr_account: str = None):
"""
        Sets the xirr_value column (amount with inverted sign) and the xirr_account column
        (guid of the account the turnover is attributed to for xirr) in df_splits,
        for the rows of df that have xirr_enable = True.
        If xirr_account is given it is used; otherwise the account from the df row is used.
:param tr_guid:
:param df:
:param xirr_account:
:return:
"""
for index, row in df.iterrows():
if row[cols.XIRR_ENABLE]:
value_currency = row[cols.VALUE_CURRENCY]
if value_currency != 0:
self.df_splits.loc[(tr_guid, index), cols.XIRR_VALUE] = value_currency * -1
if xirr_account:
self.df_splits.loc[(tr_guid, index), cols.XIRR_ACCOUNT] = xirr_account
else:
self.df_splits.loc[(tr_guid, index), cols.XIRR_ACCOUNT] = row[cols.ACCOUNT_GUID]
def _get_master_asset_guid(self, df_assets: pandas.DataFrame):
"""
        Finds the account_guid in df_assets to which the transaction's profit/loss is attributed.
        Returns the account_guid of the selected account.
        :param df_assets: transaction splits of asset type
        :param df_incexp: transaction splits of income/expense type
:return: account_guid
"""
df_asset = df_assets[df_assets[cols.XIRR_ENABLE]]
        # No asset accounts at all
if df_asset.empty:
return None
        # If there is only one account, it is the master
if len(df_asset) == 1:
return df_asset.iloc[0][cols.ACCOUNT_GUID]
        # If there is a stock-type account, it is the master
if any(df_asset[cols.ACCOUNT_TYPE].isin(GNUCashBook.STOCK_XIRR_TYPES)):
df = df_asset[df_asset[cols.ACCOUNT_TYPE].isin(GNUCashBook.STOCK_XIRR_TYPES)]
return df.iloc[0][cols.ACCOUNT_GUID]
        # If there is an account with a zero amount, it is the master
df_zero = df_asset[df_asset[cols.VALUE_CURRENCY] == 0]
if len(df_zero) > 0:
return df_zero.iloc[0][cols.ACCOUNT_GUID]
        # If there is a liability-type account, it is the master
if any(df_asset[cols.ACCOUNT_TYPE].isin([GNUCashBook.LIABILITY])):
df = df_asset[df_asset[cols.ACCOUNT_TYPE].isin([GNUCashBook.LIABILITY])]
return df.iloc[0][cols.ACCOUNT_GUID]
        # Here it is hard to tell which account is the master,
        # so take the account with a negative amount
df = df_asset[df_asset[cols.VALUE_CURRENCY] < 0]
if df.empty:
            print('Error detecting master account for xirr')
return None
else:
return df.iloc[0][cols.ACCOUNT_GUID]
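    # Hypothetical illustration of the priority above: for a buy transaction that takes
    # -5000 from a brokerage cash account and puts +5000 worth of shares into a
    # stock-type account, the stock account is picked as master; with no stock,
    # zero-amount or liability account involved, the account with the negative
    # amount (the paying side) is used.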
def _get_all_for_xirr(self, account_guids, from_date=None, to_date=None):
"""
        Returns all data needed for the xirr calculation
:param account_guids:
:param from_date:
:param to_date:
:return:
"""
df_splits = self._get_splits_for_xirr(account_guids=account_guids, from_date=from_date, to_date=to_date)
df_balances = self._get_balances_for_xirr(account_guids=account_guids, from_date=from_date, to_date=to_date)
df_all = pandas.concat([df_splits, df_balances], ignore_index=True, sort=True)
return df_all
def _get_balances_for_xirr(self, account_guids, from_date=None, to_date=None):
"""
        Returns the opening and closing balances for the xirr calculation
:param account_guids:
:param from_date:
:param to_date:
:return:
"""
        # Date for the closing balance
        end_date = to_date
        if not end_date:
            # end_date = self.max_date # Date of the last transaction in the book
            end_date = datetime.today() # Current date
        # Closing balance
df_itog_balances = self.balance_on_date(end_date, account_guids=account_guids)
        # Add the opening balance
if from_date:
start_balances = self.balance_on_date(from_date, account_guids=account_guids, start_balance=True)
            # The opening balance counts as money spent
start_balances[cols.VALUE_CURRENCY] = start_balances[cols.VALUE_CURRENCY] * (-1)
df_itog_balances = pandas.concat([start_balances, df_itog_balances], ignore_index=True, sort=True)
        # Set xirr_value and xirr_account
df_itog_balances[cols.XIRR_VALUE] = df_itog_balances[cols.VALUE_CURRENCY]
df_itog_balances[cols.XIRR_ACCOUNT] = df_itog_balances[cols.ACCOUNT_GUID]
return df_itog_balances
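    # Example with made-up figures: a portfolio worth 1000 on from_date and 1200 on
    # to_date contributes two synthetic cash flows, -1000 (opening balance, treated
    # as money already invested) and +1200 (closing balance), which is the sign
    # convention the xirr calculation expects.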
def _get_splits_for_xirr(self, account_guids, from_date=None, to_date=None):
"""
        Returns the rows of df_splits used for the xirr calculation
:param account_guids:
:param from_date:
:param to_date:
:return: dataframe
"""
sel_df = (self.df_splits[(self.df_splits[cols.XIRR_ACCOUNT]).isin(account_guids)]).copy()
        # Filter by date
if from_date:
sel_df = sel_df[(sel_df[cols.POST_DATE] >= from_date)]
if to_date:
sel_df = sel_df[(sel_df[cols.POST_DATE] <= to_date)]
return sel_df
def _balance_group_by_period(self, from_date, to_date, period, account_types=None, drop_null=False, accounts=None, is_guid=False):
"""
        Groups balances by period for the given account types.
        Returns a DataFrame with all accounts grouped by period (summed over each period)
:param from_date:
:param to_date:
:param period:
:param account_types:
:param drop_null:
:return:
"""
        # Select the required columns (almost all of them are needed)
sel_df = pandas.DataFrame(self.df_splits,
columns=[cols.ACCOUNT_GUID, cols.POST_DATE, cols.FULLNAME, cols.COMMODITY_GUID, cols.ACCOUNT_TYPE,
cols.CUM_SUM, cols.SHORTNAME, cols.HIDDEN, cols.MNEMONIC])
        # Filter by account types
if account_types:
if type(account_types) is str:
account_types = [account_types]
sel_df = sel_df[(sel_df[cols.ACCOUNT_TYPE]).isin(account_types)]
        # Filter by account names or account guids
if accounts:
if is_guid:
sel_df = sel_df[(sel_df[cols.ACCOUNT_GUID]).isin(accounts)]
else:
sel_df = sel_df[(sel_df[cols.FULLNAME]).isin(accounts)]
        # List of all account_guids
account_guids = sel_df[cols.ACCOUNT_GUID].drop_duplicates().tolist()
        # Add a running-total column per account
        # Will the running total follow ascending dates? No! It has to be sorted first
sel_df.rename(columns={cols.CUM_SUM: cols.VALUE}, inplace=True)
        # It is assumed here that there is only one price per day,
        # so drop the duplicates
sel_df.set_index([cols.ACCOUNT_GUID, cols.POST_DATE], inplace=True)
        # drop duplicated index entries
sel_df = sel_df[~sel_df.index.duplicated(keep='last')]
        # Index over the requested period
idx = pandas.date_range(from_date, to_date, freq=period)
        # loop over all account_guids
group_acc = pandas.DataFrame()
for account_guid in account_guids:
            # DataFrame with dates and values
df_acc = sel_df.loc[account_guid]
if not df_acc.empty:
df_acc = df_acc.resample(period).ffill()
df_acc = df_acc.reindex(idx, method='ffill')
                # All the columns are lost here if the frame starts out empty
if drop_null:
                    # Drop the account if all its values are 0
has_balances = not (df_acc[cols.VALUE].apply(lambda x: x == 0).all())
else:
has_balances = True
                # Keep only non-empty accounts
if has_balances:
acc_info = self.df_accounts.loc[account_guid]
df_acc.index.name = cols.POST_DATE
df_acc[cols.ACCOUNT_GUID] = account_guid
df_acc[cols.FULLNAME] = acc_info[cols.FULLNAME]
df_acc[cols.COMMODITY_GUID] = acc_info[cols.COMMODITY_GUID]
df_acc[cols.ACCOUNT_TYPE] = acc_info[cols.ACCOUNT_TYPE]
df_acc[cols.SHORTNAME] = acc_info[cols.SHORTNAME]
df_acc[cols.HIDDEN] = acc_info[cols.HIDDEN]
df_acc[cols.MNEMONIC] = acc_info[cols.MNEMONIC]
df_acc.set_index(cols.ACCOUNT_GUID, append=True, inplace=True)
                    # Swap the index levels
df_acc = df_acc.swaplevel()
group_acc = group_acc.append(df_acc)
        # Reset one index level (post_date)
group_acc.reset_index(inplace=True)
        # Replace NaN with zeros
group_acc.fillna(Decimal(0), inplace=True)
return group_acc
def balance_to_currency(self, from_date=None, to_date=None, accounts=None):
"""
        Returns the balance of accounts, by account name or guid, converted into the presentation currency
        :param from_date:
        :param to_date:
        :param accounts:
:return: DataFrame
"""
        # Select the required columns
sel_df = pandas.DataFrame(self.df_splits,
columns=[cols.POST_DATE,
cols.TRANSACTION_GUID,
cols.ACCOUNT_GUID,
cols.FULLNAME,
cols.COMMODITY_GUID,
cols.ACCOUNT_TYPE,
cols.VALUE,
cols.CUM_SUM,
cols.SHORTNAME,
cols.MNEMONIC,
cols.CURRENCY_GUID])
if accounts:
            # Selected accounts only
if type(accounts) is str:
accounts = [accounts]
sel_df = sel_df[(sel_df[cols.FULLNAME]).isin(accounts)]
else:
            # select all asset accounts
sel_df = sel_df[(sel_df[cols.ACCOUNT_TYPE]).isin(GNUCashBook.ALL_ASSET_TYPES)]
        # Filter by date
if from_date:
sel_df = sel_df[(sel_df[cols.POST_DATE] >= from_date)]
if to_date:
sel_df = sel_df[(sel_df[cols.POST_DATE] <= to_date)]
        # convert to the presentation currency
group = self._currency_calc(sel_df, from_date=from_date)
return group
def balance_by_period(self, from_date, to_date, period='M', account_types=GNUCashBook.ALL_ASSET_TYPES, glevel=1,
margins = None, drop_null=False):
"""
        Returns a summary balance of the accounts over a date interval, broken down by period
:param from_date:
:param to_date:
:param period:
:param account_types:
:param glevel:
:param margins:
        :param drop_null: Drop zero values (the totals may then not contain all columns)
:return: DataFrame
"""
group_acc = self._balance_group_by_period(from_date=from_date, to_date=to_date, period=period,
account_types=account_types, drop_null=drop_null)
        # convert to the presentation currency
group = self._currency_calc(group_acc)
        # Group by accounts
group = self._group_by_accounts(group, glevel=glevel, drop_null=drop_null)
# group = self._curcalc_and_accgroup(group_acc, from_date=from_date, to_date=to_date, period=period,
# glevel=glevel, margins=margins, drop_null=drop_null)
        # Add totals
group = self._add_margins(group, margins)
return group
def turnover_by_period(self, from_date, to_date, period='M', account_type=GNUCashBook.EXPENSE, glevel=1,
margins: Margins = None, drop_null=False):
"""
        Gets summary turnover of expenses/income over a time interval, broken into periods.
        For example, monthly expenses over a year. Returns a DataFrame
:param from_date: Start date
:param to_date: Finish date
:param period: "M" for month, "D" for day...
:param account_type: INCOME or EXPENSE
:param glevel: group level
:return: pivot DataFrame
"""
sel_df = self._turnover_group_by_period(from_date=from_date, to_date=to_date, period=period,
account_type=account_type)
# inverse income
if account_type == GNUCashBook.INCOME:
sel_df[cols.VALUE] = sel_df[cols.VALUE].apply(lambda x: -1 * x)
        # convert to the presentation currency
group = self._currency_calc(sel_df)
        # Group by accounts
group = self._group_by_accounts(group, glevel=glevel, drop_null=drop_null)
        # Zeros appear here
# group.fillna(Decimal(0), inplace=True)
        # Add totals
group = self._add_margins(group, margins)
group.replace(0, Decimal(0), inplace=True)
return group
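    # Hypothetical call: turnover_by_period(date(2019, 1, 1), date(2019, 12, 31),
    # period='M', account_type=GNUCashBook.EXPENSE, glevel=1) returns a pivot table
    # with one row per first-level expense account and one column per month, already
    # converted to the presentation currency; INCOME turnovers are negated so that
    # income shows up as positive numbers.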
def profit_by_period(self, from_date: date, to_date: date, period='M', glevel=1, margins: Margins = None):
"""
        Gets the profit for the period.
        Returns a DataFrame
:param from_date: Start date
:param to_date: Finish date
:param period: "M" for month, "D" for day...
:param account_type: INCOME or EXPENSE
:param glevel: group level
:param margins:
:return: pivot DataFrame
"""
income_and_expense = [GNUCashBook.INCOME, GNUCashBook.EXPENSE]
        # Group by period
sel_df = self._turnover_group_by_period(from_date=from_date, to_date=to_date, period=period,
account_type=income_and_expense)
        # convert to the presentation currency
group = self._currency_calc(sel_df)
        # Group by accounts
        # Sum everything up
profit_name = _('Profit')
if margins:
profit_name = margins.profit_name
df = self._sum_all(group, total_name=profit_name, glevel=glevel, inverse=True)
        # Add totals
df = self._add_margins(df, margins)
return df
def _sum_all(self, dataframe, total_name, glevel, inverse):
"""
        Sums all values of the DataFrame and returns a totals row named total_name
:param dataframe:
:param total_name:
:param glevel:
:param inverse:
:return:
"""
        # Sum it up
# group = dataframe.groupby(cols.POST_DATE).value_currency.sum()
group = dataframe.groupby(cols.POST_DATE).value_currency.sum()
if inverse:
group = group.map(lambda x: x * -1)
        # Pivot the dates from rows into columns
df = pandas.DataFrame(group).T
df.index = [total_name]
        # Columns need to be added if the index is a MultiIndex
if type(glevel) is int:
glevel = [glevel]
idx_len = len(glevel)
new_indexes = [str(i) for i in range(1, idx_len)]
if new_indexes:
            # Levels need to be added
for col_name in new_indexes:
df[col_name] = ''
df.set_index(new_indexes, append=True, inplace=True)
return df
def _turnover_group_by_period(self, from_date, to_date, period, account_type):
"""
        Returns account turnovers grouped by period
:param from_date:
:param to_date:
:param period:
:param account_type:
:return:
"""
if type(account_type) is str:
account_type = [account_type]
        # Filter by date
sel_df = self.df_splits[(self.df_splits[cols.POST_DATE] >= from_date)
& (self.df_splits[cols.POST_DATE] <= to_date)]
        # Select the required account types
sel_df = sel_df[(sel_df[cols.ACCOUNT_TYPE]).isin(account_type)]
        # Group by the given period
sel_df.set_index(cols.POST_DATE, inplace=True)
sel_df = sel_df.groupby([pandas.Grouper(freq=period), cols.FULLNAME, cols.COMMODITY_GUID]).value.sum().reset_index()
return sel_df
def _currency_calc(self, dataframe,
col_value=cols.VALUE,
col_currency_guid=cols.COMMODITY_GUID,
col_rate=cols.RATE,
col_value_currency=cols.VALUE_CURRENCY
):
"""
        Adds a currency-rate column and a value-in-presentation-currency column to the dataframe.
        The source dataframe must contain the fields:
        post_date - date
        value - amount in the account currency, or the number of securities
        commodity_guid - guid of the account or security
        The added columns are:
        rate - exchange rate in the presentation currency
        value_currency - amount in the presentation currency
        The source dataframe is expected to already be grouped as needed;
        this function does not group it!
        :param dataframe:
        :param col_value:
        :param col_currency_guid:
        :param col_rate:
        :param col_value_currency:
        :return: DataFrame with the added columns
"""
df = dataframe
        # Determine the prices on the required dates
# group_prices = self.df_prices_days
start_date = df[cols.POST_DATE].min()
end_date = df[cols.POST_DATE].max()
prices_on_dates = self._group_prices_by_period(start_date, end_date, 'D', col_rate=col_rate)
        # Add the rate column
if prices_on_dates.empty:
df[col_rate] = 1
else:
df = df.merge(prices_on_dates, left_on=[col_currency_guid, cols.POST_DATE], right_index=True,
how='left')
            # Fill empty fields with one
df[col_rate] = df[col_rate].fillna(Decimal(1))
        # Convert to the presentation currency
df[col_value_currency] = (df[col_value] * df[col_rate]).apply(lambda x: round(x, 2))
        # The value_currency column now holds the real amount in rubles
        # End of the conversion into the presentation currency
return df
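    # Minimal sketch of the contract above (hypothetical data): an input row with
    # post_date=2020-01-31, value=100 and commodity_guid pointing to USD comes back
    # with rate = the price of that commodity in the presentation currency on that
    # date (or 1 when no price is known) and value_currency = round(value * rate, 2).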
def _group_by_accounts(self, dataframe, glevel=1, drop_null=False):
"""
Group dataframe by accounts, add totals
glevel - group level of accounts: array of levels or single int level
Example:
glevel=[0, 1] - Group accounts for 0 and 1 level,
into 2 rows and 2 columns (Multiindex dataframe):
Assets - Current assets
- reserve
glevel=1 - groups only 1 level, into 2 rows and 1 column:
Current assets
reserve
Accounts example:
0 1 2 (account levels)
Assets:Current assets:Cash
Assets:Current assets:Card
Assets:reserve:Deposite
Assets:reserve:Cash
        :param dataframe:
        :param glevel: account grouping level: an array of levels or a single level number
        :param drop_null:
        :return:
"""
        # Select the required columns
sel_df = pandas.DataFrame(dataframe,
columns=[cols.POST_DATE, cols.FULLNAME, cols.VALUE_CURRENCY]).copy()
        # Add a MultiIndex by date and account names
# Get dataframe where fullname split to parts (only these parts)
s = sel_df[cols.FULLNAME].str.split(':', expand=True)
# change columns name type from int to string, for new version Pandas
s.rename(str, axis='columns', inplace=True)
# Get list of column name's of fullname parts
columns = s.columns
columns = columns.tolist()
columns = [cols.POST_DATE] + columns
# Add splitted fullname columns
sel_df = pandas.concat([sel_df, s], axis=1)
        sel_df.sort_values(by=columns, inplace=True) # Sort by date and accounts
if drop_null:
            sel_df.dropna(subset=[cols.VALUE_CURRENCY], inplace=True) # Drop empty values
            # sel_df = sel_df[sel_df[cols.VALUE] != 0] # Drop zero values
        sel_df.drop(cols.FULLNAME, axis=1, inplace=True) # Drop the fullname column
# set index by date and splitted fulname columns
sel_df.set_index(columns, inplace=True)
        # Pivot the dates from rows into columns
# date index to column
unst = sel_df.unstack(level=cols.POST_DATE, fill_value=0)
# delete column level header
unst.columns = unst.columns.droplevel()
        # Group by the requested level
group = unst.groupby(level=glevel).sum()
return group
def inflation_by_period(self, from_date, to_date, period='A', glevel=1, cumulative=False):
"""
Calcs inflation by periods. Return DataFrame with percent of inflation
:param from_date:
:param to_date:
:param period:
:param glevel:
:param cumulative: in each column calculate cumulative inflation to the first column (True)
or inflation to the previous column (False)
:return: DataFrame
"""
# Calculate expenses
margins = Margins()
margins.total_row = True
df = self.turnover_by_period(from_date=from_date, to_date=to_date, period=period,
account_type=GNUCashBook.EXPENSE, glevel=glevel, margins=margins)
# Empty Dataframe with same columns and index
df_inf = pandas.DataFrame(index=df.index, columns=df.columns[1:])
columns = df.columns
for i in range(1, len(columns)):
if not cumulative:
                # Percent change relative to the previous column
df_inf[columns[i]] = self._percent_increase(df[columns[i-1]], df[columns[i]])
else:
                # Percent change relative to the first column
df_inf[columns[i]] = self._percent_increase(df[columns[0]], df[columns[i]], i)
# Average by period
if not cumulative:
i2 = len(columns) - 1
df_inf[('Total')] = self._percent_increase(df[columns[0]], df[columns[i2]], i2)
return df_inf
@staticmethod
def _percent_increase(a_ser, b_ser, distance=1):
"""
Return percent increase between two series
:param a_ser: First series
:param b_ser: Last series
:param distance: time counts between series
:return: series: percent increase
"""
i_ser = ((b_ser.astype('float64')).divide(a_ser.astype('float64'))).pow(1 / distance) - 1
return i_ser
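    # Worked example: if expenses grow from 100 to 121 over two periods,
    # _percent_increase(a_ser, b_ser, distance=2) evaluates
    # (121 / 100) ** (1 / 2) - 1 = 0.1, i.e. an average increase of 10% per period
    # (the arguments are pandas Series in practice; scalars are shown for clarity).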
def _group_prices_by_period(self, from_date, to_date, period='M', guids=None, col_rate=cols.RATE):
"""
        Gets asset prices/exchange rates for the period.
        Returns a table with the price of each asset at the end of each period (the last price closest to the date).
        The returned DataFrame has the index and columns
        [cols.COMMODITY_GUID, 'date'] ([cols.MNEMONIC, cols.CURRENCY_GUID, cols.RATE], dtype='object')
        rate - the exchange rate
:param from_date:
:param to_date:
:param period:
        :param guids: list of commodity guids, or None for all of them
:return: DataFrame with grouped prices
"""
all_commodities_guids = set(self.df_prices.index.get_level_values(cols.COMMODITY_GUID).drop_duplicates().tolist())
        # Index over the requested period
from_date2 = from_date
to_date2 = to_date
if not from_date:
from_date2 = self.min_date
if not to_date:
to_date2 = self.max_date
idx = pandas.date_range(from_date2, to_date2, freq=period)
        # List of commodity guids
if guids is None:
guids_list = all_commodities_guids
else:
guids_list = set(guids) & all_commodities_guids
        # it is assumed here that there is only one price per day
sel_df = pandas.DataFrame(self.df_prices,
columns=[cols.MNEMONIC, cols.CURRENCY_GUID, cols.VALUE])
        # loop over all commodity_guids
group_prices = pandas.DataFrame()
for commodity_guid in guids_list:
            # DataFrame with dates and values
sel_mnem = sel_df.loc[commodity_guid]
if not sel_mnem.empty:
sel_mnem = sel_mnem.resample(period).ffill()
sel_mnem = sel_mnem.reindex(idx, method='nearest')
sel_mnem.index.name = 'date'
sel_mnem[cols.COMMODITY_GUID] = commodity_guid
sel_mnem.set_index(cols.COMMODITY_GUID, append=True, inplace=True)
                # Swap the index levels
sel_mnem = sel_mnem.swaplevel()
group_prices = group_prices.append(sel_mnem)
        # List of the guids of all required currencies
if group_prices.empty:
currency_guids=None
else:
currency_guids = set(group_prices[cols.CURRENCY_GUID].drop_duplicates().tolist()) & all_commodities_guids
if currency_guids:
            # TODO: conversion into the presentation currency is needed here
pass
        # The rate column now holds the security's price in rubles
group_prices.rename(columns={cols.VALUE: col_rate, cols.CURRENCY_GUID: cols.PRICE_CURRENCY_GUID}, inplace=True)
return group_prices
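    # Note on the resampling above (illustrative): with period='M' every month-end in
    # the index gets the price whose quote date is nearest to it (method='nearest'),
    # so a security quoted only mid-month still receives a month-end rate.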
| gpl-3.0 |
raghavrv/scikit-learn | benchmarks/bench_plot_ward.py | 117 | 1283 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
plt.figure("scikit-learn Ward's method benchmark results")
plt.imshow(np.log(ratio), aspect='auto', origin="lower")
plt.colorbar()
plt.contour(ratio, levels=[1, ], colors='k')
plt.yticks(range(len(n_features)), n_features.astype(np.int))
plt.ylabel('N features')
plt.xticks(range(len(n_samples)), n_samples.astype(np.int))
plt.xlabel('N samples')
plt.title("Scikit's time, in units of scipy time (log)")
plt.show()
| bsd-3-clause |
furby32/pattern_recognition | gtk.py | 1 | 5578 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gio, GObject
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from generator import ClassGenerator
from generator import ClassHolder
from Clasifier import EuclideanDistance
from Clasifier import Mahalanobis
from Clasifier import MaxProbability
classifiers = [EuclideanDistance(),Mahalanobis(),MaxProbability()]
class MyWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="capp")
self.set_border_width(10)
self.set_default_size(400, 200)
self.Classifier = {'index':-1,'name':None}
self.filePath = None
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.colors = ['b', 'g', 'c', 'm', 'y','k']
self._x = 0
self._y = 0
header = Gtk.HeaderBar(title="Pattern Recognition")
header.set_subtitle("Classifier")
header.props.show_close_button = True
self.set_titlebar(header)
box = Gtk.Box(spacing=6)
self.add(box)
button1 = Gtk.Button("Choose File")
button1.connect("clicked", self.on_file_clicked)
box.add(button1)
name_store = Gtk.ListStore(int, str)
name_store.append([0, "Eculedian"])
name_store.append([1, "Mahalanobis"])
name_store.append([2, "MaxProbability"])
name_combo = Gtk.ComboBox.new_with_model_and_entry(name_store)
name_combo.connect("changed", self.on_name_combo_changed)
name_combo.set_entry_text_column(1)
box.add(name_combo)
self._xEntry = Gtk.Entry()
box.add(Gtk.Label("Coordenada x"))
box.add(self._xEntry)
self._yEntry = Gtk.Entry()
box.add(Gtk.Label("Coordenada y"))
box.add(self._yEntry)
self._lEntry = Gtk.Entry()
self._lEntry.set_text("20")
box.add(Gtk.Label("Limite"))
box.add(self._lEntry)
run_button = Gtk.Button(label="Classify")
run_button.connect("clicked", self.on_button_clicked)
box.add(run_button)
plot_button = Gtk.Button(label="Close plot")
plot_button.connect("clicked", self.on_button_plot)
box.add(plot_button)
self.f = Figure(figsize=(5, 4), dpi=100)
canvas = FigureCanvas(self.f) # a Gtk.DrawingArea
canvas.set_size_request(800, 600)
#sw.add_with_viewport(canvas)
box.add(canvas)
def on_name_combo_changed(self, combo):
tree_iter = combo.get_active_iter()
if tree_iter != None:
model = combo.get_model()
row_id, name = model[tree_iter][:2]
print("Selected: ID=%d, name=%s" % (row_id, name))
self.Classifier['index']=row_id
self.Classifier['name']= name
else:
entry = combo.get_child()
print("Entered: %s" % entry.get_text())
def on_file_clicked(self, widget):
dialog = Gtk.FileChooserDialog("Please choose a file", self,
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
self.add_filters(dialog)
response = dialog.run()
if response == Gtk.ResponseType.OK:
print("Open clicked")
print("File selected: " + dialog.get_filename())
self.filePath = dialog.get_filename()
elif response == Gtk.ResponseType.CANCEL:
print("Cancel clicked")
dialog.destroy()
def on_button_plot(self,widget):
plt.close('all')
def add_filters(self, dialog):
filter_text = Gtk.FileFilter()
filter_text.set_name("Text files")
filter_text.add_mime_type("text/plain")
dialog.add_filter(filter_text)
filter_py = Gtk.FileFilter()
filter_py.set_name("Python files")
filter_py.add_mime_type("text/x-python")
dialog.add_filter(filter_py)
filter_any = Gtk.FileFilter()
filter_any.set_name("Any files")
filter_any.add_pattern("*")
dialog.add_filter(filter_any)
"""
grid.attach_next_to(button3, button1, Gtk.PositionType.BOTTOM, 1, 2)
grid.attach_next_to(button4, button3, Gtk.PositionType.RIGHT, 2, 1)
grid.attach(button5, 1, 2, 1, 1)
grid.attach_next_to(button6, button5, Gtk.PositionType.RIGHT, 1, 1)
self.button = Gtk.Button(label="Click Here")
self.button.connect("clicked", self.on_button_clicked)
self.add(self.button)
"""
def on_button_clicked(self, widget):
print("Hello World")
        print(self.Classifier['index'])
        print(self.Classifier['name'])
x=0
y=0
l=0
if len(self._xEntry.get_text()) > 0:
x = int(self._xEntry.get_text())
if len(self._yEntry.get_text()) > 0:
y = int(self._yEntry.get_text())
if len(self._lEntry.get_text()) > 0:
l = int(self._lEntry.get_text())
a = ClassGenerator(size=100,config_path=self.filePath)
holder = ClassHolder(a.generate(),plotfig=self.f)
plot = holder.classify(classifiers[self.Classifier['index']],x,y,l)
if plot != None:
#print plot
plot.show()
widget = Gtk.Box()
#print(dir(widget.props))
win = MyWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
 | gpl-2.0 |
arhik/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/__init__.py | 69 | 28184 | """
This is an object-oriented plotting library.
A procedural interface is provided by the companion pylab module,
which may be imported directly, e.g::
from pylab import *
or using ipython::
ipython -pylab
For the most part, direct use of the object-oriented library is
encouraged when programming rather than working interactively. The
exceptions are the pylab commands :func:`~matplotlib.pyplot.figure`,
:func:`~matplotlib.pyplot.subplot`,
:func:`~matplotlib.backends.backend_qt4agg.show`, and
:func:`~pyplot.savefig`, which can greatly simplify scripting.
Modules include:
:mod:`matplotlib.axes`
defines the :class:`~matplotlib.axes.Axes` class. Most pylab
commands are wrappers for :class:`~matplotlib.axes.Axes`
methods. The axes module is the highest level of OO access to
the library.
:mod:`matplotlib.figure`
defines the :class:`~matplotlib.figure.Figure` class.
:mod:`matplotlib.artist`
defines the :class:`~matplotlib.artist.Artist` base class for
all classes that draw things.
:mod:`matplotlib.lines`
defines the :class:`~matplotlib.lines.Line2D` class for
drawing lines and markers
:mod:`matplotlib.patches`
defines classes for drawing polygons
:mod:`matplotlib.text`
defines the :class:`~matplotlib.text.Text`,
:class:`~matplotlib.text.TextWithDash`, and
:class:`~matplotlib.text.Annotate` classes
:mod:`matplotlib.image`
defines the :class:`~matplotlib.image.AxesImage` and
:class:`~matplotlib.image.FigureImage` classes
:mod:`matplotlib.collections`
classes for efficient drawing of groups of lines or polygons
:mod:`matplotlib.colors`
classes for interpreting color specifications and for making
colormaps
:mod:`matplotlib.cm`
colormaps and the :class:`~matplotlib.image.ScalarMappable`
mixin class for providing color mapping functionality to other
classes
:mod:`matplotlib.ticker`
classes for calculating tick mark locations and for formatting
tick labels
:mod:`matplotlib.backends`
a subpackage with modules for various gui libraries and output
formats
The base matplotlib namespace includes:
:data:`~matplotlib.rcParams`
a global dictionary of default configuration settings. It is
    initialized by code which may be overridden by a matplotlibrc
file.
:func:`~matplotlib.rc`
a function for setting groups of rcParams values
:func:`~matplotlib.use`
a function for setting the matplotlib backend. If used, this
function must be called immediately after importing matplotlib
for the first time. In particular, it must be called
**before** importing pylab (if pylab is imported).
matplotlib is written by John D. Hunter (jdh2358 at gmail.com) and a
host of others.
"""
from __future__ import generators
__version__ = '0.98.5.2'
__revision__ = '$Revision: 6660 $'
__date__ = '$Date: 2008-12-18 06:10:51 -0600 (Thu, 18 Dec 2008) $'
import os, re, shutil, subprocess, sys, warnings
import distutils.sysconfig
import distutils.version
NEWCONFIG = False
# Needed for toolkit setuptools support
if 0:
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
pass # must not have setuptools
if not hasattr(sys, 'argv'): # for modpython
sys.argv = ['modpython']
"""
Manage user customizations through a rc file.
The default file location is given in the following order
- environment variable MATPLOTLIBRC
- HOME/.matplotlib/matplotlibrc if HOME is defined
- PATH/matplotlibrc where PATH is the return value of
get_data_path()
"""
import sys, os, tempfile
from rcsetup import defaultParams, validate_backend, validate_toolbar
from rcsetup import validate_cairo_format
major, minor1, minor2, s, tmp = sys.version_info
_python24 = major>=2 and minor1>=4
# the havedate check was a legacy from old matplotlib which preceded
# datetime support
_havedate = True
#try:
# import pkg_resources # pkg_resources is part of setuptools
#except ImportError: _have_pkg_resources = False
#else: _have_pkg_resources = True
if not _python24:
raise ImportError('matplotlib requires Python 2.4 or later')
import numpy
nn = numpy.__version__.split('.')
if not (int(nn[0]) >= 1 and int(nn[1]) >= 1):
raise ImportError(
'numpy 1.1 or later is required; you have %s' % numpy.__version__)
def is_string_like(obj):
if hasattr(obj, 'shape'): return 0
try: obj + ''
except (TypeError, ValueError): return 0
return 1
def _is_writable_dir(p):
"""
p is a string pointing to a putative writable dir -- return True p
is such a string, else False
"""
try: p + '' # test is string like
except TypeError: return False
try:
t = tempfile.TemporaryFile(dir=p)
t.write('1')
t.close()
except OSError: return False
else: return True
class Verbose:
"""
A class to handle reporting. Set the fileo attribute to any file
instance to handle the output. Default is sys.stdout
"""
levels = ('silent', 'helpful', 'debug', 'debug-annoying')
vald = dict( [(level, i) for i,level in enumerate(levels)])
# parse the verbosity from the command line; flags look like
# --verbose-silent or --verbose-helpful
_commandLineVerbose = None
for arg in sys.argv[1:]:
if not arg.startswith('--verbose-'): continue
_commandLineVerbose = arg[10:]
def __init__(self):
self.set_level('silent')
self.fileo = sys.stdout
def set_level(self, level):
'set the verbosity to one of the Verbose.levels strings'
if self._commandLineVerbose is not None:
level = self._commandLineVerbose
if level not in self.levels:
raise ValueError('Illegal verbose string "%s". Legal values are %s'%(level, self.levels))
self.level = level
def set_fileo(self, fname):
std = {
'sys.stdout': sys.stdout,
'sys.stderr': sys.stderr,
}
if fname in std:
self.fileo = std[fname]
else:
try:
fileo = file(fname, 'w')
except IOError:
raise ValueError('Verbose object could not open log file "%s" for writing.\nCheck your matplotlibrc verbose.fileo setting'%fname)
else:
self.fileo = fileo
def report(self, s, level='helpful'):
"""
print message s to self.fileo if self.level>=level. Return
value indicates whether a message was issued
"""
if self.ge(level):
print >>self.fileo, s
return True
return False
def wrap(self, fmt, func, level='helpful', always=True):
"""
return a callable function that wraps func and reports it
output through the verbose handler if current verbosity level
is higher than level
if always is True, the report will occur on every function
call; otherwise only on the first time the function is called
"""
assert callable(func)
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
if (always or not wrapper._spoke):
spoke = self.report(fmt%ret, level)
if not wrapper._spoke: wrapper._spoke = spoke
return ret
wrapper._spoke = False
wrapper.__doc__ = func.__doc__
return wrapper
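    # Usage pattern appearing later in this module:
    #   get_home = verbose.wrap('$HOME=%s', _get_home, always=False)
    # wraps _get_home so that its result is reported through the verbose handler
    # only the first time it is called (because always=False).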
def ge(self, level):
'return true if self.level is >= level'
return self.vald[self.level]>=self.vald[level]
verbose=Verbose()
def checkdep_dvipng():
try:
s = subprocess.Popen(['dvipng','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
line = s.stdout.readlines()[1]
v = line.split()[-1]
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_ghostscript():
try:
if sys.platform == 'win32':
command_args = ['gswin32c', '--version']
else:
command_args = ['gs', '--version']
s = subprocess.Popen(command_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
v = s.stdout.read()[:-1]
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_tex():
try:
s = subprocess.Popen(['tex','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
line = s.stdout.readlines()[0]
pattern = '3\.1\d+'
match = re.search(pattern, line)
v = match.group(0)
return v
except (IndexError, ValueError, AttributeError, OSError):
return None
def checkdep_pdftops():
try:
s = subprocess.Popen(['pdftops','-v'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for line in s.stderr:
if 'version' in line:
v = line.split()[-1]
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def compare_versions(a, b):
"return True if a is greater than or equal to b"
if a:
a = distutils.version.LooseVersion(a)
b = distutils.version.LooseVersion(b)
if a>=b: return True
else: return False
else: return False
def checkdep_ps_distiller(s):
if not s:
return False
flag = True
gs_req = '7.07'
gs_sugg = '7.07'
gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later '
'is recommended to use the ps.usedistiller option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller option can not be used '
'unless ghostscript-%s or later is installed on your system') % gs_req)
if s == 'xpdf':
pdftops_req = '3.0'
pdftops_req_alt = '0.9' # poppler version numbers, ugh
pdftops_v = checkdep_pdftops()
if compare_versions(pdftops_v, pdftops_req):
pass
elif compare_versions(pdftops_v, pdftops_req_alt) and not \
compare_versions(pdftops_v, '1.0'):
pass
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller can not be set to '
'xpdf unless xpdf-%s or later is installed on your system') % pdftops_req)
if flag:
return s
else:
return False
def checkdep_usetex(s):
if not s:
return False
tex_req = '3.1415'
gs_req = '7.07'
gs_sugg = '7.07'
dvipng_req = '1.5'
flag = True
tex_v = checkdep_tex()
if compare_versions(tex_v, tex_req): pass
else:
flag = False
warnings.warn(('matplotlibrc text.usetex option can not be used '
'unless TeX-%s or later is '
'installed on your system') % tex_req)
dvipng_v = checkdep_dvipng()
if compare_versions(dvipng_v, dvipng_req): pass
else:
flag = False
warnings.warn( 'matplotlibrc text.usetex can not be used with *Agg '
'backend unless dvipng-1.5 or later is '
'installed on your system')
gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later is '
'recommended for use with the text.usetex '
'option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc text.usetex can not be used '
'unless ghostscript-%s or later is '
'installed on your system') % gs_req)
return flag
def _get_home():
"""Find user's home directory if possible.
Otherwise raise error.
:see: http://mail.python.org/pipermail/python-list/2005-February/263921.html
"""
path=''
try:
path=os.path.expanduser("~")
except:
pass
if not os.path.isdir(path):
for evar in ('HOME', 'USERPROFILE', 'TMP'):
try:
path = os.environ[evar]
if os.path.isdir(path):
break
except: pass
if path:
return path
else:
raise RuntimeError('please define environment variable $HOME')
get_home = verbose.wrap('$HOME=%s', _get_home, always=False)
def _get_configdir():
"""
Return the string representing the configuration dir.
default is HOME/.matplotlib. you can override this with the
MPLCONFIGDIR environment variable
"""
configdir = os.environ.get('MPLCONFIGDIR')
if configdir is not None:
if not _is_writable_dir(configdir):
raise RuntimeError('Could not write to MPLCONFIGDIR="%s"'%configdir)
return configdir
h = get_home()
p = os.path.join(get_home(), '.matplotlib')
if os.path.exists(p):
if not _is_writable_dir(p):
raise RuntimeError("'%s' is not a writable dir; you must set %s/.matplotlib to be a writable dir. You can also set environment variable MPLCONFIGDIR to any writable directory where you want matplotlib data stored "% (h, h))
else:
if not _is_writable_dir(h):
raise RuntimeError("Failed to create %s/.matplotlib; consider setting MPLCONFIGDIR to a writable directory for matplotlib configuration data"%h)
os.mkdir(p)
return p
get_configdir = verbose.wrap('CONFIGDIR=%s', _get_configdir, always=False)
def _get_data_path():
'get the path to matplotlib data'
if 'MATPLOTLIBDATA' in os.environ:
path = os.environ['MATPLOTLIBDATA']
if not os.path.isdir(path):
raise RuntimeError('Path in environment MATPLOTLIBDATA not a directory')
return path
path = os.sep.join([os.path.dirname(__file__), 'mpl-data'])
if os.path.isdir(path): return path
# setuptools' namespace_packages may highjack this init file
# so need to try something known to be in matplotlib, not basemap
import matplotlib.afm
path = os.sep.join([os.path.dirname(matplotlib.afm.__file__), 'mpl-data'])
if os.path.isdir(path): return path
# py2exe zips pure python, so still need special check
if getattr(sys,'frozen',None):
path = os.path.join(os.path.split(sys.path[0])[0], 'mpl-data')
if os.path.isdir(path): return path
else:
# Try again assuming we need to step up one more directory
path = os.path.join(os.path.split(os.path.split(sys.path[0])[0])[0],
'mpl-data')
if os.path.isdir(path): return path
else:
# Try again assuming sys.path[0] is a dir not a exe
path = os.path.join(sys.path[0], 'mpl-data')
if os.path.isdir(path): return path
raise RuntimeError('Could not find the matplotlib data files')
def _get_data_path_cached():
if defaultParams['datapath'][0] is None:
defaultParams['datapath'][0] = _get_data_path()
return defaultParams['datapath'][0]
get_data_path = verbose.wrap('matplotlib data path %s', _get_data_path_cached,
always=False)
def get_example_data(fname):
"""
return a filehandle to one of the example files in mpl-data/example
*fname*
the name of one of the files in mpl-data/example
"""
datadir = os.path.join(get_data_path(), 'example')
fullpath = os.path.join(datadir, fname)
if not os.path.exists(fullpath):
raise IOError('could not find matplotlib example file "%s" in data directory "%s"'%(
fname, datadir))
return file(fullpath, 'rb')
def get_py2exe_datafiles():
datapath = get_data_path()
head, tail = os.path.split(datapath)
d = {}
for root, dirs, files in os.walk(datapath):
# Need to explicitly remove cocoa_agg files or py2exe complains
        # NOTE I don't know why, but do as previous version
if 'Matplotlib.nib' in files:
files.remove('Matplotlib.nib')
files = [os.path.join(root, filename) for filename in files]
root = root.replace(tail, 'mpl-data')
root = root[root.index('mpl-data'):]
d[root] = files
return d.items()
def matplotlib_fname():
"""
Return the path to the rc file
Search order:
* current working dir
* environ var MATPLOTLIBRC
* HOME/.matplotlib/matplotlibrc
* MATPLOTLIBDATA/matplotlibrc
"""
oldname = os.path.join( os.getcwd(), '.matplotlibrc')
if os.path.exists(oldname):
print >> sys.stderr, """\
WARNING: Old rc filename ".matplotlibrc" found in working dir
    and renamed to new default rc file name "matplotlibrc"
    (no leading "dot"). """
shutil.move('.matplotlibrc', 'matplotlibrc')
home = get_home()
oldname = os.path.join( home, '.matplotlibrc')
if os.path.exists(oldname):
configdir = get_configdir()
newname = os.path.join(configdir, 'matplotlibrc')
print >> sys.stderr, """\
WARNING: Old rc filename "%s" found and renamed to
new default rc file name "%s"."""%(oldname, newname)
shutil.move(oldname, newname)
fname = os.path.join( os.getcwd(), 'matplotlibrc')
if os.path.exists(fname): return fname
if 'MATPLOTLIBRC' in os.environ:
path = os.environ['MATPLOTLIBRC']
if os.path.exists(path):
fname = os.path.join(path, 'matplotlibrc')
if os.path.exists(fname):
return fname
fname = os.path.join(get_configdir(), 'matplotlibrc')
if os.path.exists(fname): return fname
path = get_data_path() # guaranteed to exist or raise
fname = os.path.join(path, 'matplotlibrc')
if not os.path.exists(fname):
warnings.warn('Could not find matplotlibrc; using defaults')
return fname
_deprecated_map = {
'text.fontstyle': 'font.style',
'text.fontangle': 'font.style',
'text.fontvariant': 'font.variant',
'text.fontweight': 'font.weight',
'text.fontsize': 'font.size',
'tick.size' : 'tick.major.size',
}
class RcParams(dict):
"""
A dictionary object including validation
validating functions are defined and associated with rc parameters in
:mod:`matplotlib.rcsetup`
"""
validate = dict([ (key, converter) for key, (default, converter) in \
defaultParams.iteritems() ])
def __setitem__(self, key, val):
try:
if key in _deprecated_map.keys():
alt = _deprecated_map[key]
warnings.warn('%s is deprecated in matplotlibrc. Use %s \
instead.'% (key, alt))
key = alt
cval = self.validate[key](val)
dict.__setitem__(self, key, cval)
except KeyError:
raise KeyError('%s is not a valid rc parameter.\
See rcParams.keys() for a list of valid parameters.'%key)
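# Illustrative example of the validating dict above (assuming the usual validators
# from matplotlib.rcsetup): rcParams['lines.linewidth'] = '2' is accepted and coerced
# to the float 2.0 by its validator, while assigning to a made-up key such as
# 'lines.typo' raises KeyError.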
def rc_params(fail_on_error=False):
'Return the default params updated from the values in the rc file'
fname = matplotlib_fname()
if not os.path.exists(fname):
# this should never happen, default in mpl-data should always be found
message = 'could not find rc file; returning defaults'
ret = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
warnings.warn(message)
return ret
cnt = 0
rc_temp = {}
for line in file(fname):
cnt += 1
strippedline = line.split('#',1)[0].strip()
if not strippedline: continue
tup = strippedline.split(':',1)
if len(tup) !=2:
warnings.warn('Illegal line #%d\n\t%s\n\tin file "%s"'%\
(cnt, line, fname))
continue
key, val = tup
key = key.strip()
val = val.strip()
if key in rc_temp:
warnings.warn('Duplicate key in file "%s", line #%d'%(fname,cnt))
rc_temp[key] = (val, line, cnt)
ret = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
for key in ('verbose.level', 'verbose.fileo'):
if key in rc_temp:
val, line, cnt = rc_temp.pop(key)
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception, msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
verbose.set_level(ret['verbose.level'])
verbose.set_fileo(ret['verbose.fileo'])
for key, (val, line, cnt) in rc_temp.iteritems():
if key in defaultParams:
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception, msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
else:
print >> sys.stderr, """
Bad key "%s" on line %d in
%s.
You probably need to get an updated matplotlibrc file from
http://matplotlib.sf.net/_static/matplotlibrc or from the matplotlib source
distribution""" % (key, cnt, fname)
if ret['datapath'] is None:
ret['datapath'] = get_data_path()
if not ret['text.latex.preamble'] == ['']:
verbose.report("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
"""% '\n'.join(ret['text.latex.preamble']), 'helpful')
verbose.report('loaded rc file %s'%fname)
return ret
# this is the instance used by the matplotlib classes
rcParams = rc_params()
rcParamsDefault = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
rcParams['ps.usedistiller'] = checkdep_ps_distiller(rcParams['ps.usedistiller'])
rcParams['text.usetex'] = checkdep_usetex(rcParams['text.usetex'])
def rc(group, **kwargs):
"""
Set the current rc params. Group is the grouping for the rc, eg.
for ``lines.linewidth`` the group is ``lines``, for
``axes.facecolor``, the group is ``axes``, and so on. Group may
also be a list or tuple of group names, eg. (*xtick*, *ytick*).
*kwargs* is a dictionary attribute name/value pairs, eg::
rc('lines', linewidth=2, color='r')
sets the current rc params and is equivalent to::
rcParams['lines.linewidth'] = 2
rcParams['lines.color'] = 'r'
The following aliases are available to save typing for interactive
users:
===== =================
Alias Property
===== =================
'lw' 'linewidth'
'ls' 'linestyle'
'c' 'color'
'fc' 'facecolor'
'ec' 'edgecolor'
'mew' 'markeredgewidth'
'aa' 'antialiased'
===== =================
Thus you could abbreviate the above rc command as::
rc('lines', lw=2, c='r')
Note you can use python's kwargs dictionary facility to store
dictionaries of default parameters. Eg, you can customize the
font rc as follows::
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : 'larger'}
rc('font', **font) # pass in the font dict as kwargs
This enables you to easily switch between several configurations.
Use :func:`~matplotlib.pyplot.rcdefaults` to restore the default
rc params after changes.
"""
aliases = {
'lw' : 'linewidth',
'ls' : 'linestyle',
'c' : 'color',
'fc' : 'facecolor',
'ec' : 'edgecolor',
'mew' : 'markeredgewidth',
'aa' : 'antialiased',
}
if is_string_like(group):
group = (group,)
for g in group:
for k,v in kwargs.items():
name = aliases.get(k) or k
key = '%s.%s' % (g, name)
if key not in rcParams:
raise KeyError('Unrecognized key "%s" for group "%s" and name "%s"' %
(key, g, name))
rcParams[key] = v
def rcdefaults():
"""
Restore the default rc params - the ones that were created at
matplotlib load time.
"""
rcParams.update(rcParamsDefault)
if NEWCONFIG:
#print "importing from reorganized config system!"
try:
from config import rcParams, rcdefaults, mplConfig, save_config
verbose.set_level(rcParams['verbose.level'])
verbose.set_fileo(rcParams['verbose.fileo'])
except:
from config import rcParams, rcdefaults
_use_error_msg = """ This call to matplotlib.use() has no effect
because the backend has already been chosen;
matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
or matplotlib.backends is imported for the first time.
"""
def use(arg, warn=True):
"""
Set the matplotlib backend to one of the known backends.
The argument is case-insensitive. For the Cairo backend,
the argument can have an extension to indicate the type of
output. Example:
use('cairo.pdf')
will specify a default of pdf output generated by Cairo.
Note: this function must be called *before* importing pylab for
the first time; or, if you are not using pylab, it must be called
before importing matplotlib.backends. If warn is True, a warning
    is issued if you try and call this after pylab or pyplot have been
loaded. In certain black magic use cases, eg
pyplot.switch_backends, we are doing the reloading necessary to
make the backend switch work (in some cases, eg pure image
    backends) so one can set warn=False to suppress the warnings
"""
if 'matplotlib.backends' in sys.modules:
if warn: warnings.warn(_use_error_msg)
return
arg = arg.lower()
if arg.startswith('module://'):
name = arg
else:
be_parts = arg.split('.')
name = validate_backend(be_parts[0])
rcParams['backend'] = name
if name == 'cairo' and len(be_parts) > 1:
rcParams['cairo.format'] = validate_cairo_format(be_parts[1])
def get_backend():
"Returns the current backend"
return rcParams['backend']
def interactive(b):
"""
Set interactive mode to boolean b.
If b is True, then draw after every plotting command, eg, after xlabel
"""
rcParams['interactive'] = b
def is_interactive():
'Return true if plot mode is interactive'
b = rcParams['interactive']
return b
def tk_window_focus():
"""Return true if focus maintenance under TkAgg on win32 is on.
This currently works only for python.exe and IPython.exe.
Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on."""
if rcParams['backend'] != 'TkAgg':
return False
return rcParams['tk.window_focus']
# Now allow command line to override
# Allow command line access to the backend with -d (matlab compatible
# flag)
for s in sys.argv[1:]:
if s.startswith('-d') and len(s) > 2: # look for a -d flag
try:
use(s[2:])
except (KeyError, ValueError):
pass
# we don't want to assume all -d flags are backends, eg -debug
verbose.report('matplotlib version %s'%__version__)
verbose.report('verbose.level %s'%verbose.level)
verbose.report('interactive is %s'%rcParams['interactive'])
verbose.report('units is %s'%rcParams['units'])
verbose.report('platform is %s'%sys.platform)
verbose.report('loaded modules: %s'%sys.modules.keys(), 'debug')
| agpl-3.0 |
isrohutamahopetechnik/MissionPlanner | Lib/site-packages/numpy/fft/fftpack.py | 59 | 39653 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
__all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn',
'refft', 'irefft','refftn','irefftn', 'refft2', 'irefft2']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache = _fft_cache ):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified." % n)
try:
wsave = fft_cache[n]
except(KeyError):
wsave = init_function(n)
fft_cache[n] = wsave
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0,n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0,s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
return r
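# Illustrative note on the helper above: with a length-5 input, _raw_fft(a, n=8, ...)
# zero-pads the transform axis to 8 samples before calling the work function, while
# n=4 silently drops the trailing sample; this is the cropping/padding behaviour that
# the public fft/ifft/rfft wrappers document for their `n` argument.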
def fft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
def ifft(a, n=None, axis=-1):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
a = asarray(a).astype(complex)
if n is None:
n = shape(a)[axis]
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n
def rfft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is ``n/2+1``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermite-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n/2+1``.
When ``A = rfft(a)``, ``A[0]`` contains the zero-frequency term, which
must be purely real due to the Hermite symmetry.
If `n` is even, ``A[-1]`` contains the term for frequencies ``n/2`` and
``-n/2``, and must also be purely real. If `n` is odd, ``A[-1]``
contains the term for frequency ``A[(n-1)/2]``, and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
a = asarray(a).astype(float)
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache)
def irfft(a, n=None, axis=-1):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermite-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n/2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input (along the axis specified by `axis`).
axis : int, optional
Axis over which to compute the inverse FFT.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where `m` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermite-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache) / n
def hfft(a, n=None, axis=-1):
"""
Compute the FFT of a signal whose spectrum has Hermitian symmetry.
Parameters
----------
a : array_like
The input array.
n : int, optional
The length of the FFT.
axis : int, optional
The axis over which to compute the FFT, assuming Hermitian symmetry
of the spectrum. Default is the last axis.
Returns
-------
out : ndarray
The transformed input.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a, 2*len(a) - 1)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return irfft(conjugate(a), n, axis) * n
def ihfft(a, n=None, axis=-1):
"""
Compute the inverse FFT of a signal whose spectrum has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
axis : int, optional
Axis over which to compute the inverse FFT, assuming Hermitian
symmetry of the spectrum. Default is the last axis.
Returns
-------
out : ndarray
The transformed input.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a, 2*len(a) - 1)) == a``, within numerical accuracy.
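Examples
--------
A short round-trip sketch: ``[15, -4, 0, -1, 0, -4]`` is ``hfft([1, 2, 3, 4])``,
and `ihfft` recovers the non-redundant half-sequence (the signs of the zero
imaginary parts may vary):
>>> np.fft.ihfft([15, -4, 0, -1, 0, -4])
array([ 1.-0.j,  2.-0.j,  3.-0.j,  4.-0.j])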
"""
a = asarray(a).astype(float)
if n is None:
n = shape(a)[axis]
return conjugate(rfft(a, n, axis))/n
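# Helper: normalize the shape `s` and `axes` arguments shared by the
# n-dimensional transforms below. When `invreal` is set and no shape was
# given, the length of the last transformed axis is inferred as 2*(m-1),
# as required by the inverse real transforms.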
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = range(-len(s), 0)
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[axes[-1]] = (s[axes[-1]] - 1) * 2
return s, axes
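# Helper: apply the one-dimensional transform `function` along each of the
# requested axes in turn, starting from the last axis listed.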
def _raw_fftnd(a, s=None, axes=None, function=fft):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = range(len(axes))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii])
return a
def fftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft)
def ifftn(a, s=None, axes=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft)
def fft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50. +0.j        ,   0. +0.j,   0. +0.j,   0. +0.j,   0. +0.j],
[-12.5+17.20477401j,   0. +0.j,   0. +0.j,   0. +0.j,   0. +0.j],
[-12.5 +4.0614962j,   0. +0.j,   0. +0.j,   0. +0.j,   0. +0.j],
[-12.5 -4.0614962j,   0. +0.j,   0. +0.j,   0. +0.j,   0. +0.j],
[-12.5-17.20477401j,   0. +0.j,   0. +0.j,   0. +0.j,   0. +0.j]])
"""
return _raw_fftnd(a, s, axes, fft)
def ifft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft)
def rfftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
a = asarray(a).astype(float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1])
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii])
return a
def rfft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
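Examples
--------
A minimal sketch, analogous to the `rfftn` example:
>>> a = np.ones((2, 2))
>>> np.fft.rfft2(a)
array([[ 4.+0.j,  0.+0.j],
[ 0.+0.j,  0.+0.j]])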
"""
return rfftn(a, s, axes)
def irfftn(a, s=None, axes=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input (along the
axes specified by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where `m` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `ifftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
a = asarray(a).astype(complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii])
a = irfft(a, s[-1], axes[-1])
return a
def irfft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
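Examples
--------
A minimal sketch, analogous to the `irfftn` example:
>>> a = np.zeros((2, 2))
>>> a[0, 0] = 4
>>> np.fft.irfft2(a)
array([[ 1.,  1.],
[ 1.,  1.]])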
"""
return irfftn(a, s, axes)
# Deprecated names
from numpy import deprecate
refft = deprecate(rfft, 'refft', 'rfft')
irefft = deprecate(irfft, 'irefft', 'irfft')
refft2 = deprecate(rfft2, 'refft2', 'rfft2')
irefft2 = deprecate(irfft2, 'irefft2', 'irfft2')
refftn = deprecate(rfftn, 'refftn', 'rfftn')
irefftn = deprecate(irfftn, 'irefftn', 'irfftn')
| gpl-3.0 |
nesterione/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 57 | 16523 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with numerical rank < n_features) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False) | bsd-3-clause |
florian-f/sklearn | examples/linear_model/plot_polynomial_interpolation.py | 4 | 1648 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
by manually adding non-linear features. Kernel methods extend this idea and can
induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# License: BSD Style.
import numpy as np
import pylab as pl
from sklearn.linear_model import Ridge
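# A small illustration of the Vandermonde matrix described in the docstring
# (note that np.vander orders the powers from highest to lowest by default):
#
#     np.vander([2., 3.], 3)
#     -> array([[ 4.,  2.,  1.],
#               [ 9.,  3.,  1.]])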
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
pl.plot(x_plot, f(x_plot), label="ground truth")
pl.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
ridge = Ridge()
ridge.fit(np.vander(x, degree + 1), y)
pl.plot(x_plot, ridge.predict(np.vander(x_plot, degree + 1)),
label="degree %d" % degree)
pl.legend(loc='lower left')
pl.show()
| bsd-3-clause |
grantvk/aima-python | submissions/Johnson/myNN.py | 13 | 4766 | import traceback
from sklearn.neural_network import MLPClassifier
from submissions.Johnson import education
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
moneyvsthings = DataFrame()
joint = {}
educations = education.get_all_states()
for each in educations:
try:
st = each['state']
expend = each['data']['funding']['expenditures']
revenue = each['data']['funding']['revenue']
ratio = each['data']['enrollment']['student teacher ratio']
eligible = each['data']['enrollment']['students']['other']['free lunch eligible']
grade8mathscore = each['data']['score']['math'][1]['scale score']
enrollment = each['data']['enrollment']['students']['all']
net = revenue - expend
joint[st] = {}
joint[st]['ST']= st
joint[st]['Expend'] = expend
joint[st]['S/T Ratio'] = ratio
joint[st]['Net Gain'] = net
joint[st]['Free Lunch Eligible'] = eligible
joint[st]['Enrollment'] = enrollment
joint[st]['8th Grade Math Score'] = grade8mathscore
except:
traceback.print_exc()
for st in joint:
# choose the input values
moneyvsthings.data.append([
# countyST,
# intersection[countyST]['ST'],
# intersection[countyST]['Trump'],
#joint[st]['Free Lunch Eligible'],
joint[st]['S/T Ratio'],
joint[st]['8th Grade Math Score'],
joint[st]['Enrollment'],
#joint[st]['Net Gain']
])
moneyvsthings.feature_names = [
# 'countyST',
# 'ST',
# 'Trump',
#'Free Lunch Eligible',
'S/T Ratio',
'Grade 8 Math Scores',
'Enrollment'
#'Net Gain'
]
moneyvsthings.target = []
def netpos(number):
if number > 10000000000:
return 1
return 0
for st in joint:
# choose the target
ispos = netpos(joint[st]['Expend'])
moneyvsthings.target.append(ispos)
moneyvsthings.target_names = [
'Small Expenditure',
'Large Expenditure',
#'Free Lunch Eligible <= 300,000',
#'Free Lunch Eligible > 300,000'
]
mlpc = MLPClassifier(
# hidden_layer_sizes = (100,),
# activation = 'relu',
solver='sgd', # 'adam',
# alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
# shuffle = True,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
mlpc2 = MLPClassifier(
#hidden_layer_sizes = (100,),
# activation = 'relu',
solver='adam', # 'adam',
#alpha = 0.0001,
# batch_size='auto',
learning_rate = 'constant', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
# shuffle = True,
# random_state = None,
# tol = 1e-4,
#verbose = True,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
ExpendScaled = DataFrame()
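# Per-column min-max scaling helpers: setupScales records each feature's
# minimum and maximum over the grid, and scaleGrid maps every value to the
# [0, 1] range (e.g. a value of 5 with column min 0 and max 10 becomes 0.5).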
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
setupScales(moneyvsthings.data)
ExpendScaled.data = scaleGrid(moneyvsthings.data)
ExpendScaled.feature_names = moneyvsthings.feature_names
ExpendScaled.target = moneyvsthings.target
ExpendScaled.target_names = moneyvsthings.target_names
Examples = {
'ExpendDefault': {
'frame' : moneyvsthings,
},
'ExpendDefault(+MLPC)': {
'frame' : moneyvsthings,
'mlpc': mlpc
},
'ExpendDefault(+MLPC2)': {
'frame' : moneyvsthings,
'mlpc': mlpc2
},
'ExpendScaled': {
'frame' : ExpendScaled,
},
'ExpendScaled(+MLPC)': {
'frame' : ExpendScaled,
'mlpc': mlpc
},
'ExpendScaled(+MLPC2)': {
'frame' : ExpendScaled,
'mlpc': mlpc2
}
} | mit |
magic2du/contact_matrix | Contact_maps/mnist_psuedo_ipython_dl_ppi/code/DL_Stacked_Model_Mnist_Psuedo_11_14_2014.py | 2 | 21552 |
# coding: utf-8
# In[6]:
# this part imports libs and loads the MNIST data from mnist.pkl.gz
import sys
sys.path.append('../../../libs/')
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pandas as pd
import pickle
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
import sklearn
import scipy.stats as ss
import cPickle
import gzip
import os
import time
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import os.path
import IO_class
from IO_class import FileOperator
from sklearn import cross_validation
import sklearn
import numpy as np
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pandas as pd
import pdb
import pickle
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from sklearn import preprocessing
import sklearn
import scipy.stats as ss
from sklearn.svm import LinearSVC
import random
from DL_libs import *
from itertools import izip #new
import math
from sklearn.svm import SVC
# In[53]:
# set settings for this script
settings = {}
settings['fisher_mode'] = 'FisherM1'
settings['predicted_score'] = False
settings['reduce_ratio'] = 1
settings['SVM'] = 1
settings['SVM_RBF'] = 1
settings['SVM_POLY'] = 1
settings['DL'] = 1
settings['Log'] = 1
settings['SAE_SVM'] = 1
settings['SAE_SVM_RBF'] = 1
settings['SAE_SVM_POLY'] = 1
settings['DL_S'] = 1
settings['SAE_S_SVM'] = 1
settings['SAE_S_SVM_RBF'] = 1
settings['SAE_S_SVM_POLY'] = 1
settings['number_iterations'] =30
settings['finetune_lr'] = 0.1
settings['batch_size'] = 30
settings['pretraining_interations'] = 50000
settings['pretrain_lr'] = 0.001
settings['training_epochs'] = 300
settings['hidden_layers_sizes'] = [200, 200]
settings['corruption_levels'] = [0, 0]
import logging
import time
current_date = time.strftime("%m_%d_%Y")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logname = 'log_DL_handwritten_digits' + current_date + '.log'
handler = logging.FileHandler(logname)
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
#logger.debug('This message should go to the log file')
for key, value in settings.items():
logger.info(key +': '+ str(value))
# In[54]:
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
X_train,y_train = train_set
X_valid,y_valid = valid_set
X_total=np.vstack((X_train, X_valid))
X_total = np.array(X_total, dtype= theano.config.floatX)
print 'sample size', X_total.shape
y_total = np.concatenate([y_train, y_valid])
# In[62]:
def performance_score(target_label, predicted_label, predicted_score = False, print_report = True):
""" get performance matrix for prediction
Attributes:
target_label: int 0, 1
predicted_label: 0, 1 or ranking
predicted_score: bool. If False, predicted_label holds 0/1 class labels. If True, predicted_label is a ranking/score, so the AUC score is also computed.
print_report: if True, print the performance on screen
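Example (a minimal sketch with made-up labels):
performance_score([1, 0, 1, 1], [1, 0, 0, 1], print_report=False)
returns {'accuracy': 0.75, 'precision': 1.0, 'recall': 0.67 (approx.)}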
"""
import sklearn
from sklearn.metrics import roc_auc_score
score = {}
if predicted_score == False:
score['accuracy'] = sklearn.metrics.accuracy_score(target_label, predicted_label)
score['precision'] = sklearn.metrics.precision_score(target_label, predicted_label, pos_label=1)
score['recall'] = sklearn.metrics.recall_score(target_label, predicted_label, pos_label=1)
if predicted_score == True:
auc_score = roc_auc_score(target_label, predicted_label)
score['auc_score'] = auc_score
predicted_label = [x >= 0.5 for x in predicted_label]  # threshold the scores at 0.5 for the label-based metrics
score['accuracy'] = sklearn.metrics.accuracy_score(target_label, predicted_label)
score['precision'] = sklearn.metrics.precision_score(target_label, predicted_label, pos_label=1)
score['recall'] = sklearn.metrics.recall_score(target_label, predicted_label, pos_label=1)
if print_report == True:
for key, value in score.iteritems():
print key, '{percent:.1%}'.format(percent=value)
return score
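# Append performance rows to 'report_<fname>.csv', writing a header line
# first if the file does not exist yet; each element of `arguments` becomes
# one CSV row (subset number, method, isTest flag, then the metric values).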
def saveAsCsv(predicted_score, fname, score_dict, arguments): #new
newfile = False
if os.path.isfile('report_' + fname + '.csv'):
pass
else:
newfile = True
csvfile = open('report_' + fname + '.csv', 'a+')
writer = csv.writer(csvfile)
if newfile == True:
if predicted_score == False:
writer.writerow(['no.', 'method', 'isTest']+ score_dict.keys()) #, 'AUC'])
else:
writer.writerow(['no.', 'method', 'isTest'] + score_dict.keys())
for arg in arguments:
writer.writerow([i for i in arg])
csvfile.close()
def run_models(settings = None):
analysis_scr = []
predicted_score = settings['predicted_score']
for subset_no in xrange(1,settings['number_iterations']+1):
print("Subset:", subset_no)
################## generate data ###################
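# Sample 100,000 random (A, B) index pairs from the 60,000 MNIST images.
# A pair is labelled positive when digit A's label exceeds digit B's by
# exactly 1; the negatives are subsampled to match the number of positives,
# and each example concatenates the two 784-pixel images into a single
# 1568-dimensional input vector.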
array_A =[]
array_B =[]
for i in range(100000):
array_A.append(np.random.random_integers(0, 59999))
array_B.append(np.random.random_integers(0, 59999))
pos_index = []
neg_index = []
for index in xrange(100000):
if y_total[array_A[index]] - y_total[array_B[index]] == 1:
pos_index.append(index)
else:
neg_index.append(index)
print 'number of positive examples', len(pos_index)
selected_neg_index= neg_index[ : len(pos_index)]
array_A = np.array(array_A)
array_B = np.array(array_B)
index_for_positive_image_A = array_A[pos_index]
index_for_positive_image_B = array_B[pos_index]
index_for_neg_image_A = array_A[selected_neg_index]
index_for_neg_image_B = array_B[selected_neg_index]
X_pos_A = X_total[index_for_positive_image_A]
X_pos_B = X_total[index_for_positive_image_B]
X_pos_whole = np.hstack((X_pos_A,X_pos_B))
X_neg_A = X_total[index_for_neg_image_A]
X_neg_B = X_total[index_for_neg_image_B]
X_neg_whole = np.hstack((X_neg_A, X_neg_B))
print X_pos_A.shape, X_pos_B.shape, X_pos_whole.shape
print X_neg_A.shape, X_neg_B.shape, X_neg_whole.shape
X_whole = np.vstack((X_pos_whole, X_neg_whole))
print X_whole.shape
y_pos = np.ones(X_pos_whole.shape[0])
y_neg = np.zeros(X_neg_whole.shape[0])
y_whole = np.concatenate([y_pos,y_neg])
print y_whole
x_train_pre_validation_minmax, x_test_minmax, y_train_pre_validation_minmax, y_test_minmax = train_test_split(X_whole,y_whole, test_size=0.2, random_state=211)
x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax = train_test_split(x_train_pre_validation_minmax,
y_train_pre_validation_minmax,\
test_size=0.2, random_state=21)
print x_train_minmax.shape, y_train_minmax.shape, x_validation_minmax.shape, y_validation_minmax.shape, x_test_minmax.shape, y_test_minmax.shape
train_X_reduced = x_train_minmax
train_y_reduced = y_train_minmax
test_X = x_test_minmax
test_y = y_test_minmax
###original data###
################ end of data ####################
if settings['SVM']:
print "SVM"
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, train_y_reduced)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append(( subset_no, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_RBF']:
print "SVM_RBF"
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(scaled_train_X, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no, 'SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no, 'SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_POLY']:
print "SVM_POLY"
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
L1_SVC_POLY_Selector = SVC(C=1, kernel='poly').fit(scaled_train_X, train_y_reduced)
predicted_test_y = L1_SVC_POLY_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append(( subset_no, 'SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_POLY_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no, 'SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['Log']:
print "Log"
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
log_clf_l2 = sklearn.linear_model.LogisticRegression(C=1, penalty='l2' )
log_clf_l2.fit(scaled_train_X, train_y_reduced)
predicted_test_y = log_clf_l2.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((subset_no, 'Log', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = log_clf_l2.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((subset_no, 'Log', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
# direct deep learning
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = settings['training_epochs']
hidden_layers_sizes = settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
if settings['DL']:
print "direct deep learning"
sda = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
test_predicted = sda.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((subset_no, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
training_predicted = sda.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((subset_no, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
####transformed original data####
x = train_X_reduced
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(train_X_reduced)
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax)
if settings['SAE_SVM']:
# SAE_SVM
print 'SAE followed by SVM'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_minmax_A, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_minmax_A)
isTest = True; #new
analysis_scr.append(( subset_no, 'SAE_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_minmax_A)
isTest = False; #new
analysis_scr.append(( subset_no, 'SAE_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_RBF']:
# SAE_SVM
print 'SAE followed by SVM RBF'
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_minmax_A, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_A)
isTest = True; #new
analysis_scr.append((subset_no, 'SAE_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_A)
isTest = False; #new
analysis_scr.append((subset_no, 'SAE_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_POLY']:
# SAE_SVM
print 'SAE followed by SVM POLY'
L1_SVC_RBF_Selector = SVC(C=1, kernel='poly').fit(new_x_train_minmax_A, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_A)
isTest = True; #new
analysis_scr.append((subset_no, 'SAE_SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_A)
isTest = False; #new
analysis_scr.append((subset_no, 'SAE_SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
#### separated transformed data ####
y_test = test_y
print 'deep learning using split network'
# get the new representation for A set. first 784-D
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes = [x/2 for x in hidden_layers_sizes], corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes = [x/2 for x in hidden_layers_sizes], corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
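    # the separately encoded A/B halves are concatenated back into whole feature
    # matrices so the split-network (DL_S / SAE_S_*) models below can consume them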
if settings['DL_S']:
# deep learning using split network
sda_transformed = trainSda(new_x_train_minmax_whole, y_train_minmax,
new_x_validationt_minmax_whole, y_validation_minmax ,
new_x_test_minmax_whole, y_test,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
test_predicted = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((subset_no, 'DL_S', isTest) + tuple(performance_score(y_test, test_predicted, predicted_score).values()))
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((subset_no, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, predicted_score).values()))
if settings['SAE_S_SVM']:
print 'SAE_S followed by SVM'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_minmax_whole, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_minmax_whole)
isTest = True; #new
analysis_scr.append(( subset_no, 'SAE_S_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_minmax_whole)
isTest = False; #new
analysis_scr.append(( subset_no, 'SAE_S_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_S_SVM_RBF']:
print 'SAE S followed by SVM RBF'
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_minmax_whole, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_whole)
isTest = True; #new
analysis_scr.append((subset_no, 'SAE_S_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_whole)
isTest = False; #new
analysis_scr.append((subset_no, 'SAE_S_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_S_SVM_POLY']:
# SAE_SVM
print 'SAE S followed by SVM POLY'
L1_SVC_RBF_Selector = SVC(C=1, kernel='poly').fit(new_x_train_minmax_whole, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_whole)
isTest = True; #new
analysis_scr.append((subset_no, 'SAE_S_SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_whole)
isTest = False; #new
analysis_scr.append((subset_no, 'SAE_S_SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
    report_name = 'DL_handwritten_digits' + '_size_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + str(training_epochs) + '_' + current_date
saveAsCsv(predicted_score, report_name, performance_score(test_y, predicted_test_y, predicted_score), analysis_scr)
# In[62]:
# In[63]:
run_models(settings)
# In[52]:
# In[48]:
x = logging._handlers.copy()
for i in x:
log.removeHandler(i)
i.flush()
i.close()
# In[ ]:
| gpl-2.0 |
aiguofer/bokeh | examples/app/crossfilter/main.py | 7 | 2700 | import pandas as pd
from bokeh.layouts import row, widgetbox
from bokeh.models import Select
from bokeh.palettes import Spectral5
from bokeh.plotting import curdoc, figure
from bokeh.sampledata.autompg import autompg
df = autompg.copy()
SIZES = list(range(6, 22, 3))
COLORS = Spectral5
ORIGINS = ['North America', 'Europe', 'Asia']
# data cleanup
df.cyl = [str(x) for x in df.cyl]
df.origin = [ORIGINS[x-1] for x in df.origin]
df['year'] = [str(x) for x in df.yr]
del df['yr']
df['mfr'] = [x.split()[0] for x in df.name]
df.loc[df.mfr=='chevy', 'mfr'] = 'chevrolet'
df.loc[df.mfr=='chevroelt', 'mfr'] = 'chevrolet'
df.loc[df.mfr=='maxda', 'mfr'] = 'mazda'
df.loc[df.mfr=='mercedes-benz', 'mfr'] = 'mercedes'
df.loc[df.mfr=='toyouta', 'mfr'] = 'toyota'
df.loc[df.mfr=='vokswagen', 'mfr'] = 'volkswagen'
df.loc[df.mfr=='vw', 'mfr'] = 'volkswagen'
del df['name']
columns = sorted(df.columns)
discrete = [x for x in columns if df[x].dtype == object]
continuous = [x for x in columns if x not in discrete]
quantileable = [x for x in continuous if len(df[x].unique()) > 20]
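# discrete columns become categorical axis options; quantileable columns can also be
# binned with pd.qcut to drive the size and color encodings in create_figure()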
def create_figure():
xs = df[x.value].values
ys = df[y.value].values
x_title = x.value.title()
y_title = y.value.title()
kw = dict()
if x.value in discrete:
kw['x_range'] = sorted(set(xs))
if y.value in discrete:
kw['y_range'] = sorted(set(ys))
kw['title'] = "%s vs %s" % (x_title, y_title)
p = figure(plot_height=600, plot_width=800, tools='pan,box_zoom,reset', **kw)
p.xaxis.axis_label = x_title
p.yaxis.axis_label = y_title
if x.value in discrete:
p.xaxis.major_label_orientation = pd.np.pi / 4
sz = 9
if size.value != 'None':
groups = pd.qcut(df[size.value].values, len(SIZES))
sz = [SIZES[xx] for xx in groups.codes]
c = "#31AADE"
if color.value != 'None':
groups = pd.qcut(df[color.value].values, len(COLORS))
c = [COLORS[xx] for xx in groups.codes]
p.circle(x=xs, y=ys, color=c, size=sz, line_color="white", alpha=0.6, hover_color='white', hover_alpha=0.5)
return p
def update(attr, old, new):
layout.children[1] = create_figure()
x = Select(title='X-Axis', value='mpg', options=columns)
x.on_change('value', update)
y = Select(title='Y-Axis', value='hp', options=columns)
y.on_change('value', update)
size = Select(title='Size', value='None', options=['None'] + quantileable)
size.on_change('value', update)
color = Select(title='Color', value='None', options=['None'] + quantileable)
color.on_change('value', update)
controls = widgetbox([x, y, color, size], width=200)
layout = row(controls, create_figure())
curdoc().add_root(layout)
curdoc().title = "Crossfilter"
| bsd-3-clause |
linebp/pandas | pandas/util/_validators.py | 7 | 8192 | """
Module that contains many useful utilities
for validating data or function arguments
"""
from pandas.core.dtypes.common import is_bool
def _check_arg_length(fname, args, max_fname_arg_count, compat_args):
"""
Checks whether 'args' has length of at most 'compat_args'. Raises
a TypeError if that is not the case, similar to in Python when a
function is called with too many arguments.
"""
if max_fname_arg_count < 0:
raise ValueError("'max_fname_arg_count' must be non-negative")
if len(args) > len(compat_args):
max_arg_count = len(compat_args) + max_fname_arg_count
actual_arg_count = len(args) + max_fname_arg_count
argument = 'argument' if max_arg_count == 1 else 'arguments'
raise TypeError(
"{fname}() takes at most {max_arg} {argument} "
"({given_arg} given)".format(
fname=fname, max_arg=max_arg_count,
argument=argument, given_arg=actual_arg_count))
def _check_for_default_values(fname, arg_val_dict, compat_args):
"""
Check that the keys in `arg_val_dict` are mapped to their
default values as specified in `compat_args`.
Note that this function is to be called only when it has been
checked that arg_val_dict.keys() is a subset of compat_args
"""
for key in arg_val_dict:
        # try checking equality directly with '==' operator,
        # as comparison may have been overridden for the left
        # hand object
try:
v1 = arg_val_dict[key]
v2 = compat_args[key]
# check for None-ness otherwise we could end up
# comparing a numpy array vs None
if (v1 is not None and v2 is None) or \
(v1 is None and v2 is not None):
match = False
else:
match = (v1 == v2)
if not is_bool(match):
raise ValueError("'match' is not a boolean")
# could not compare them directly, so try comparison
# using the 'is' operator
except:
match = (arg_val_dict[key] is compat_args[key])
if not match:
raise ValueError(("the '{arg}' parameter is not "
"supported in the pandas "
"implementation of {fname}()".
format(fname=fname, arg=key)))
def validate_args(fname, args, max_fname_arg_count, compat_args):
"""
Checks whether the length of the `*args` argument passed into a function
has at most `len(compat_args)` arguments and whether or not all of these
    elements in `args` are set to their default values.
    Parameters
    ----------
    fname: str
The name of the function being passed the `*args` parameter
args: tuple
The `*args` parameter passed into a function
max_fname_arg_count: int
The maximum number of arguments that the function `fname`
can accept, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args: OrderedDict
        An ordered dictionary of keys and their associated default values.
In order to accommodate buggy behaviour in some versions of `numpy`,
where a signature displayed keyword arguments but then passed those
arguments **positionally** internally when calling downstream
implementations, an ordered dictionary ensures that the original
order of the keyword arguments is enforced. Note that if there is
only one key, a generic dict can be passed in as well.
Raises
------
TypeError if `args` contains more values than there are `compat_args`
ValueError if `args` contains values that do not correspond to those
of the default values specified in `compat_args`
"""
_check_arg_length(fname, args, max_fname_arg_count, compat_args)
# We do this so that we can provide a more informative
# error message about the parameters that we are not
# supporting in the pandas implementation of 'fname'
kwargs = dict(zip(compat_args, args))
_check_for_default_values(fname, kwargs, compat_args)
def _check_for_invalid_keys(fname, kwargs, compat_args):
"""
Checks whether 'kwargs' contains any keys that are not
in 'compat_args' and raises a TypeError if there is one.
"""
# set(dict) --> set of the dictionary's keys
diff = set(kwargs) - set(compat_args)
if diff:
bad_arg = list(diff)[0]
raise TypeError(("{fname}() got an unexpected "
"keyword argument '{arg}'".
format(fname=fname, arg=bad_arg)))
def validate_kwargs(fname, kwargs, compat_args):
"""
Checks whether parameters passed to the **kwargs argument in a
function `fname` are valid parameters as specified in `*compat_args`
and whether or not they are set to their default values.
Parameters
----------
fname: str
The name of the function being passed the `**kwargs` parameter
kwargs: dict
The `**kwargs` parameter passed into `fname`
compat_args: dict
A dictionary of keys that `kwargs` is allowed to have and their
associated default values
Raises
------
TypeError if `kwargs` contains keys not in `compat_args`
ValueError if `kwargs` contains keys in `compat_args` that do not
map to the default values specified in `compat_args`
"""
kwds = kwargs.copy()
_check_for_invalid_keys(fname, kwargs, compat_args)
_check_for_default_values(fname, kwds, compat_args)
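# Illustrative (hypothetical) use of validate_kwargs:
#   compat_args = {'copy': True}
#   validate_kwargs('func', {'copy': True}, compat_args)    # passes silently
#   validate_kwargs('func', {'copy': False}, compat_args)   # raises ValueError
#   validate_kwargs('func', {'other': 1}, compat_args)      # raises TypeError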
def validate_args_and_kwargs(fname, args, kwargs,
max_fname_arg_count,
compat_args):
"""
Checks whether parameters passed to the *args and **kwargs argument in a
function `fname` are valid parameters as specified in `*compat_args`
and whether or not they are set to their default values.
Parameters
----------
fname: str
The name of the function being passed the `**kwargs` parameter
args: tuple
The `*args` parameter passed into a function
kwargs: dict
The `**kwargs` parameter passed into `fname`
max_fname_arg_count: int
        The maximum number of arguments that the function `fname`
        can accept, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args: OrderedDict
        An ordered dictionary of keys that `kwargs` is allowed to
have and their associated default values. Note that if there
is only one key, a generic dict can be passed in as well.
Raises
------
TypeError if `args` contains more values than there are
`compat_args` OR `kwargs` contains keys not in `compat_args`
    ValueError if `args` contains values not at the default value (`None`)
        OR `kwargs` contains keys in `compat_args` that do not map to the default
value as specified in `compat_args`
See Also
--------
validate_args : purely args validation
validate_kwargs : purely kwargs validation
"""
# Check that the total number of arguments passed in (i.e.
# args and kwargs) does not exceed the length of compat_args
_check_arg_length(fname, args + tuple(kwargs.values()),
max_fname_arg_count, compat_args)
# Check there is no overlap with the positional and keyword
# arguments, similar to what is done in actual Python functions
args_dict = dict(zip(compat_args, args))
for key in args_dict:
if key in kwargs:
raise TypeError("{fname}() got multiple values for keyword "
"argument '{arg}'".format(fname=fname, arg=key))
kwargs.update(args_dict)
validate_kwargs(fname, kwargs, compat_args)
def validate_bool_kwarg(value, arg_name):
""" Ensures that argument passed in arg_name is of type bool. """
if not (is_bool(value) or value is None):
raise ValueError('For argument "%s" expected type bool, '
'received type %s.' %
(arg_name, type(value).__name__))
return value
| bsd-3-clause |
pnedunuri/scikit-learn | sklearn/svm/tests/test_sparse.py | 70 | 12992 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
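# check_svm_model_equal fits the same SVM on dense and sparse input and asserts that
# support vectors, coefficients, predictions and decision values agree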
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
#Test decision_function
#Sanity check, test that decision_function implemented in python
#returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
    # This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
synthicity/activitysim | activitysim/core/test/test_skim.py | 2 | 3107 | # ActivitySim
# See full license in LICENSE.txt.
import numpy as np
import pandas as pd
import numpy.testing as npt
import pandas.util.testing as pdt
import pytest
from .. import skim
@pytest.fixture
def data():
return np.arange(100, dtype='int').reshape((10, 10))
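# the fixture is a 10x10 matrix with data[o, d] == 10*o + d, which makes the expected
# skim lookups below (e.g. [52, 99, 16]) easy to verify by eye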
def test_basic(data):
sk = skim.SkimWrapper(data)
orig = [5, 9, 1]
dest = [2, 9, 6]
npt.assert_array_equal(
sk.get(orig, dest),
[52, 99, 16])
def test_offset_int(data):
sk = skim.SkimWrapper(data, skim.OffsetMapper(-1))
orig = [6, 10, 2]
dest = [3, 10, 7]
npt.assert_array_equal(
sk.get(orig, dest),
[52, 99, 16])
def test_offset_list(data):
offset_mapper = skim.OffsetMapper()
offset_mapper.set_offset_list([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
# should have figured out it could use an int offset instead of list
assert offset_mapper.offset_int == -1
offset_mapper = skim.OffsetMapper()
offset_mapper.set_offset_list([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
sk = skim.SkimWrapper(data, offset_mapper)
orig = [60, 100, 20]
dest = [30, 100, 70]
npt.assert_array_equal(
sk.get(orig, dest),
[52, 99, 16])
# fixme - nan support disabled in skim.py (not sure we need it?)
# def test_skim_nans(data):
# sk = skim.SkimWrapper(data)
#
# orig = [5, np.nan, 1, 2]
# dest = [np.nan, 9, 6, 4]
#
# npt.assert_array_equal(
# sk.get(orig, dest),
# [np.nan, np.nan, 16, 24])
def test_skims(data):
skims_shape = data.shape + (2,)
skim_data = np.zeros(skims_shape, dtype=data.dtype)
skim_data[:, :, 0] = data
skim_data[:, :, 1] = data*10
skim_info = {
'block_offsets': {'AM': (0, 0), 'PM': (0, 1)}
}
skim_dict = skim.SkimDict([skim_data], skim_info)
skims = skim_dict.wrap("taz_l", "taz_r")
df = pd.DataFrame({
"taz_l": [1, 9, 4],
"taz_r": [2, 3, 7],
})
skims.set_df(df)
pdt.assert_series_equal(
skims["AM"],
pd.Series(
[12, 93, 47],
index=[0, 1, 2]
).astype(data.dtype)
)
pdt.assert_series_equal(
skims["PM"],
pd.Series(
[120, 930, 470],
index=[0, 1, 2]
).astype(data.dtype)
)
def test_3dskims(data):
skims_shape = data.shape + (2,)
skim_data = np.zeros(skims_shape, dtype=int)
skim_data[:, :, 0] = data
skim_data[:, :, 1] = data*10
skim_info = {
'block_offsets': {('SOV', 'AM'): (0, 0), ('SOV', 'PM'): (0, 1)},
'key1_block_offsets': {'SOV': (0, 0)}
}
skim_dict = skim.SkimDict([skim_data], skim_info)
stack = skim.SkimStack(skim_dict)
skims3d = stack.wrap(left_key="taz_l", right_key="taz_r", skim_key="period")
df = pd.DataFrame({
"taz_l": [1, 9, 4],
"taz_r": [2, 3, 7],
"period": ["AM", "PM", "AM"]
})
skims3d.set_df(df)
pdt.assert_series_equal(
skims3d["SOV"],
pd.Series(
[12, 930, 47],
index=[0, 1, 2]
),
check_dtype=False
)
| agpl-3.0 |
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/production_ml/labs/samples/core/continue_training_from_prod/continue_training_from_prod.py | 2 | 7776 | # This sample demonstrates a common training scenario.
# New models are being trained starting from the production model (if it exists).
# This sample produces two runs:
# 1. The trainer will train the model from scratch and set as prod after testing it
# 2. Exact same configuration, but the pipeline will discover the existing prod model (published by the 1st run) and warm-start the training from it.
# GCS URI of a directory where the models and the model pointers should be stored.
model_dir_uri='gs://<bucket>/<path>'
kfp_endpoint=None
import kfp
from kfp import components
chicago_taxi_dataset_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/e3337b8bdcd63636934954e592d4b32c95b49129/components/datasets/Chicago%20Taxi/component.yaml')
xgboost_train_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml')
xgboost_predict_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml')
pandas_transform_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml')
drop_header_op = kfp.components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml')
calculate_regression_metrics_from_csv_op = kfp.components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/616542ac0f789914f4eb53438da713dd3004fba4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml')
download_from_gcs_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/5c7593f18f347f1c03f5ae6778a1ff305abc315c/components/google-cloud/storage/download/component.yaml')
upload_to_gcs_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/616542ac0f789914f4eb53438da713dd3004fba4/components/google-cloud/storage/upload_to_explicit_uri/component.yaml')
upload_to_gcs_unique_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/616542ac0f789914f4eb53438da713dd3004fba4/components/google-cloud/storage/upload_to_unique_uri/component.yaml')
def continuous_training_pipeline(
model_dir_uri,
training_start_date: str = '2019-02-01',
training_end_date: str = '2019-03-01',
testing_start_date: str = '2019-01-01',
testing_end_date: str = '2019-02-01',
):
# Preparing the training and testing data
training_data = chicago_taxi_dataset_op(
where='trip_start_timestamp >= "{}" AND trip_start_timestamp < "{}"'.format(str(training_start_date), str(training_end_date)),
select='tips,trip_seconds,trip_miles,pickup_community_area,dropoff_community_area,fare,tolls,extras,trip_total',
limit=10000,
).set_display_name('Training data').output
testing_data = chicago_taxi_dataset_op(
where='trip_start_timestamp >= "{}" AND trip_start_timestamp < "{}"'.format(str(testing_start_date), str(testing_end_date)),
select='tips,trip_seconds,trip_miles,pickup_community_area,dropoff_community_area,fare,tolls,extras,trip_total',
limit=10000,
).set_display_name('Testing data').output
# Preparing the true values for the testing data
true_values_table = pandas_transform_csv_op(
table=testing_data,
transform_code='''df = df[["tips"]]''',
).set_display_name('True values').output
true_values = drop_header_op(true_values_table).output
# Getting the active prod model
prod_model_pointer_uri = str(model_dir_uri) + 'prod'
get_prod_model_uri_task = download_from_gcs_op(
gcs_path=prod_model_pointer_uri,
default_data='',
).set_display_name('Get prod model')
# Disabling cache reuse to always get new data
get_prod_model_uri_task.execution_options.caching_strategy.max_cache_staleness = 'P0D'
prod_model_uri = get_prod_model_uri_task.output
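    # an empty string means no prod model pointer exists yet; the two Conditions below
    # branch between cold-start training and warm-starting from the stored prod model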
# Training new model from scratch
with kfp.dsl.Condition(prod_model_uri == ""):
# Training
model = xgboost_train_on_csv_op(
training_data=training_data,
label_column=0,
objective='reg:squarederror',
num_iterations=400,
).outputs['model']
# Predicting
predictions = xgboost_predict_on_csv_op(
data=testing_data,
model=model,
label_column=0,
).output
# Calculating the regression metrics
metrics_task = calculate_regression_metrics_from_csv_op(
true_values=true_values,
predicted_values=predictions,
)
# Checking the metrics
with kfp.dsl.Condition(metrics_task.outputs['mean_squared_error'] < 2.0):
# Uploading the model
model_uri = upload_to_gcs_unique_op(
data=model,
gcs_path_prefix=model_dir_uri,
).set_display_name('Upload model').output
# Setting the model as prod
upload_to_gcs_op(
data=model_uri,
gcs_path=prod_model_pointer_uri,
).set_display_name('Set prod model')
# Training new model starting from the prod model
with kfp.dsl.Condition(prod_model_uri != ""):
# Downloading the model
prod_model = download_from_gcs_op(prod_model_uri).output
# Training
model = xgboost_train_on_csv_op(
training_data=training_data,
starting_model=prod_model,
label_column=0,
objective='reg:squarederror',
num_iterations=100,
).outputs['model']
# Predicting
predictions = xgboost_predict_on_csv_op(
data=testing_data,
model=model,
label_column=0,
).output
# Calculating the regression metrics
metrics_task = calculate_regression_metrics_from_csv_op(
true_values=true_values,
predicted_values=predictions,
)
# Checking the metrics
with kfp.dsl.Condition(metrics_task.outputs['mean_squared_error'] < 2.0):
# Uploading the model
model_uri = upload_to_gcs_unique_op(
data=model,
gcs_path_prefix=model_dir_uri,
).set_display_name('Upload model').output
# Setting the model as prod
upload_to_gcs_op(
data=model_uri,
gcs_path=prod_model_pointer_uri,
).set_display_name('Set prod model')
if __name__ == '__main__':
# Running the first time. The trainer will train the model from scratch and set as prod after testing it
    pipeline_run = kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(
continuous_training_pipeline,
arguments=dict(
model_dir_uri=model_dir_uri,
training_start_date='2019-02-01',
training_end_date='2019-03-01',
),
)
    pipeline_run.wait_for_run_completion()
# Running the second time. The trainer should warm-start the training from the prod model and set the new model as prod after testing it
kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(
continuous_training_pipeline,
arguments=dict(
model_dir_uri=model_dir_uri,
training_start_date='2019-02-01',
training_end_date='2019-03-01',
),
)
| apache-2.0 |
igem-waterloo/uwaterloo-igem-2015 | models/targeting/genome_simulation.py | 4 | 8108 | import datetime
import matplotlib.pyplot as plt
import os
import random
import make_video
from genome_csv import results_to_csv, csv_to_dict, map_genome_events, map_target_events
from genome_plot import genome_plot_polar, plot_states
from init_genome_camv import init_genome_camv, init_targets_all_domains, init_targets_multi_P6
from probabilistic import prob_repair
def genome_simulate(flag_plot=True, flag_multirun=False, batch_data_path=None):
# output management
if not flag_multirun:
runs_folder = "runs" + os.sep # store timestamped runs here
current_time = datetime.datetime.now().strftime("%Y-%m-%d %I.%M.%S%p")
time_folder = current_time + os.sep
current_run_folder = runs_folder + time_folder
# subfolders in the timestamped run directory:
data_folder = os.path.join(current_run_folder, "data")
plot_genome_folder = os.path.join(current_run_folder, "plot_genome")
plot_data_folder = os.path.join(current_run_folder, "plot_data")
# create dirs conditionally
dir_list = [runs_folder, current_run_folder, data_folder]
if flag_plot:
dir_list += [plot_genome_folder, plot_data_folder]
for dirs in dir_list:
if not os.path.exists(dirs):
os.makedirs(dirs)
else:
assert batch_data_path is not None
assert not flag_plot # unfortunately, we don't support plotting during batch runs
data_folder = batch_data_path
# create dirs conditionally
dir_list = [data_folder]
for dirs in dir_list:
if not os.path.exists(dirs):
os.makedirs(dirs)
# simulation parameters (time in seconds)
complex_concentration = 22.101 # nM
dt = 1.0
t0 = 0.0
t1 = 2.0 * 18.0 * 3600.0
total_turns = int((t1 - t0) / dt)
time_sim = t0
plot_period = 30 # in turns
plot_count = 0
# initialize genome
pseudo_targets = init_targets_multi_P6(complex_concentration)
genome_camv = init_genome_camv(pseudo_targets)
genome_camv.initialize_target_cut_probabilities(dt)
# simulation pre-loop behaviour
target_dict = genome_camv.get_targets_from_genome()
open_targets = genome_camv.get_open_targets_from_genome()
probability_to_repair = prob_repair(dt)
double_cut_probability = 1.55*10.0**(-5) # see Tessa
# for logging data
data_log = ""
data_file = os.path.join(data_folder, "simulation_data.txt")
# variables for csv writing
genome_events = [0]*total_turns
target_events = [0]*total_turns
genome_header = ["time"] + genome_camv.domains.keys()
target_header = ["time"]
for key in genome_camv.domains.keys():
target_header += genome_camv.get_targets_from_genome()[key].keys()
def target_state(target_dict):
if {} == target_dict:
return None
for key, value in target_dict.iteritems():
if not value.repaired:
return "cut"
elif value.targetable:
return "targetable"
else:
return "untargetable"
for turn in xrange(total_turns):
# clear turn log and set to turn
turn_log = "Time: " + str(turn*dt) + "s\n"
# get current targets
targets_from_genome = genome_camv.get_targets_from_genome()
open_targets = genome_camv.get_open_targets_from_genome()
# place data in rows for csv to write later
genome_events[turn] = map_genome_events(str(turn*dt), genome_camv.domains, genome_header)
target_events[turn] = map_target_events(str(turn*dt), targets_from_genome, target_header)
# deletion module
if len(open_targets) > 1:
# time_with_double_cut += dt
double_cut_success = False
if random.random() < double_cut_probability:
double_cut_success = True
if double_cut_success:
targets = random.sample(open_targets, 2)
target1 = targets_from_genome[targets[0][0]][targets[0][1]]
target2 = targets_from_genome[targets[1][0]][targets[1][1]]
first = min(target1.current_start, target2.current_start)
second = max(target1.current_start, target2.current_start)
genome_camv.large_deletion(target1, target2, dt)
turn_log += "Large deletion spanning from " + str(first) + " to " + str(second) + "\n"
targets_from_genome = genome_camv.get_targets_from_genome()
# cut and repair module
for key_domain in targets_from_genome.keys():
domain = genome_camv.domains[key_domain]
targets_from_domain = domain.targets
for key_target in targets_from_domain.keys():
success_cut = False
success_repair = False
target = targets_from_domain[key_target]
if target.repaired: # i.e. not cut
probability_to_cut = target.cut_probability
if random.random() < probability_to_cut:
success_cut = True
if success_cut:
target.cut()
open_targets.append((key_domain, key_target))
turn_log += target.label + " cut at " + str(target.cut_position) + "\n"
else:
if random.random() < probability_to_repair:
success_repair = True
if success_repair:
extra = ""
old_sequence = target.sequence
old_shift = target.shift
target.repair(dt)
open_targets.remove((key_domain, key_target))
net_indel_size = target.shift - old_shift
if old_sequence != target.sequence:
extra = "The sequence was changed from " + old_sequence + " to " + target.sequence
turn_log += target.label + " repaired at " + str(target.repair_position) + " with an indel of " + str(net_indel_size) + "\n" + extra + "\n"
# save turn data (maybe only if stuff happened?)
# \n's count number of events in turn (starts with one)
if turn_log.count('\n') > 1:
data_log += turn_log
if not flag_multirun:
print turn_log
# update plots if actively showing plots
if turn % plot_period == 0 and flag_plot:
plot_path = os.path.join(plot_genome_folder, "genome_%05d.png" % plot_count)
genome_plot_polar(genome_camv, 'CaMV', time=time_sim/60.0, output_path=plot_path, flag_show=False)
plt.close()
plot_count += 1
# increment timer
time_sim += dt
# write data to csvs
csv_states_gene = "states_gene.csv"
csv_states_target = "states_target.csv"
results_to_csv(data_folder, csv_states_gene, genome_header, genome_events)
results_to_csv(data_folder, csv_states_target, target_header, target_events)
# print data_log
f = open(data_file, 'w')
f.write(data_log)
f.close()
# create data plots
if flag_plot:
states_gene = csv_to_dict(os.path.join(data_folder, csv_states_gene))
states_target = csv_to_dict(os.path.join(data_folder, csv_states_target))
domains_to_plot = list(set([elem["domain_label"] for elem in pseudo_targets]))
plot_states(states_gene, "gene", labels_to_plot=domains_to_plot, output_path=os.path.join(plot_data_folder, "states_gene.png"), flag_show=False)
plot_states(states_target, "target", output_path=os.path.join(plot_data_folder, "states_target.png"), flag_show=False)
# create video of genome plots
if flag_plot:
fps = 15
video_path = os.path.join(current_run_folder, "genome_%dmin_%dfps.mp4" % (int(t1/60.0), fps))
make_video.make_video_ffmpeg(plot_genome_folder, video_path, fps=fps)
return
if __name__ == '__main__':
genome_simulate(False)
| mit |
Nyker510/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
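# xx is the L1 norm of the coefficient vector at each step of the path, normalized by
# its final value, and serves as the x-axis (|coef| / max|coef|)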
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
shenzebang/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
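    # `hides` wraps a method in a property that raises AttributeError when the
    # instance's hidden_method names it, simulating a sub-estimator lacking that method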
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
motion-planning/OMPL-trajectories | src/OMPLtrajectories.py | 1 | 2304 | import matplotlib.pyplot as plt
### Global Variables ###
# Obstacles
obstacles = []
# Robots
# Each robot is represented by a trajectory
# Each trajectory is composed of a series of states
# Each state is composed of: [0] x-coordinate, [1] y-coordinate, [2] orientation in [0, 2pi)
robots = []
# Read files and split data by line
def readFile (filename):
info = []
with open(filename, 'r') as openFile:
data = openFile.readlines()
for line in data:
info.append(line.split())
return info
# Extract obstacles
def extractObstacles(info):
# Each obstacle is a set of points on a line
for line in info:
points = []
# Iterate over all points and add them to an array representing the obstacle
for endpoint in line:
point = []
x, y = map(float, endpoint.strip('()').split(','))
point.append(float(x))
point.append(float(y))
points.append(point)
obstacles.append(points)
# Extract policy of each robot
def extractPolicies(info):
trajectory = []
for line in info:
state = []
state.append(float(line[0])) # x-coordinate
state.append(float(line[1])) # y-coordinate
state.append(float(line[2])) # angle
trajectory.append(state)
robots.append(trajectory)
# Plot the trajectories of each robot
def plotPaths():
# Iterate over every state of every robot
for idx, robot in enumerate(robots):
xs = []
ys = []
for state in robot:
xs.append(state[0])
ys.append(state[1])
plt.plot(xs, ys, label='Robot: ' + str(idx))
# Iterate over every point in every obstacle
for polygon in obstacles:
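        # re-appending the first vertex below closes the plotted obstacle outline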
polygon.append(polygon[0])
xs = []
ys = []
for i in range(len(polygon)):
xs.append(polygon[i][0])
ys.append(polygon[i][1])
plt.plot(xs, ys)
plt.suptitle('Paths')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()
extractObstacles(readFile("../data/obstacles.txt")) # obstacles
# extractPolicies(readFile(policyFilename)) # policies generated by OMPL
for i in range(1, 5):
    fileName = '../data/p' + str(i) + '.txt'
extractPolicies(readFile(fileName))
plotPaths()
| mit |
DTUWindEnergy/Python4WindEnergy | lesson 1/demonstration, Mads/matplotlibwidget.py | 5 | 3797 | # -*- coding: utf-8 -*-
#
# Copyright © 2009 Pierre Raybaut
# Licensed under the terms of the MIT License
"""
MatplotlibWidget
================
Example of matplotlib widget for PyQt4
Copyright © 2009 Pierre Raybaut
This software is licensed under the terms of the MIT License
Derived from 'embedding_in_pyqt4.py':
Copyright © 2005 Florent Rougon, 2006 Darren Dale
"""
__version__ = "1.0.0"
from PyQt4.QtGui import QSizePolicy
from PyQt4.QtCore import QSize
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as Canvas
from matplotlib.figure import Figure
from matplotlib import rcParams
rcParams['font.size'] = 9
class MatplotlibWidget(Canvas):
"""
MatplotlibWidget inherits PyQt4.QtGui.QWidget
and matplotlib.backend_bases.FigureCanvasBase
Options: option_name (default_value)
-------
parent (None): parent widget
title (''): figure title
xlabel (''): X-axis label
ylabel (''): Y-axis label
xlim (None): X-axis limits ([min, max])
ylim (None): Y-axis limits ([min, max])
xscale ('linear'): X-axis scale
yscale ('linear'): Y-axis scale
width (4): width in inches
height (3): height in inches
dpi (100): resolution in dpi
hold (False): if False, figure will be cleared each time plot is called
Widget attributes:
-----------------
figure: instance of matplotlib.figure.Figure
axes: figure axes
Example:
-------
self.widget = MatplotlibWidget(self, yscale='log', hold=True)
from numpy import linspace
x = linspace(-10, 10)
self.widget.axes.plot(x, x**2)
    self.widget.axes.plot(x, x**3)
"""
def __init__(self, parent=None, title='', xlabel='', ylabel='',
xlim=None, ylim=None, xscale='linear', yscale='linear',
width=4, height=3, dpi=100, hold=False):
self.figure = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.figure.add_subplot(111)
self.axes.set_title(title)
self.axes.set_xlabel(xlabel)
self.axes.set_ylabel(ylabel)
if xscale is not None:
self.axes.set_xscale(xscale)
if yscale is not None:
self.axes.set_yscale(yscale)
if xlim is not None:
self.axes.set_xlim(*xlim)
if ylim is not None:
self.axes.set_ylim(*ylim)
self.axes.hold(hold)
Canvas.__init__(self, self.figure)
self.setParent(parent)
Canvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
Canvas.updateGeometry(self)
def sizeHint(self):
w, h = self.get_width_height()
return QSize(w, h)
def minimumSizeHint(self):
return QSize(10, 10)
#===============================================================================
# Example
#===============================================================================
if __name__ == '__main__':
import sys
from PyQt4.QtGui import QMainWindow, QApplication
from numpy import linspace
class ApplicationWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.mplwidget = MatplotlibWidget(self, title='Example',
xlabel='Linear scale',
ylabel='Log scale',
hold=True, yscale='log')
self.mplwidget.setFocus()
self.setCentralWidget(self.mplwidget)
self.plot(self.mplwidget.axes)
def plot(self, axes):
x = linspace(-10, 10)
axes.plot(x, x**2)
axes.plot(x, x**3)
app = QApplication(sys.argv)
win = ApplicationWindow()
win.show()
sys.exit(app.exec_())
| apache-2.0 |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Padova_cont/padova_cont_8/fullgrid/FullReader.py | 1 | 14773 | #Imports
import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#input data files loaded in here
print "Starting"
numFiles = 3
gridfile = [None]*numFiles
Elines = [None]*numFiles
for i in range(3):
for file in os.listdir('.'):
if file.endswith("padova_cont_{:d}.grd".format(i+1)):
gridfile[i] = file
print file
if file.endswith("padova_cont_{:d}.txt".format(i+1)):
Elines[i] = file
print file
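# gridfile[i] and Elines[i] now hold the matched .grd and .txt filenames for grid chunk i+1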
# ------------------------------------------------------------------------------------------------------
#Patches data
#this section adds the rectangles on the plots of the three other studies
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the add subplot routine
def add_sub_plot(sub_num, elinesplot):
numplots = 16
plt.subplot(numplots/4.,4,sub_num) #define rows and columns by desired amount of subplots
rbf = scipy.interpolate.Rbf(x, y, z[elinesplot][:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed') #teal contours, dashed
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5) #black contours, solid
plt.scatter(max_values[line[elinesplot][sub_num-1],2], max_values[line[elinesplot][sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[elinesplot][sub_num-1]], xy=(8,11), xytext=(4.5,8.5), fontsize = 10)
plt.annotate(max_values[line[elinesplot][sub_num-1],0], xy = (max_values[line[elinesplot][sub_num-1],2], max_values[line[elinesplot][sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
#if sub_num == numplots / 2.:
print " --- {:d} of the sub-plots of plot{:d} are complete".format(sub_num+1, elinesplot+1)
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 10
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
#here we make sure we have all the correct axes labeled.
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile[0], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile[1], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile[2], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines[0], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines[1], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines[2], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
headers = headers[1:]
# ---------------------------------------------------
#To fix when hdens > 10
hdens_values_2 = empty(shape=[0, 1])
phi_values_2 = empty(shape=[0, 1])
Emissionlines_2 = empty(shape=[0, len(Emissionlines[0,:])])
for i in range(len(hdens_values)):
if float(hdens_values[i]) < 10.100 :
hdens_values_2 = append(hdens_values_2, hdens_values[i])
phi_values_2 = append(phi_values_2, phi_values[i])
Emissionlines_2 = vstack([Emissionlines_2, Emissionlines[i,:]])
#overwrite old arrays
hdens_values = hdens_values_2
phi_values = phi_values_2
Emissionlines = Emissionlines_2
print "import files complete"
# ---------------------------------------------------
headers = ["C III $\lambda$977",
"N III $\lambda$991",
"H I $\lambda$1026",
"O IV $\lambda$1035",
"Incident $\lambda$1215",
"H I $\lambda$1216",
"N V $\lambda$1239",
"N V $\lambda$1240",
"N V $\lambda$1243",
"Si II $\lambda$1263",
"O I $\lambda$1304",
"Si II $\lambda$1308",
"Si IV $\lambda$1397",
"O IV] $\lambda$1402",
"S IV $\lambda$1406",
"N IV $\lambda$1485",
"N IV $\lambda$1486",
"Si II $\lambda$1531",
"C IV $\lambda$1549",
"He II $\lambda$1640",
"O III] $\lambda$1665",
"Al II $\lambda$1671",
"N 4 1719A",
"N III] $\lambda$1750",
"Al III $\lambda$1860",
"Si III] $\lambda$1888",
"C III] $\lambda$1907",
"TOTL 1909A",
"C III $\lambda$2297",
"[O III] $\lambda$2321",
"[O II] $\lambda$2471",
"C II] $\lambda$2326",
"Si II] $\lambda$2335",
"Al II] $\lambda$2665",
"Mg II $\lambda$2798",
"Mg II $\lambda$2803",
"[Ne III] $\lambda$3343",
"[Ne V] $\lambda$3426",
"Balmer Cont.",
"Balmer Jump $\lambda$3646",
"[O II] $\lambda$3726",
"[O II] $\lambda$3727",
"[O II] $\lambda$3729",
"[Ne III] $\lambda$3869",
"H I $\lambda$3889",
"Ca II $\lambda$3933",
"He I $\lambda$4026",
"[S II] $\lambda$4070",
"[S II] $\lambda$4074",
"[S II] $\lambda$4078",
"H I $\lambda$4102",
"H I $\lambda$4340",
"[O III] $\lambda$4363",
"He II $\lambda$4686",
"CA B 4686A",
"[Ar IV] $\lambda$4711",
"[Ne IV] $\lambda$4720",
"[Ar IV] $\lambda$4740",
"Incident $\lambda$4860",
"H $\\beta$ $\lambda$4861",
"[O III] $\lambda$4959",
"[O III] $\lambda$5007",
"[N I] $\lambda$5200",
"FE14 5303A",
"[O I] $\lambda$5577",
"[N II] $\lambda$5755",
"He I $\lambda$5876",
"[O I] $\lambda$6300",
"[S III] $\lambda$6312",
"[O I] $\lambda$6363",
"H $\\alpha$ $\lambda$6563",
"[N II] $\lambda$6584",
"[S II] $\lambda$6716",
"[S II] $\lambda$6720",
"[S II] $\lambda$6731",
"AR 5 7005A",
"[Ar III] $\lambda$7135",
"[O II] $\lambda$7325",
"[Ar IV] $\lambda$7331",
"[Ar III] $\lambda$7751",
"O I $\lambda$8446",
"Ca II $\lambda$8498",
"Ca II $\lambda$8542",
"Ca II $\lambda$8662",
"Ca II $\lambda$8579",
"[S III] $\lambda$9069",
"Pa9 $\lambda$9229",
"[S III] $\lambda$9532",
"Pa $\\epsilon$ $\lambda$9546",
"He I $\lambda$1.083$\mu$m",
"H I $\lambda$1.875$\mu$m",
"H I $\lambda$1.282$\mu$m",
"H I $\lambda$1.094$\mu$m",
"H I $\lambda$1.005$\mu$m",
"H I $\lambda$4.051$\mu$m",
"H I $\lambda$2.625$\mu$m",
"H 1 2.166m",
"H 1 1.945m",
"C II $\lambda$157.6$\mu$m",
"N II $\lambda$121.7$\mu$m",
"N II $\lambda$205.4$\mu$m",
"[N III] $\lambda$57.2$\mu$m",
"[O I] $\lambda$63$\mu$m",
"[O I] $\lambda$145.5$\mu$m",
"O III $\lambda$51.80$\mu$m",
"[O III] $\lambda$88$\mu$m",
"O IV $\lambda$25.88$\mu$m",
"Ne II $\lambda$12.81$\mu$m",
"Ne III $\lambda$15.55$\mu$m",
"Ne III $\lambda$36.01$\mu$m",
"[Ne V] $\lambda$14.3$\mu$m",
"Ne V $\lambda$24.31$\mu$m",
"Ne VI $\lambda$7.652$\mu$m",
"Na III $\lambda$7.320$\mu$m",
"NA 4 9.039m",
"NA 4 21.29m",
"NA 6 14.40m",
"NA 6 8.611m",
"MG 4 4.485m",
"MG 5 5.610m",
"MG 5 13.52m",
"MG 7 5.503m",
"MG 7 9.033m",
"MG 8 3.030m",
"AL 5 2.905m",
"AL 6 3.660m",
"AL 6 9.116m",
"AL 8 5.848m",
"AL 8 3.690m",
"SI 2 34.81m",
"SI 6 1.963m",
"SI 7 2.481m",
"SI 7 6.492m",
"SI 9 3.929m",
"SI 9 2.584m",
"SI10 1.430m",
"S 3 18.67m",
"S 3 33.47m",
"S 4 10.51m",
"S 8 9914A",
"S 9 1.252m",
"S 9 3.754m",
"S 11 1.920m",
"S 11 1.393m",
"AR 2 6.980m",
"AR 3 9.000m",
"AR 3 21.83m",
"AR 5 8.000m",
"AR 5 13.10m",
"AR 6 4.530m",
"AR10 5534A",
"AR11 2.595m",
"CA 4 3.210m",
"CA 5 4.157m",
"CA 5 11.48m",
"CA 8 2.321m",
"SC 5 2.310m",
"TI 6 1.715m",
"V 7 1.304m",
"CR 8 1.011m",
"MN 9 7968A",
"CO11 5168A",
"NI12 4231A",
"O 3 1661A",
"O 3 1666A",
"[O V] $\lambda$1218",
"SI 3 1892A",
"TOTL 2335A"]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,58]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,58])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,58])), 10)
else:
concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
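# The same scaling can be done in vectorized form; a minimal sketch, equivalent
# to the loop above and assuming Emissionlines holds numeric strings:
#   ratios = 4860. * Emissionlines.astype(float) / Emissionlines[:, 58].astype(float)[:, None]
#   concatenated_data = where(ratios > 1., log10(ratios), 0.)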
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [
#UV1Lines
[0, #977
1, #991
2, #1026
5, #1216
91, #1218
6, #1239
7, #1240
8, #1243
9, #1263
10, #1304
11,#1308
12, #1397
13, #1402
14, #1406
16, #1486
17], #1531
#UV2line
[18, #1549
19, #1640
20, #1665
21, #1671
23, #1750
24, #1860
25, #1888
26, #1907
27, #2297
28, #2321
29, #2471
30, #2326
31, #2335
32, #2665
33, #2798
34],
#Optical Lines
[36, #NE 3 3343A
38, #BA C
39, #3646
40, #3726
41, #3727
42, #3729
43, #3869
44, #3889
45, #3933
46, #4026
47, #4070
48, #4074
49, #4078
50, #4102
51, #4340
52], #4363
#Optical Lines 2
[56, #AR 4 4740
58, #4861
59, #O III 4959
60, #O 3 5007
61, #N 1 5200
63, #O 1 5577
64, #N 2 5755
65, #HE 1 5876
66, #O 1 6300
67, #S 3 6312
68, #O 1 6363
69, #H 1 6563
70, #N 2 6584
71, #S II 6716
72, #S 2 6720
73], #S II 6731
#IR Lines
[75, #AR 3 7135
76, #TOTL 7325
77,
78, #AR 3 7751
79, #6LEV 8446
80, #CA2X 8498
81, #CA2Y 8542
82, #CA2Z 8662
83, #CA 2 8579A
84, #S 3 9069
85, #H 1 9229
86, #S 3 9532
87,
88,
89,
90], #H 1 9546
#Rest Lines
[3,4,15,22,37,53,54,55,58,63,78,89,90,91,93,94],
#More Lines
[97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112],
#More Lines 2
[113,114,115,116,117,118,119,120,121,122,123,124,125,126,127]
]
#create z array for this plot
z = [concatenated_data[:,line[0]],concatenated_data[:,line[1]], concatenated_data[:,line[2]], concatenated_data[:,line[3]], concatenated_data[:,line[4]], concatenated_data[:,line[5]], concatenated_data[:,line[6]], concatenated_data[:,line[7]]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), (x.max()-x.min())), linspace(y.min(), y.max(), (y.max()-y.min()))
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
# ---------------------------------------------------
plt.clf()
for j in range (7):
for i in range(16):
add_sub_plot(i+1, j) # subplot numbering inside add_sub_plot is 1-based (1..16)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "plot {:d} complete".format(j+1)
plt.savefig(("Full_lines_%d.pdf")%j)
plt.clf()
| gpl-2.0 |
shankari/e-mission-server | emission/analysis/plotting/geojson/geojson_feature_converter.py | 2 | 15279 | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import map
from builtins import str
from builtins import *
import logging
import geojson as gj
import copy
import attrdict as ad
import pandas as pd
import emission.storage.timeseries.abstract_timeseries as esta
import emission.net.usercache.abstract_usercache as enua
import emission.storage.timeseries.timequery as estt
import emission.storage.decorations.trip_queries as esdt
import emission.storage.decorations.analysis_timeseries_queries as esda
import emission.storage.decorations.section_queries as esds
import emission.storage.decorations.timeline as esdtl
import emission.core.wrapper.location as ecwl
import emission.core.wrapper.cleanedsection as ecwcs
import emission.core.wrapper.entry as ecwe
import emission.core.common as ecc
# TODO: Move this to the section_features class instead
import emission.analysis.intake.cleaning.location_smoothing as eaicl
import emission.analysis.config as eac
def _del_non_derializable(prop_dict, extra_keys):
for key in extra_keys:
if key in prop_dict:
del prop_dict[key]
def _stringify_foreign_key(prop_dict, key_names):
for key_name in key_names:
if hasattr(prop_dict, key_name):
setattr(prop_dict, key_name, str(getattr(prop_dict,key_name)))
def location_to_geojson(location):
"""
Converts a location wrapper object into geojson format.
This is pretty easy - it is a point.
Since we have other properties that we care about, we make it a feature.
Then, all the other stuff goes directly into the properties since the wrapper is a dict too!
:param location: the location object
:return: a geojson version of the location. the object is of type "Feature".
"""
try:
ret_feature = gj.Feature()
ret_feature.id = str(location.get_id())
ret_feature.geometry = location.data.loc
ret_feature.properties = copy.copy(location.data)
ret_feature.properties["feature_type"] = "location"
_del_non_derializable(ret_feature.properties, ["loc"])
return ret_feature
except Exception as e:
logging.exception(("Error while converting object %s" % location))
raise e
def place_to_geojson(place):
"""
Converts a place wrapper object into geojson format.
This is also pretty easy - it is just a point.
Since we have other properties that we care about, we make it a feature.
Then, all the other stuff goes directly into the properties since the wrapper is a dict too!
:param place: the place object
:return: a geojson version of the place. the object is of type "Feature".
"""
ret_feature = gj.Feature()
ret_feature.id = str(place.get_id())
ret_feature.geometry = place.data.location
ret_feature.properties = copy.copy(place.data)
ret_feature.properties["feature_type"] = "place"
# _stringify_foreign_key(ret_feature.properties, ["ending_trip", "starting_trip"])
_del_non_derializable(ret_feature.properties, ["location"])
return ret_feature
def stop_to_geojson(stop):
"""
Converts a stop wrapper object into geojson format.
This is also pretty easy - it is just a point.
Since we have other properties that we care about, we make it a feature.
Then, all the other stuff goes directly into the properties since the wrapper is a dict too!
:param stop: the stop object
:return: a geojson version of the stop. the object is of type "Feature".
"""
ret_feature = gj.Feature()
ret_feature.id = str(stop.get_id())
ret_feature.geometry = gj.LineString()
ret_feature.geometry.coordinates = [stop.data.enter_loc.coordinates, stop.data.exit_loc.coordinates]
ret_feature.properties = copy.copy(stop.data)
ret_feature.properties["feature_type"] = "stop"
# _stringify_foreign_key(ret_feature.properties, ["ending_section", "starting_section", "trip_id"])
_del_non_derializable(ret_feature.properties, ["location"])
return ret_feature
def section_to_geojson(section, tl):
"""
This is the trickiest part of the visualization.
The section is basically a collection of points with a line through them.
So the representation is a feature collection in which one feature is the line through the points, and one (optional) feature collection is the set of individual point features.
:param section: the section to be converted
:return: a feature collection which is the geojson version of the section
"""
ts = esta.TimeSeries.get_time_series(section.user_id)
entry_it = ts.find_entries(["analysis/recreated_location"],
esda.get_time_query_for_trip_like(
"analysis/cleaned_section",
section.get_id()))
# TODO: Decide whether we want to use Rewrite to use dataframes throughout instead of python arrays.
# dataframes insert nans. We could use fillna to fill with default values, but if we are not actually
# using dataframe features here, it is unclear how much that would help.
feature_array = []
section_location_entries = [ecwe.Entry(entry) for entry in entry_it]
if len(section_location_entries) != 0:
logging.debug("first element in section_location_array = %s" % section_location_entries[0])
if not ecc.compare_rounded_arrays(section.data.end_loc.coordinates,
section_location_entries[-1].data.loc.coordinates,
digits=4):
logging.info("section_location_array[-1].data.loc %s != section.data.end_loc %s even after df.ts fix, filling gap" % \
(section_location_entries[-1].data.loc, section.data.end_loc))
if eac.get_config()["output.conversion.validityAssertions"]:
assert(False)
last_loc_doc = ts.get_entry_at_ts("background/filtered_location", "data.ts", section.data.end_ts)
if last_loc_doc is None:
logging.warning("can't find entry to patch gap, leaving gap")
else:
last_loc_entry = ecwe.Entry(last_loc_doc)
logging.debug("Adding new entry %s to fill the end point gap between %s and %s"
% (last_loc_entry.data.loc, section_location_entries[-1].data.loc,
section.data.end_loc))
section_location_entries.append(last_loc_entry)
points_line_feature = point_array_to_line(section_location_entries)
points_line_feature.id = str(section.get_id())
points_line_feature.properties.update(copy.copy(section.data))
# Update works on dicts, convert back to a section object to make the modes
# work properly
points_line_feature.properties = ecwcs.Cleanedsection(points_line_feature.properties)
points_line_feature.properties["feature_type"] = "section"
if eac.get_section_key_for_analysis_results() == esda.INFERRED_SECTION_KEY:
ise = esds.cleaned2inferred_section(section.user_id, section.get_id())
if ise is not None:
logging.debug("mapped cleaned section %s -> inferred section %s" %
(section.get_id(), ise.get_id()))
logging.debug("changing mode from %s -> %s" %
(points_line_feature.properties.sensed_mode, ise.data.sensed_mode))
points_line_feature.properties["sensed_mode"] = str(ise.data.sensed_mode)
else:
points_line_feature.properties["sensed_mode"] = str(points_line_feature.properties.sensed_mode)
else:
points_line_feature.properties["sensed_mode"] = str(points_line_feature.properties.sensed_mode)
_del_non_derializable(points_line_feature.properties, ["start_loc", "end_loc"])
# feature_array.append(gj.FeatureCollection(points_feature_array))
feature_array.append(points_line_feature)
return gj.FeatureCollection(feature_array)
def incident_to_geojson(incident):
ret_feature = gj.Feature()
ret_feature.id = str(incident.get_id())
ret_feature.geometry = gj.Point()
ret_feature.geometry.coordinates = incident.data.loc.coordinates
ret_feature.properties = copy.copy(incident.data)
ret_feature.properties["feature_type"] = "incident"
# _stringify_foreign_key(ret_feature.properties, ["ending_section", "starting_section", "trip_id"])
_del_non_derializable(ret_feature.properties, ["loc"])
return ret_feature
def geojson_incidents_in_range(user_id, start_ts, end_ts):
MANUAL_INCIDENT_KEY = "manual/incident"
ts = esta.TimeSeries.get_time_series(user_id)
uc = enua.UserCache.getUserCache(user_id)
tq = estt.TimeQuery("data.ts", start_ts, end_ts)
incident_entry_docs = list(ts.find_entries([MANUAL_INCIDENT_KEY], time_query=tq)) \
+ list(uc.getMessage([MANUAL_INCIDENT_KEY], tq))
incidents = [ecwe.Entry(doc) for doc in incident_entry_docs]
return list(map(incident_to_geojson, incidents))
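# Helper: collapse a list of location entries into a single GeoJSON LineString
# feature, storing the per-point epoch times ("times") and millisecond
# timestamps ("timestamps") in the feature properties.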
def point_array_to_line(point_array):
points_line_string = gj.LineString()
# points_line_string.coordinates = [l.loc.coordinates for l in filtered_section_location_array]
points_line_string.coordinates = []
points_times = []
points_timestamps = []
for l in point_array:
# logging.debug("About to add %s to line_string " % l)
points_line_string.coordinates.append(l.data.loc.coordinates)
points_times.append(l.data.ts)
points_timestamps.append(int(round(l.data.ts * 1000)))
points_line_feature = gj.Feature()
points_line_feature.geometry = points_line_string
points_line_feature.properties = {}
points_line_feature.properties["times"] = points_times
points_line_feature.properties["timestamps"] = points_timestamps
return points_line_feature
def trip_to_geojson(trip, tl):
"""
Trips are the main focus of our current visualization, so they are most complex.
Each trip is represented as a feature collection with the following features:
- two features for the start and end places
- features for each stop in the trip
- features for each section in the trip
:param trip: the trip object to be converted
:param tl: the timeline used to retrieve related objects
:return: the geojson version of the trip
"""
feature_array = []
curr_start_place = tl.get_object(trip.data.start_place)
curr_end_place = tl.get_object(trip.data.end_place)
start_place_geojson = place_to_geojson(curr_start_place)
start_place_geojson["properties"]["feature_type"] = "start_place"
feature_array.append(start_place_geojson)
end_place_geojson = place_to_geojson(curr_end_place)
end_place_geojson["properties"]["feature_type"] = "end_place"
feature_array.append(end_place_geojson)
trip_tl = esdt.get_cleaned_timeline_for_trip(trip.user_id, trip.get_id())
stops = trip_tl.places
for stop in stops:
feature_array.append(stop_to_geojson(stop))
for i, section in enumerate(trip_tl.trips):
section_gj = section_to_geojson(section, tl)
feature_array.append(section_gj)
trip_geojson = gj.FeatureCollection(features=feature_array, properties=trip.data)
trip_geojson.id = str(trip.get_id())
feature_array.extend(geojson_incidents_in_range(trip.user_id,
curr_start_place.data.exit_ts,
curr_end_place.data.enter_ts))
if trip.metadata.key == esda.CLEANED_UNTRACKED_KEY:
# trip_geojson.properties["feature_type"] = "untracked"
# Since the "untracked" type is not correctly handled on the phone, we just
# skip these trips until
# https://github.com/e-mission/e-mission-phone/issues/118
# is fixed
# TODO: Once it is fixed, re-introduce the first line in this block
# and remove the None check in get_geojson_for_timeline
return None
else:
trip_geojson.properties["feature_type"] = "trip"
return trip_geojson
def get_geojson_for_ts(user_id, start_ts, end_ts):
tl = esdtl.get_cleaned_timeline(user_id, start_ts, end_ts)
tl.fill_start_end_places()
return get_geojson_for_timeline(user_id, tl)
def get_geojson_for_dt(user_id, start_local_dt, end_local_dt):
logging.debug("Getting geojson for %s -> %s" % (start_local_dt, end_local_dt))
tl = esdtl.get_cleaned_timeline_from_dt(user_id, start_local_dt, end_local_dt)
tl.fill_start_end_places()
return get_geojson_for_timeline(user_id, tl)
def get_geojson_for_timeline(user_id, tl):
"""
tl represents the "timeline" object that is queried for the trips and locations
"""
geojson_list = []
for trip in tl.trips:
try:
trip_geojson = trip_to_geojson(trip, tl)
if trip_geojson is not None:
geojson_list.append(trip_geojson)
except Exception as e:
logging.exception("Found error %s while processing trip %s" % (e, trip))
raise e
logging.debug("trip count = %d, geojson count = %d" %
(len(tl.trips), len(geojson_list)))
return geojson_list
def get_all_points_for_range(user_id, key, start_ts, end_ts):
import emission.storage.timeseries.timequery as estt
# import emission.core.wrapper.location as ecwl
tq = estt.TimeQuery("metadata.write_ts", start_ts, end_ts)
ts = esta.TimeSeries.get_time_series(user_id)
entry_it = ts.find_entries([key], tq)
points_array = [ecwe.Entry(entry) for entry in entry_it]
return get_feature_list_for_point_array(points_array)
def get_feature_list_for_point_array(points_array):
points_feature_array = [location_to_geojson(le) for le in points_array]
print ("Found %d features from %d points" %
(len(points_feature_array), len(points_array)))
feature_array = []
feature_array.append(gj.FeatureCollection(points_feature_array))
feature_array.append(point_array_to_line(points_array))
feature_coll = gj.FeatureCollection(feature_array)
return feature_coll
def get_feature_list_from_df(loc_time_df, ts="ts", latitude="latitude", longitude="longitude", fmt_time="fmt_time"):
"""
Input DF should have columns called "ts", "latitude" and "longitude", or the corresponding
columns can be passed in using the ts, latitude and longitude parameters
"""
points_array = get_location_entry_list_from_df(loc_time_df, ts, latitude, longitude, fmt_time)
return get_feature_list_for_point_array(points_array)
def get_location_entry_list_from_df(loc_time_df, ts="ts", latitude="latitude", longitude="longitude", fmt_time="fmt_time"):
location_entry_list = []
for idx, row in loc_time_df.iterrows():
retVal = {"latitude": row[latitude], "longitude": row[longitude], "ts": row[ts],
"_id": str(idx), "fmt_time": row[fmt_time], "loc": gj.Point(coordinates=[row[longitude], row[latitude]])}
location_entry_list.append(ecwe.Entry.create_entry(
"dummy_user", "background/location", ecwl.Location(retVal)))
return location_entry_list
| bsd-3-clause |
jaustinpage/frc_rekt | frc_rekt/wheel.py | 1 | 1493 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""Wheel model.
Models a wheel on an frc robot.
"""
import logging
import pandas as pd
import numpy as np
# Pandas options
pd.set_option('max_rows', 121)
pd.set_option('max_columns', 132)
pd.set_option('expand_frame_repr', False)
# just a convenience, so we dont have to type np.poly.poly
POLY = np.polynomial.polynomial
class Wheel(object):
"""Model of a Wheel."""
def __init__(self, diameter=4.0, cof=1.3, torque=0.0):
"""Wheel.
:param diameter: The diameter of the wheel in inches
:type diameter: int float
:param cof: The coefficient of friction of the wheel
:type cof: int float
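:param torque: The torque applied to the wheel (assumed to be in N*m)
:type torque: int float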
"""
self._logger = logging.getLogger(__name__)
# store diameter in meters
self._diameter = float(diameter) * 0.0254
self.cof = float(cof)
self.torque = float(torque)
self._logger.debug('%s created', str(self))
def __repr__(self):
"""Represent a wheel."""
return 'Wheel(diameter={0}, cof={1}, torque={2})'.format(
self.diameter, self.cof, self.torque)
@property
def diameter(self):
"""Wheel diameter in inches."""
return self._diameter / 0.0254
@property
def _force(self):
"""Force at wheel in N*m."""
return self.torque / (self._diameter / 2.0)
@property
def force(self):
"""Force at wheel in ft*lbs."""
return self._force * 0.737562149277
| mit |
metaml/NAB | tests/integration/true_positive_test.py | 8 | 8620 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import datetime
import pandas
import unittest
from nab.scorer import Scorer
from nab.test_helpers import generateTimestamps, generateWindows, generateLabels
class TruePositiveTest(unittest.TestCase):
def _checkCounts(self, counts, tn, tp, fp, fn):
"""Ensure the metric counts are correct."""
self.assertEqual(counts['tn'], tn, "Incorrect tn count")
self.assertEqual(counts['tp'], tp, "Incorrect tp count")
self.assertEqual(counts['fp'], fp, "Incorrect fp count")
self.assertEqual(counts['fn'], fn, "Incorrect fn count")
def setUp(self):
self.costMatrix = {"tpWeight": 1.0,
"fnWeight": 1.0,
"fpWeight": 1.0,
"tnWeight": 1.0}
def testFirstTruePositiveWithinWindow(self):
"""
First record within window has a score approximately equal to
self.costMatrix["tpWeight"]; within 4 decimal places is more than enough
precision.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 10
numWindows = 1
windowSize = 2
timestamps = generateTimestamps(start, increment, length)
windows = generateWindows(timestamps, numWindows, windowSize)
labels = generateLabels(timestamps, windows)
predictions = pandas.Series([0]*length)
index = timestamps[timestamps == windows[0][0]].index[0]
predictions[index] = 1
scorer = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score) = scorer.getScore()
self.assertAlmostEquals(score, self.costMatrix["tpWeight"], 4)
self._checkCounts(scorer.counts, length-windowSize*numWindows, 1, 0,
windowSize*numWindows-1)
def testEarlierTruePositiveIsBetter(self):
"""
If two algorithms both get a true positive within a window, the algorithm
with the earlier true positive (in the window) should get a higher score.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 10
numWindows = 1
windowSize = 2
timestamps = generateTimestamps(start, increment, length)
windows = generateWindows(timestamps, numWindows, windowSize)
labels = generateLabels(timestamps, windows)
predictions1 = pandas.Series([0]*length)
predictions2 = pandas.Series([0]*length)
t1, t2 = windows[0]
index1 = timestamps[timestamps == t1].index[0]
predictions1[index1] = 1
scorer1 = Scorer(timestamps, predictions1, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score1) = scorer1.getScore()
index2 = timestamps[timestamps == t2].index[0]
predictions2[index2] = 1
scorer2 = Scorer(timestamps, predictions2, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score2) = scorer2.getScore()
self.assertTrue(score1 > score2, "The earlier TP score is not greater than "
"the later TP. They are %f and %f, respectively." % (score1, score2))
self._checkCounts(scorer1.counts, length-windowSize*numWindows, 1, 0,
windowSize*numWindows-1)
self._checkCounts(scorer2.counts, length-windowSize*numWindows, 1, 0,
windowSize*numWindows-1)
def testOnlyScoreFirstTruePositiveWithinWindow(self):
"""
An algorithm making multiple detections within a window (i.e. true positive)
should only be scored for the earliest true positive.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 10
numWindows = 1
windowSize = 2
timestamps = generateTimestamps(start, increment, length)
windows = generateWindows(timestamps, numWindows, windowSize)
labels = generateLabels(timestamps, windows)
predictions = pandas.Series([0]*length)
window = windows[0]
t1, t2 = window
index1 = timestamps[timestamps == t1].index[0]
predictions[index1] = 1
scorer1 = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score1) = scorer1.getScore()
index2 = timestamps[timestamps == t2].index[0]
predictions[index2] = 1
scorer2 = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score2) = scorer2.getScore()
self.assertEqual(score1, score2)
self._checkCounts(scorer1.counts, length-windowSize*numWindows, 1, 0,
windowSize*numWindows-1)
self._checkCounts(scorer2.counts, length-windowSize*numWindows, 2, 0,
windowSize*numWindows-2)
def testTruePositivesWithDifferentWindowSizes(self):
"""
True positives at the left edge of windows should have the same score
regardless of width of window.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 10
numWindows = 1
timestamps = generateTimestamps(start, increment, length)
windowSize1 = 2
windows1 = generateWindows(timestamps, numWindows, windowSize1)
labels1 = generateLabels(timestamps, windows1)
index = timestamps[timestamps == windows1[0][0]].index[0]
predictions1 = pandas.Series([0]*length)
predictions1[index] = 1
windowSize2 = 3
windows2 = generateWindows(timestamps, numWindows, windowSize2)
labels2 = generateLabels(timestamps, windows2)
index = timestamps[timestamps == windows2[0][0]].index[0]
predictions2 = pandas.Series([0]*length)
predictions2[index] = 1
scorer1 = Scorer(timestamps, predictions1, labels1, windows1,
self.costMatrix, probationaryPeriod=0)
(_, score1) = scorer1.getScore()
scorer2 = Scorer(timestamps, predictions2, labels2, windows2,
self.costMatrix, probationaryPeriod=0)
(_, score2) = scorer2.getScore()
self.assertEqual(score1, score2)
self._checkCounts(scorer1.counts, length-windowSize1*numWindows, 1, 0,
windowSize1*numWindows-1)
self._checkCounts(scorer2.counts, length-windowSize2*numWindows, 1, 0,
windowSize2*numWindows-1)
def testTruePositiveAtRightEdgeOfWindow(self):
"""
True positives at the right edge of a window should yield a score of
approximately zero; the scaled sigmoid scoring function crosses the zero
between a given window's last timestamp and the next timestamp (immediately
following the window.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 1000
numWindows = 1
windowSize = 100
timestamps = generateTimestamps(start, increment, length)
windows = generateWindows(timestamps, numWindows, windowSize)
labels = generateLabels(timestamps, windows)
predictions = pandas.Series([0]*length)
# Make prediction at end of the window; TP
index = timestamps[timestamps == windows[0][1]].index[0]
predictions[index] = 1
scorer1 = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score1) = scorer1.getScore()
# Make prediction just after the window; FP
predictions[index] = 0
index += 1
predictions[index] = 1
scorer2 = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score2) = scorer2.getScore()
# TP score + FP score + 1 should be very close to 0; the 1 is added to
# account for the subsequent FN contribution.
self.assertAlmostEquals(score1 + score2 + 1, 0.0, 3)
self._checkCounts(scorer1.counts, length-windowSize*numWindows, 1, 0,
windowSize*numWindows-1)
self._checkCounts(scorer2.counts, length-windowSize*numWindows-1, 0, 1,
windowSize*numWindows)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
lilleswing/deepchem | examples/toxcast/toxcast_rf.py | 4 | 1089 | """
Script that trains Sklearn multitask models on toxcast & tox21 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from deepchem.molnet import load_toxcast
import deepchem as dc
toxcast_tasks, toxcast_datasets, transformers = load_toxcast()
(train_dataset, valid_dataset, test_dataset) = toxcast_datasets
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
def model_builder(model_dir):
sklearn_model = RandomForestClassifier(
class_weight="balanced", n_estimators=500, n_jobs=-1)
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(toxcast_tasks, model_builder)
# Fit trained model
model.fit(train_dataset)
print("About to evaluate model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
| mit |
dermotte/liresolr | src/main/python/weights32_import_data.py | 1 | 1994 | import re
import numpy as np
import matplotlib.pyplot as plt
input_file = '/home/mlux/projects/wipo2018/train/weights32.txt'
state = 0
count_images = 0
count_classes = 0
current_image = ''
current_classes = ''
def output(data, file = None):
"""
outputs either to file or to std out
:param data: the string to write
:param file: the file to write into
"""
if file is None:
print(data)
else:
file.write(data + "\n")
if __name__ == "__main__":
file_out = open('test.xml', 'w')
# parse data into an XML file understood by Solr ..
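# Expected layout of weights32.txt (inferred from the parser below): an image
# path line ending in .png/.jpg (a leading "./" is stripped), a "found classes:"
# line, then one line per class containing an id like dd.dd.dd and a trailing
# weight. A blank line terminates each record and triggers the write, so the
# file should end with a blank line.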
with open(input_file) as f:
for line in f:
l = line.strip()
if len(l) == 0:
if state == 2 and count_classes != 32:
print(current_image + ': ' + str(count_classes))
if current_image != '':
output("<doc>", file_out)
output("\t<field name=\"id\">" + current_image + "</field>", file_out)
output("\t<field name=\"classes_ws\">" + current_classes.strip() + "</field>", file_out)
output("</doc>", file_out)
# reset everything
state = 0
count_classes = 0
current_image = ''
current_classes = ''
if (l.lower().endswith('.png') or l.lower().endswith('.jpg')) and state == 0:
current_image = l[2:]
count_images += 1
state = 1
if state == 1 and l.lower() == 'found classes:':
state = 2
if state == 2 and re.search("\d\d\.\d\d\.\d\d", l) is not None:
count_classes += 1
class_id = re.search("\d\d\.\d\d\.\d\d", l).group(0)
weight = float(re.search("\d+\.\d\d$", l).group(0))
for i in range(0, round(weight)):
current_classes += class_id + " "
print("{} files".format(count_images))
| gpl-2.0 |
cbertinato/pandas | pandas/tests/io/formats/test_console.py | 1 | 2447 | import pytest
from pandas._config import detect_console_encoding
class MockEncoding: # TODO(py27): replace with mock
"""
Used to add a side effect when accessing the 'encoding' property. If the
side effect is a str in nature, the value will be returned. Otherwise, the
side effect should be an exception that will be raised.
"""
def __init__(self, encoding):
super().__init__()
self.val = encoding
@property
def encoding(self):
return self.raise_or_return(self.val)
@staticmethod
def raise_or_return(val):
if isinstance(val, str):
return val
else:
raise val
@pytest.mark.parametrize('empty,filled', [
['stdin', 'stdout'],
['stdout', 'stdin']
])
def test_detect_console_encoding_from_stdout_stdin(monkeypatch, empty, filled):
# Ensures that when sys.stdout.encoding or sys.stdin.encoding is used when
# they have values filled.
# GH 21552
with monkeypatch.context() as context:
context.setattr('sys.{}'.format(empty), MockEncoding(''))
context.setattr('sys.{}'.format(filled), MockEncoding(filled))
assert detect_console_encoding() == filled
@pytest.mark.parametrize('encoding', [
AttributeError,
IOError,
'ascii'
])
def test_detect_console_encoding_fallback_to_locale(monkeypatch, encoding):
# GH 21552
with monkeypatch.context() as context:
context.setattr('locale.getpreferredencoding', lambda: 'foo')
context.setattr('sys.stdout', MockEncoding(encoding))
assert detect_console_encoding() == 'foo'
@pytest.mark.parametrize('std,locale', [
['ascii', 'ascii'],
['ascii', Exception],
[AttributeError, 'ascii'],
[AttributeError, Exception],
[IOError, 'ascii'],
[IOError, Exception]
])
def test_detect_console_encoding_fallback_to_default(monkeypatch, std, locale):
# When both the stdout/stdin encoding and locale preferred encoding checks
# fail (or return 'ascii', we should default to the sys default encoding.
# GH 21552
with monkeypatch.context() as context:
context.setattr(
'locale.getpreferredencoding',
lambda: MockEncoding.raise_or_return(locale)
)
context.setattr('sys.stdout', MockEncoding(std))
context.setattr('sys.getdefaultencoding', lambda: 'sysDefaultEncoding')
assert detect_console_encoding() == 'sysDefaultEncoding'
| bsd-3-clause |
pysg/pyther | parafinas_rkpr.py | 1 | 30466 | import pyther as pt
from scipy import optimize
from scipy.optimize import fsolve
import numpy as np
import pandas as pd
from numpy import linalg as LA
from matplotlib import pyplot
import matplotlib.pyplot as plt
class Eos_equations():
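"""
Cubic equation of state (SRK, PR or RKPR) written in terms of the residual
Helmholtz energy: mixture parameters D and B, a Newton solver for the
volume V(T, P, n), fugacity coefficients for the fluid phase, and the
solid-fugacity correction factors used for the wax (n-tetracosane)
solid-fluid equilibrium.
"""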
def __init__(self, eq, w, Tc, Pc, Tr, R, ep, ni, nT, nC, V, T, P, kij, lij, delta_1, k):
self.eq = eq
self.w = w
self.Tc = Tc
self.Pc = Pc
self.Tr = Tr
self.R = R
self.ep = ep
self.ni = ni
self.nT = nT
self.nC = nC
self.V = V
self.T = T
self.P = P
self.kij = kij
self.lij = lij
self.delta_1 = delta_1
self.k = k
if self.eq == "SRK":
# Soave-Redlich-Kwong (SRK)
# SRK: delta_1 = 1, delta_2 = 0; ac = 0.42748*R^2*Tc^2/Pc, bc = 0.08664*R*Tc/Pc
self.s1, self.s2 = 1, 0
self.m = 0.480 + 1.574 * self.w - 0.175 * self.w ** 2
self.ac = 0.42748023 * self.R ** 2 * self.Tc ** 2 / self.Pc
self.bc = 0.086640 * self.R * self.Tc / self.Pc
elif self.eq == "PR":
# Peng-Robinson (PR)
self.s1, self.s2 = 1 + 2 ** 0.5, 1 - (2 ** 0.5)
self.m = 0.37464 + 1.54226 * self.w - 0.26992 * self.w ** 2
self.ac = 0.45723553 * self.R ** 2 * self.Tc ** 2 / self.Pc
self.bc = 0.077796070 * self.R * self.Tc / self.Pc
self.alfa = (1 + self.m * (1 - (self.T / self.Tc) ** 0.5)) ** 2
self.dalfadT = - (self.m / self.T) * (self.T / self.Tc) ** 0.5 * (self.m * (- (self.T / self.Tc) ** 0.5 + 1) + 1)
ter_1 = 0.5 * self.m ** 2 * (self.T / self.Tc) ** 1.0 / self.T ** 2
ter_2 = 0.5 * self.m * (self.T / self.Tc) ** 0.5 * (self.m * (- (self.T / self.Tc) ** 0.5 + 1) + 1) / self.T ** 2
self.d2alfaT2 = ter_1 + ter_2
self.a_ii = self.ac * self.alfa
self.b_ii = self.bc
self.da_iidT = self.ac * self.dalfadT
d2adT2_puros = self.ac * self.d2alfaT2
elif self.eq == "RKPR":
# (RKPR)
self.delta_1m = sum(self.ni * self.delta_1)
self.s1, self.s2 = self.delta_1m, (1-self.delta_1m)/(1+self.delta_1m)
# hard-coded data for C1 - C24 (methane - n-tetracosane)
self.ac = np.array([2.3213, 208.3471])
self.bc = np.array([0.030088, 0.531299])
self.a_ii = self.ac * (3/(2+(self.T / self.Tc))) ** self.k
self.b_ii = self.bc
self.da_iidT = -self.k * self.a_ii / self.Tc/(2+(self.T / self.Tc))
dadT2 = -(self.k + 1) * self.da_iidT / self.Tc / (2 + (self.T / self.Tc))
else:
print("Che boludo... Modelo no valido, intentalo de nuevo !!! ")
def parametros(self):
if self.nC > 1:
self.aij = np.ones((len(self.ni), len(self.ni)))
self.bij = np.ones((len(self.ni), len(self.ni)))
self.daijdT = np.ones((len(self.ni), len(self.ni)))
for j in range(self.nC):
for i in range(self.nC):
self.aij[i, j] = (self.a_ii[i] * self.a_ii[j]) ** 0.5
self.bij[i, j] = (self.b_ii[i] + self.b_ii[j]) / 2
self.bij[i, j] = self.bij[i, j]
self.daijdT[i, j] = (self.da_iidT[i] * self.da_iidT[j]) ** 0.5
for i in range(self.nC):
for j in range(self.nC):
if i == j:
self.aij[i, j] = self.a_ii[i] * (1 - self.kij[i, j])
self.daijdT[i, j] = self.da_iidT[i] * (1 - self.kij[i, j])
elif i != j:
self.aij[i, j] = self.aij[i, j] * (1 - self.kij[i, j])
self.daijdT[i, j] = self.daijdT[i, j] * (1 - self.kij[i, j])
if self.nC == 1:
return self.a_ii, self.b_ii, self.da_iidT
else:
# print("inicial aij = ", self.aij)
# print("bij = ", self.bij)
# print("daijT = ", self.daijdT)
return self.aij, self.bij, self.daijdT
def parametro_D(self):
if self.nC == 1:
self.D = self.ni ** 2 * self.a_ii
self.Di = 2 * self.ni * self.a_ii
else:
di = np.ones((len(self.ni), len(self.ni)))
self.Di = np.ones((len(self.ni)))
self.D = np.ones((len(self.ni)))
for i in range(self.nC):
for j in range(self.nC):
di[i, j] = self.ni[j] * self.aij[i, j]
self.Di[i] = 2 * np.sum(di[i, :])
self.D = 0.5 * np.sum(self.ni * self.Di)
return self.D
def parametro_delta_1(self):
if self.nC == 1:
self.D1m = np.zeros((len(self.ni) - 1))
self.dD1i = np.ones((len(self.ni)))
self.dD1ij = np.ones((len(self.ni), len(self.ni)))
for i in range(self.nC):
self.D1m = self.D1m + self.ni[i] * self.delta_1[i]
self.D1m = self.D1m / self.nT
# for i in range(self.nC):
# self.dD1i[i] = (self.delta_1[i] - self.D1m) / self.nT
# for j in range(self.nC):
# self.dD1ij[i,j] = (2.0 * self.D1m - self.delta_1[i] - self.delta_1[j]) / self.nT ** 2
else:
self.D1m = np.zeros((len(self.ni) - 1))
self.dD1i = np.ones((len(self.ni)))
self.dD1ij = np.ones((len(self.ni), len(self.ni)))
for i in range(self.nC):
self.D1m = self.D1m + self.ni[i] * self.delta_1[i]
self.D1m = self.D1m / self.nT
for i in range(self.nC):
self.dD1i[i] = (self.delta_1[i] - self.D1m) / self.nT
for j in range(self.nC):
self.dD1ij[i,j] = (2.0 * self.D1m - self.delta_1[i] - self.delta_1[j]) / self.nT ** 2
return self.D1m, self.dD1i, self.dD1ij
def parametro_B(self):
if self.nC == 1:
self.B = self.ni * self.b_ii
else:
self.aux = np.zeros((len(self.ni)))
for i in range(self.nC):
for j in range(self.nC):
self.aux[i] = self.aux[i] + self.ni[j] * self.bij[i, j]
self.B = np.sum(self.ni * self.b_ii)
# print("B = ", self.B)
return self.B
def presion(self):
'''
The presion() method computes the system pressure P(T, V, N)
at a given temperature T, number of moles N and volume V.
R = universal gas constant
nT = total number of moles in the system
Pcal = Peos = pressure calculated with the equation of state
ArV = first partial derivative of the residual Helmholtz energy with
respect to the volume V, at constant T and N
'''
self.gv = self.R * self.B / (self.V * (self.V - self.B))
self.fv = - 1 / ((self.V + self.s1 * self.B) * (self.V + self.s2 * self.B))
self.ArV = -self.nT * self.gv * self.T - self.D * self.fv
self.Pcal = self.nT * self.R * self.T / self.V - self.ArV
return self.Pcal
def dP_dV(self):
self.dPdV = -self.ArV2 - self.R * self.T * self.nT / self.V ** 2
return self.dPdV
def Z_factor(self):
self.Z = (self.P * self.V) / (self.nT * self.R * self.T)
return self.Z
def P_ideal(self):
self.Pxi = (self.ni * self.P) / self.nT
return self.Pxi
def dF_dV(self):
'''
Primera derivada de F con respecto al volumen Ecu. (68)
'''
self.gv = self.R * self.B / (self.V * (self.V - self.B))
self.fv = - 1 / ((self.V + self.s1 * self.B) * (self.V + self.s2 * self.B))
self.ArV = -self.nT * self.gv * self.T - self.D * self.fv
return self.ArV
def dF_dVV(self):
'''
Second derivative of F with respect to the volume, Eq. (74)
'''
self.gv2 = self.R * (1 / self.V ** 2 - 1 / (self.V - self.B) ** 2)
self.fv2 = (- 1 / (self.V + self.s1 * self.B) ** 2 + 1 / (self.V + self.s2 * self.B) ** 2) / self.B / (self.s1 - self.s2)
self.ArV2 = - self.nT * self.gv2 * self.T - self.D * self.fv2
return self.ArV2
def volumen_1(self):
'''
Computes the fluid volume V(T, P, n) at a specified temperature T,
pressure P and total number of moles nT.
A Newton iteration with the analytical derivative is used.
TODO: replace with a SciPy root-finding routine.
'''
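# A possible SciPy-based replacement, sketched as comments (illustrative only;
# it mirrors the Newton loop below and assumes parametros() has already been
# called so the mixture parameters can be evaluated):
#   def _residual(V):
#       self.V = V
#       self.parametro_D()
#       self.parametro_B()
#       return np.log(self.presion()) - np.log(self.P)
#   self.V = optimize.newton(_residual, 1.05 * self.B)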
# self.P = P
self.V = 1.05 * self.B # SRK and PR
# self.V = 1.10 * self.B # RKPR
lnP = np.log(self.P)
# print ("P_esp = ", self.P)
# print ("V_ini = ", self.V)
Pite = self.presion()
lnPcal = np.log(Pite)
# h = self.P - Pite
h = lnP - lnPcal
errorEq = abs(h)
# print ("ErrorP = ", errorEq)
i = 0
s = 1.0
while errorEq > self.ep:
self.parametro_D()
self.parametro_B()
self.dF_dV()
self.dF_dVV()
dPite = self.dP_dV()
Pite = self.presion()
lnPcal = np.log(Pite)
# h = self.P - Pite
h = lnP - lnPcal
dh = -dPite
# print self.nT
self.V = self.V - s * h / dh
errorEq = abs(h)
# print "ErrorP = ", errorEq
# print "V = ", self.V
# print "Pite = ", Pite
i += 1
if i >= 900:
pass
# break
# print ("FV = ", dPite)
return self.V
def funcion_energia_F(self):
self.g = self.R * np.log(1 - self.B / self.V)
self.bv = self.B / self.V
self.f = np.log((self.V + self.s1 * self.B) / (self.V + self.s2 * self.B)) / self.B / (self.s1 - self.s2)
self.Ar = -self.nT * self.g * self.T - self.D * self.f
# print (("g = ", self.g))
# print (("f: ", self.f))
# print (("Ar: ", self.Ar))
return self.g, self.f, self.Ar, self.bv
def tomar_B(self):
# print ("tomando B =", self.B)
return self.B + 10
def derivadas_delta_1(self):
# DERIVATIVES OF f WITH RESPECT TO DELTA1
auxD2 = (1 + 2 / (1 + self.s1) ** 2)
# print("B delta1 = ", self.B)
como_1 = (1 / (self.V + self.s1 * self.B) + 2 / (self.V + self.s2 * self.B) / (1 + self.s1) ** 2)
como_2 = self.f * auxD2
self.fD1 = como_1 - como_2
# self.fD1 = (1 / (self.V + self.s1 * self.B) + 2 / (self.V + self.s2 * self.B) / (1 + self.s1) ** 2) - self.f * auxD2
self.fD1 = self.fD1/(self.s1 - self.s2)
return self.fD1
def primeras_derivadas1(self):
# print ("# compo = .......", self.nC)
if self.nC == 1:
AUX = self.R * self.T / (self.V - self.B)
self.fB = -(self.f + self.V * self.fv) / self.B
self.FFB = self.nT * AUX - self.D * self.fB
self.Di = 2 * self.nT * self.ac * self.alfa
self.Bi = self.bc
if self.eq != "RKPR":
self.Arn = -self.g * self.T + self.FFB * self.Bi - self.f * self.Di
else:
self.Arn = -self.g * self.T + self.FFB * self.Bi - self.f * self.Di - self.D * self.fD1 * self.dD1i
else:
# Derivando la ecuación (64) se obtiene la ecuación eq (106)
self.Bi = np.ones((len(self.ni)))
for i in range(self.nC):
self.Bi[i] = (2 * self.aux[i] - self.B) / self.nT
AUX = self.R * self.T / (self.V - self.B)
self.fB = -(self.f + self.V * self.fv) / self.B
self.FFB = self.nT * AUX - self.D * self.fB
if self.eq != "RKPR":
self.Arn = -self.g * self.T + self.FFB * self.Bi - self.f * self.Di
else:
# DERIVATIVES OF f WITH RESPECT TO DELTA1
auxD2 = (1 + 2 / (1 + self.s1) ** 2)
print("B delta1 = ", self.B)
co_1 = (1 / (self.V + self.s1 * self.B) + 2 / (self.V + self.s2 * self.B) / (1 + self.s1) ** 2)
co_2 = self.f * auxD2
self.fD1 = co_1 - co_2
# self.fD1 = (1 / (self.V + self.s1 * self.B) + 2 / (self.V + self.s2 * self.B) / (1 + self.s1) ** 2) - self.f * auxD2
self.fD1 = self.fD1/(self.s1 - self.s2)
self.Arn = -self.g * self.T + self.FFB * self.Bi - self.f * self.Di - self.D * self.fD1 * self.dD1i
# print("Bi = ", self.Bi)
# print ("Di = ", self.Di)
# print ("fB = ", self.fB)
# print ("FFB = ", self.FFB)
# print ("Arn cal = ", self.Arn)
return self.Arn, self.Arn, self.Arn
def coeficientes_fugacidad(self):
self.Z = self.Z_factor()
self.lnOi = self.Arn / (self.R * self.T) - np.log(self.Z)
self.Oi = np.exp(self.lnOi)
# print("lnOi = ", self.lnOi)
# print("Oi = ", self.Oi)
return self.Oi
def fugacidad(self):
self.Z = self.Z_factor()
self.Pxi = self.P_ideal()
self.lnFi = self.Arn / (self.R * self.T) - np.log(self.Z) + np.log(self.Pxi)
self.Fi = np.exp(self.lnFi)
self.PHILOG = self.Arn / (self.R * self.T) - np.log(self.Z)
self.PHILOG_i = self.Arn - np.log(self.Z)
self.FUGLOG = self.Arn / (self.R * self.T) + np.log(self.ni) + np.log((self.nT * self.R * self.T) / self.V)
# print ("Z = ", self.Z)
# print ("Arn = ", self.Arn)
# print ("Arn/RT = ", self.Arn / (self.R * self.T))
# print ("ln(ni) = ", np.log(self.ni))
# print ("ln (nT*R*T/V) = ", np.log((self.nT * self.R * self.T) / self.V))
# print ("lnFi = ", self.lnFi)
# print ("Fi = ", self.Fi)
# print ("PHILOG = ", self.PHILOG)
# print ("PHILOG_i = ", self.PHILOG_i)
# print ("FUGLOG = ", self.FUGLOG)
return self.Fi
def exp_sol(self):
'''
This method computes the correction factor applied to the fugacity of the
fluid component in order to obtain the fugacity of the same component in
the solid state.
Pure-solid fugacity:
fi_s(T, P) = fi_l(T, P) * EXP(T, P)
'''
Tfus = 323.75
# melting temperature of n-tetracosane
# Ti_f given in Kelvin
par_sol = np.array([[-176120.0, 8196.20, -55.911, 0.19357, -0.0002235],
[-1.66e6, 8.31e3, 0.0, 0.0, 0.0]])
par_liq = np.array([[423160.0, 1091.9, 0.0, 0.0, 0.0],
[7.01e5, 1.47e3, 0.0, 0.0, 0.0]])
#print ("par_sol", par_sol)
#print ("par_liq", par_liq)
# Cp units are J/(kmol.K)
Cp_solido = par_sol[:, 0] + par_sol[:, 1] * T + par_sol[:, 2] * T ** 2 + par_sol[:, 3] * T ** 3 + par_sol[:, 4] * T ** 4
#print ("Cp_solido", Cp_solido)
Cp_liquido= par_liq[:, 0] + par_liq[:, 1] * T + par_liq[:, 2] * T ** 2 + par_liq[:, 3] * T ** 3 + par_liq[:, 4] * T ** 4
#print ("Cp_liquido", Cp_liquido)
DeltaCp = (Cp_solido - Cp_liquido) * (1.0 / 1000)
#print ("Delta Cp", DeltaCp)
# heat of fusion given in kcal/mol (converted to J/mol below)
DeltaH_f = np.array([13.12, 21.23]) * (1000 / 1.0) * (4.18 / 1.0)
#print ("Delta H de fusion", DeltaH_f)
T_f = np.array([323.75, 349.05])
#print ("Temperaturas de fusion = ", T_f)
Rp = 8.314
A = (DeltaH_f / (Rp * Tfus)) * (1 - (Tfus / T))
B = (DeltaCp / Rp) * (1 - (Tfus / T))
C = (DeltaCp / Rp) * np.log(Tfus / T)
self.EXP = np.exp(A - B - C)
#print ("A = ", A)
#print ("B = ", B)
#print ("C = ", C)
#print ("EXP = ", self.EXP)
return self.EXP
def exp_sol_1(self):
'''
This method computes the correction factor applied to the fugacity of the
fluid component in order to obtain the fugacity of the same component in
the solid state.
Pure-solid fugacity:
fi_s(T, P) = fi_l(T, P) * EXP(T, P)
'''
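# The expression below appears to be a solid/liquid fugacity-ratio
# (Poynting-type) correction referenced to the triple point (Tpt, Ppt):
#   ln(f_s/f_l) ~ (AH/(R*Tpt))*(1 - Tpt/T) + (Av/(R*T))*(P - Ppt)
# Note that this method returns the exponent itself (a + b), whereas exp_sol_3
# returns exp(...) of the corresponding expression; the /1000 and /100 factors
# look like unit conversions kept from the original data.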
Tpt = 323.75
Ppt = 1.38507E-8
R = 8.314472
AH = 54894000
Av = -0.0376300841 #m3/kmol
a = ((AH / (R * Tpt)) * (1 - (Tpt / self.T))) / 1000
b = ((Av / (R * self.T)) * (self.P - Ppt)) / 100
self.EXP_1 = a + b
return self.EXP_1
def exp_sol_3(self):
'''
This method computes the correction factor applied to the fugacity of the
fluid component in order to obtain the fugacity of the same component in
the solid state.
Pure-solid fugacity:
fi_s(T, P) = fi_l(T, P) * EXP(T, P)
'''
# [=] K
# [=] bar
# [m3 / Kmol]
# R constant [=] 0.08314472 bar.l/(mol.K)
Tpt = 323.75
Ppt = 3.2015002E-8
Avsl = -0.0565500835
c1 = -14213.5004
c2 = 605153.4382
c3 = -591592.556
R = 0.08314472
A1 = c1 * (1 - Tpt / self.T)
A2 = c2 * (-1 + Tpt / self.T + np.log(self.T / Tpt))
A3 = c3 * (-1 + self.T / (2 * Tpt) + Tpt / (2 * self.T)) + (Tpt / self.T) * (self.P - Ppt)
FE = (Avsl / (R * self.T)) * (A1 + A2 + A3)
self.EXP_3 = np.exp(FE)
#EFE
return self.EXP_3
def fluido(self):
#self.P = P
#ab = self.parametros(self.ni, self.nT, self.nC, self.V, self.T)
ab = self.parametros()
#print (("aij = ", ab[0]))
#print (("bij = ", ab[1]))
#print ("................................................................")
D = self.parametro_D()
B = self.parametro_B()
#print (("D = ", D))
#print (("B = ", B))
#print ("................................................................")
Vol_1 = self.volumen_1()
#print (("Volumen_1 = ", Vol_1))
#print (("Densidad =", 1 / Vol_1))
#print ("................................................................")
F = self.funcion_energia_F()
#print (("g = ", F[0]))
#print (("f = ", F[1]))
#print (("F = ", F[2]))
#print (("bv = ", F[3]))
#print ("................................................................")
dF = self.primeras_derivadas1()
#print (("dFdni = ", dF[0]))
#print (("dFdT = ", dF[1]))
#print (("dFdV = ", dF[2]))
#print ("................................................................")
Z = self.Z_factor()
#print ("Z =", Z)
Zcal = (self.P * Vol_1) / (self.nT * self.R * self.T)
#print ("Zcal =", Zcal)
#print ("................................................................")
Pq = self.presion()
#print (("Pcal =", Pq))
#print ("................................................................")
self.Fug = self.fugacidad()
#print (("Fug = ", self.Fug[0]))
self.CoeFug = self.coeficientes_fugacidad()
#self.CoeFug = self.Fug / (self.ni * self.P)
#print (("Fug = ", self.Fug))
#print (("CoeFug = ", self.CoeFug))
#print (("lnCoeFug = ", np.log(self.Fug / (self.ni * self.P))))
#print ("................................................................")
return self.Fug
def solido(self):
if self.nC == 1:
Fug = self.fluido()
#EXP = self.exp_sol()
#EXP = self.exp_sol_1()
EXP = self.exp_sol_3()
#print("Exponente = ", EXP)
FugS = Fug[0] * EXP
#print ("FugS = ", FugS)
else:
print ("Aún no se qué hacer para una mezcla de sólidos !!!")
FugS = 1
return FugS
# In[17]:
def calculaFugacidad(x, Pe, nCf, eq, TcmDato, PcmDato, wmDato):
#---------------------------------------------------------------------------
#print ("-"*100)
#itera +=
#print (itera)
# temperature in K
# pressure in bar
# R constant [=] 0.08314472 bar.l/(mol.K)
# selection of the equation of state,
# eq = 1, (SRK)
# eq = 2, (PR)
# ep = convergence criterion for the volumen_1(self, P) method
T = x # 335.42 # x # 366.78 # 356.429 # 335.42 # 348.89 #327.0
#print("Temperatura = ", T)
P = Pe # 2575.0 # 2064.7 # 1524.4 #1164.2 # 865.0
# 560.3 # x #1054.6 #1560.3 # 2064.7 # 1524.4 # 560.3 # 1164.2 #865.0
R = 0.08314472
#eq = "PR"
ep = 1e-5#1e-6
#---------------------------------------------------------------------------
# methane - C24
#Tcm = np.array([190.564, 804.0])
#Pcm = np.array([45.99, 9.672])
#wm = np.array([0.0115478, 1.07102])
Tcm = TcmDato
Pcm = PcmDato
wcm = wmDato
nC = nCf
if nC == 1:
#print ("...............................................................")
ni = np.array([1.0])
#print ("Número de moles = ", ni)
# C24
kij = 0.0
lij = 0.0
# methane - ethane
delta_1 = np.array([0.85])
k = np.array([1.50758])
#----------------------------------------------------------------------
#C24
Tc = Tcm[1]
Pc = Pcm[1]
w = wcm[1]
#print ("Temperatura Critica = ", Tc, "K")
#print ("Presión Critica = ", Pc, "bar")
#print ("Factor Acentrico = ", w)
#print ("...............................................................")
elif nC == 2:
#print ("...............................................................")
        # methane - C24
ni = np.array([1 - 0.901, 0.901])
#ni = np.array([1 - 0.104, 0.104])
#print ("Número de moles = ", ni)
        # NOTE: this first kij matrix is immediately overridden by the assignment below
        kij = np.array([[0.000000, 0.083860],
                        [0.083860, 0.000000]])
        kij = np.array([[0.000000, 0.059600],
                        [0.059600, 0.000000]])
lij = 0.0132
        # Methane - C24
delta_1 = np.array([0.85, 2.40])
k = np.array([1.50758, 4.90224])
# metano sigma1 = 0.9253, sigma = 0.85, k = 1.49345, k = 1.50758
# C24 sigma = 2.40 k = 4.90224
Tc = Tcm
Pc = Pcm
w = wcm
#print ("Temperatura Critica = ", Tc, "K")
#print ("Presión Critica = ", Pc, "bar")
#print ("Factor Acentrico = ", w)
#print ("...............................................................")
# ---------------------------------------------------------------------------
    # Reduced temperature
    Tr = T / Tc
    # pure C24
V = 0.141604834257319
nT = np.sum(ni)
# print ("...................................................................")
fugacidad = Eos_equations(eq, w, Tc, Pc, Tr, R, ep, ni, nT, nC, V, T, P, kij, lij, delta_1, k)
#print(fugacidad.parametro_delta_1())
#print(fugacidad.ac)
#print(fugacidad.bc)
#print(fugacidad.parametros())
#print("D = ",fugacidad.parametro_D())
#print("B = ",fugacidad.parametro_B())
#print("tomar B = ", fugacidad.tomar_B())
#print("fD1 = ", fugacidad.derivadas_delta_1())
if nC == 1:
SOL = fugacidad.solido()
#print ("...................................................................")
#print ("FUG_SOLID = ", SOL)
#print ("lnFUG_SOLID = ", np.log(SOL))
#print ("...................................................................")
return SOL
else:
flu_1 = fugacidad.fluido()
return flu_1
# In[20]:
eq = "PR"
#TcmDato #= np.array([Temperatura_Critica_1, Temperatura_Critica_2])
#PcmDato #= np.array([Presion_Critica_1, Presion_Critica_2])
#wmDato #= np.array([Factor_Acentrico_1, Factor_Acentrico_2])
#TcmDato
#calculaFugacidad(335.2, 1034.4, 2, eq, TcmDato, PcmDato, wcmDato)
# In[21]:
t_exp = [323.65, 326.04, 326.43, 328.12, 329.45, 329.89, 333.43, 335.12,
340.19, 344.58, 346.65, 352.53, 362.45, 362.76, 371.82, 379.74]
temp = np.array(t_exp)
p_exp = [1, 101.0, 136.9, 183.8, 266.2, 266.8, 426.9, 480.3, 718.9, 912.5,
1010.6, 1277.8, 1778.0, 1825.1, 2323.4, 2736.1]
pres = np.array(p_exp)
pos = np.arange(len(pres))
Tcal = np.ones((len(pres)))
Tcal
# In[22]:
def equilibrioSF(x, Pe, n1, n2):
    # fugacity of the pure solid
FugS = calculaFugacidad(x, Pe, n1, eq, TcmDato, PcmDato, wcmDato)
#print(eq, TcmDato, PcmDato, wcmDato)
    # fugacity of the heavy component in the fluid mixture
FugF = calculaFugacidad(x, Pe, n2, eq, TcmDato, PcmDato, wcmDato)
    # equality-of-fugacities function for the solid and fluid phases
eqSF = np.abs(np.abs(np.log(FugS)) - np.abs(np.log(FugF[1])))
#print ("-"*100)
#print ("ln(Fugacidad Sólido) = ", np.log(FugS))
#print ("ln(Fugacidad Fluido) = ", np.log(FugF[1]))
#print ("ln(Fugacidad Sólido) - ln(Fugacidad Fluido) = ", eqSF)
return eqSF
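# A minimal standalone sketch of an alternative to fsolve for the solid-fluid
# equilibrium: since equilibrioSF returns an absolute residual, the equilibrium
# temperature can also be located by minimizing it over a temperature bracket.
# The bracket limits and the function name below are illustrative assumptions.
def equilibrioSF_minimize_sketch(Pe, T_low=300.0, T_high=400.0):
    from scipy.optimize import minimize_scalar
    res = minimize_scalar(equilibrioSF, bounds=(T_low, T_high), args=(Pe, 1, 2),
                          method='bounded')
    return res.x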
# T [=] K
guess = [346.5]
# Pe [=] bar
Pe = 136.9
dppr_file = "PureFull_mod.xls"
# component = 'METHANE'
# component = "ETHANE"
# component = "3-METHYLHEPTANE"
# component = "n-PENTACOSANE"
component = "ISOBUTANE"
components = ["METHANE", "n-TETRACOSANE"]
properties_data = pt.Data_parse()
component_eos_list = np.zeros( (len(components),4) )
dinputs = np.zeros( (len(components),4) )
for index, component in enumerate(components):
properties_component = properties_data.selec_component(dppr_file, component)
pt.print_properties_component(component, properties_component)
dinputs[index] = np.array([properties_component[1]['Tc'], properties_component[1]['Pc'],
properties_component[1]['Omega'], properties_component[1]['Vc']])
components_table = pd.DataFrame(component_eos_list, index=components, columns=['ac', 'b', 'rm', 'del1'])
print(dinputs)
TcmDato = dinputs[:, 0]
PcmDato = dinputs[:, 1]
wcmDato = dinputs[:, 2]
#Tcal = fsolve(equilibrioSF,guess,args=(Pe, 1, 2), xtol=1e-4)
#print(Tcal, "K")
# In[9]:
#button = widgets.Button(description="Sólido-Fluido")
#display(button)
nnCC_1 = 1
nnCC_2 = 2
def calcularSolidoFluido():
#clear_output()
#Tcal = fsolve(equilibrioSF,guess,args=(Pe, 1, 2), xtol=1e-4)
TemEquilibrioSF = fsolve(equilibrioSF,guess,args=(Pe, 1, 2), xtol=1e-4)
print ("-"*100)
print("Temperatura de Equilibrio SF = ", TemEquilibrioSF, "K")
#button.on_click(calcularSolidoFluido)
#display(button)
calcularSolidoFluido()
# In[23]:
#Tcal = np.ones()
for x, Pes, i in zip(temp, pres, pos):
print ("Temperatura inicial = ", x, "K", "Presión = ", Pes, "bar", "Datos experimental = ", i+1)
guess = x # T [=] K
Pe = Pes # [=] bar
# Tcal[i] = fsolve(equilibrioSF,guess,args=(Pe, 1, 2), xtol=1e-4)
Tcal[i] = fsolve(equilibrioSF,guess,args=(Pe, 1, 2), xtol=1e-4)
#ErrorT = np.abs(Tcal - temp)
#ErrorT
#print (Tcal, temp, ErrorT)
Tcal
# In[24]:
Tcal
# In[25]:
Tres = np.array([322.65861561, 324.91946742, 325.73456905, 326.80151121,
328.68045402, 328.69415114, 332.3526483 , 333.57248076,
338.99640222, 343.33723415, 345.50684642, 351.28742799,
361.49784425, 362.4145721 , 371.63445321, 378.63493779])
difference_temperature = Tcal - temp
# In[26]:
fig= pyplot.scatter(Tres,p_exp)
# In[27]:
def diagramaSolidoFluido():
    pyplot.scatter(Tres,pres, color = 'red', label = 'PR')
    pyplot.scatter(temp,pres, label = 'Data')
    pyplot.title('Solid-Fluid Equilibrium Temperature')
    pyplot.legend(loc="upper left")
    pyplot.xlabel('Temperature [=] K')
    pyplot.ylabel('Pressure [=] bar')
# diagramaSolidoFluido()
# In[30]:
C4 = 325.74196687
C5 = 325.74761488
C6 = 325.75368989
C7 = 325.75989348
C8 = 325.76613659
C9 = 325.76789332
C10 = 325.77033257
C11 = 325.77111651
C12 = 325.77254179
C13 = 325.773858
Ccomp = np.array([4, 5, 6, 7, 8, 9, 10, 11, 12, 13])
Tcomp = np.array([C4, C5, C6, C7, C8, C9, C10, C11, C12, C13])
Tcomp
# In[31]:
def DiagramaSerieSF():
    pyplot.scatter(Ccomp,Tcomp, color = 'red', label = 'PR')
    pyplot.title('C4-C13 Series: SF Equilibrium Temp.')
    pyplot.legend(loc="upper left")
    pyplot.xlabel('Number of Carbons')
    pyplot.ylabel('Temperature [=] K')
#DiagramaSerieSF()
def volumen_1(self):
self.V = 1.05 * self.B # SRK y PR
# self.V = 1.10 * self.B # RKPR
lnP = np.log(self.P)
Pite = self.presion()
lnPcal = np.log(Pite)
h = lnP - lnPcal
errorEq = abs(h)
i, s = 0, 1.0
while errorEq > self.ep:
self.parametro_D()
self.parametro_B()
self.dF_dV()
self.dF_dVV()
dPite = self.dP_dV()
Pite = self.presion()
lnPcal = np.log(Pite)
h = lnP - lnPcal
dh = -dPite
self.V = self.V - s * h / dh
errorEq = abs(h)
i += 1
        if i >= 900:
            # safety stop so the Newton iteration cannot run forever
            break
return self.V
def volumen_1_cal(self):
self.V = 1.05 * self.B # SRK y PR
# self.V = 1.10 * self.B # RKPR
lnP = np.log(self.P)
Pite = self.presion()
lnPcal = np.log(Pite)
h = lnP - lnPcal
errorEq = abs(h)
i, s = 0, 1.0
while True:
self.parametro_D()
self.parametro_B()
self.dF_dV()
self.dF_dVV()
dPite = self.dP_dV()
Pite = self.presion()
lnPcal = np.log(Pite)
h = lnP - lnPcal
dh = -dPite
self.V = self.V - s * h / dh
errorEq = abs(h)
i += 1
        if i >= 900 or errorEq <= self.ep:
            # stop when converged or after a maximum number of iterations
            break
return self.V
# volume_liquid = volumen_1_cal()  # NOTE: volumen_1_cal is an unbound method copy; calling it here without an instance would raise a TypeError
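# A minimal standalone sketch of the volume iteration used in volumen_1 above:
# Newton steps on h = ln(P_spec) - ln(P_calc(V)) for a generic, user-supplied
# pressure function and its derivative.  The function name, tolerances and the
# damping factor s are illustrative assumptions, not part of the EOS class.
def newton_volume_on_lnP(P_spec, P_of_V, dPdV_of_V, V0, ep=1e-5, max_iter=900, s=1.0):
    V = V0
    for _ in range(max_iter):
        Pite = P_of_V(V)
        h = np.log(P_spec) - np.log(Pite)
        if abs(h) <= ep:
            break
        # exact derivative of h with respect to V; the routines above use -dP/dV
        # directly, i.e. they drop the 1/P factor
        dh = -dPdV_of_V(V) / Pite
        V = V - s * h / dh
    return V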
class properties_data(object):
"""docstring for properties_data"""
def __init__(self, arg):
super(properties_data, self).__init__()
self.arg = arg
| mit |
HeraclesHX/scikit-learn | examples/applications/plot_out_of_core_classification.py | 255 | 13919 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the feature space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop: iterate over mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
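# A minimal sketch of the core out-of-core pattern used above, reduced to a few
# lines: hash each minibatch into the same fixed-size feature space and keep
# updating one model with partial_fit.  The toy documents and labels below are
# illustrative assumptions only.
def minimal_partial_fit_sketch():
    vec = HashingVectorizer(decode_error='ignore', n_features=2 ** 18)
    clf = SGDClassifier()
    classes = np.array([0, 1])
    minibatches = [(["good doc", "bad doc"], np.array([1, 0])),
                   (["other good doc", "other bad doc"], np.array([1, 0]))]
    for texts, labels in minibatches:
        clf.partial_fit(vec.transform(texts), labels, classes=classes)
    return clf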
| bsd-3-clause |
navigator8972/ensemble_ioc | ensemble_ioc.py | 1 | 37900 | """
A module that implements the ensemble of inverse optimal control models
"""
import cPickle as cp
from collections import defaultdict
import copy
import numpy as np
import matplotlib.pyplot as plt
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn import decomposition
from sklearn.ensemble import RandomTreesEmbedding, RandomForestRegressor
from sklearn.linear_model import LinearRegression
# <hyin/Oct-23rd replace sklearn GMM as it starts deprecating since 0.18
# from sklearn import mixture
import gmr.gmr.gmm as gmm
from sklearn.cluster import SpectralClustering, KMeans, DBSCAN
import scipy.optimize as sciopt
import scipy.stats as sps
from scipy.misc import logsumexp
EPS = np.finfo(float).eps
class EnsembleIOC(BaseEstimator, RegressorMixin):
'''
Handling state/state pairs as input
'''
def __init__(self, n_estimators=20,
max_depth=5, min_samples_split=10, min_samples_leaf=10, clustering=0,
random_state=0,
em_itrs=5,
regularization=0.05,
passive_dyn_func=None,
passive_dyn_ctrl=None,
passive_dyn_noise=None,
verbose=False):
'''
n_estimators - number of ensembled models
... - a batch of parameters used for RandomTreesEmbedding, see relevant documents
clustering - whether or not to force the number of subset. If non-zero, call a clustering scheme with the learned metric
em_itrs - maximum number of EM iterations to take if one would like to increase the likelihood of the MaxEnt approximation
        regularization - small positive scalar to prevent singularity of matrix inversion. This is especially necessary when passive dynamics
            is considered. Notably, the underactuated system will assume zero covariance for uncontrolled state dimensions but this might
            not be the case in reality since the collected data could be corrupted by noise.
passive_dyn_func - function to evaluate passive dynamics; None for MaxEnt model
passive_dyn_ctrl - function to return the control matrix which might depend on the state...
passive_dyn_noise - covariance of a Gaussian noise; only applicable when passive_dyn is Gaussian; None for MaxEnt model
note this implies a dynamical system with constant input gain. It is extendable to have state dependent
input gain then we need covariance for each data point
verbose - output training information
'''
BaseEstimator.__init__(self)
self.n_estimators=n_estimators
self.max_depth=max_depth
self.min_samples_split=min_samples_split
self.min_samples_leaf=min_samples_leaf
self.clustering=clustering
self.random_state=random_state
self.em_itrs=em_itrs
self.reg=regularization
self.passive_dyn_func=passive_dyn_func
self.passive_dyn_ctrl=passive_dyn_ctrl
self.passive_dyn_noise=passive_dyn_noise
self.verbose=verbose
return
def predict(self, X):
n_samples, n_dim = X.shape
# use approximated GMM to capture the correlation, which provides us an initialization to iterate
# the MAP estimation
tmp_gmm = gmm.GMM( n_components=len(self.gmm_estimators_full_['weights']),
priors=np.array(self.gmm_estimators_full_['weights']),
means=np.array(self.gmm_estimators_full_['means']),
covariances=self.gmm_estimators_full_['covars'])
init_guess, init_covar = tmp_gmm.predict_with_covariance(indices=range(n_dim), X=X)
def objfunc(x, *args):
prior_mu, prior_inv_var = args
vals, grads = self.value_eval_samples_helper(np.array([x]), average=False, const=True)
prior_prob = .5*(x - prior_mu).dot(prior_inv_var).dot(x - prior_mu)
prior_grad = prior_inv_var.dot(x-prior_mu)
return vals[0] + prior_prob, grads[0] + prior_grad
res = []
for sample_idx in range(n_samples):
opt_res = sciopt.minimize( fun=objfunc,
x0=init_guess[sample_idx, :],
args=(init_guess[sample_idx, :], np.linalg.pinv(init_covar[sample_idx])),
method='BFGS',
jac=True,
options={'gtol': 1e-8, 'disp': False})
# print opt_res.message, opt_res.x,
# print opt_res.fun, opt_res.jac
# print init_guess[sample_idx, :], init_covar[sample_idx], opt_res.x
res.append(opt_res.x)
res = np.array(res)
return res
def _check_grads(self, X):
n_samples, n_dim = X.shape
# #predict the next state x_{t+1} given x_{t}
tmp_gmm = gmm.GMM( n_components=len(self.gmm_estimators_full_['weights']),
priors=np.array(self.gmm_estimators_full_['weights']),
means=np.array(self.gmm_estimators_full_['means']),
covariances=self.gmm_estimators_full_['covars'])
init_guess, init_covar = tmp_gmm.predict_with_covariance(indices=range(n_dim), X=X)
def objfunc(x, *args):
prior_mu, prior_var = args
vals, grads = self.value_eval_samples_helper(np.array([x]), average=False, const=True)
prior_prob = .5*(x - prior_mu).dot(prior_var).dot(x - prior_mu)
prior_grad = prior_var.dot(x-prior_mu)
return vals[0] + prior_prob, grads[0] + prior_grad
res = []
for sample_idx in range(n_samples):
def check_grad_fun(x):
return objfunc(x, init_guess[sample_idx, :], init_covar[sample_idx])[0]
def check_grad_fun_jac(x):
return objfunc(x, init_guess[sample_idx, :], init_covar[sample_idx])[1]
res.append(sciopt.check_grad(check_grad_fun, check_grad_fun_jac, X[sample_idx, :]))
return np.mean(res)
def fit(self, X, y=None):
'''
X - an array of concatenated features X_i = (x_{t-1}, x_{t}) corresponding to the infinite horizon case
'''
#check parameters...
assert(type(self.n_estimators)==int)
assert(self.n_estimators > 0)
assert(type(self.max_depth)==int)
assert(self.max_depth > 0)
assert(type(self.min_samples_split)==int)
assert(self.min_samples_split > 0)
assert(type(self.min_samples_leaf)==int)
assert(self.min_samples_leaf > 0)
assert(type(self.em_itrs)==int)
n_samples, n_dims = X.shape
#an initial partitioning of data with random forest embedding
self.random_embedding_mdl_ = RandomTreesEmbedding(
n_estimators=self.n_estimators,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
random_state=self.random_state
)
#we probably do not need the data type to differentiate it is a demonstration
#of trajectory or commanded state, do we?
if self.passive_dyn_func is not None and self.passive_dyn_ctrl is not None and self.passive_dyn_noise is not None:
# self.random_embedding_mdl_.fit(X[:, X.shape[1]/2:])
# indices = self.random_embedding_mdl_.apply(X[:, X.shape[1]/2:])
self.random_embedding_mdl_.fit(X[:, :X.shape[1]/2])
indices = self.random_embedding_mdl_.apply(X[:, :X.shape[1]/2])
# X_tmp = np.array(X)
# X_tmp[:, X.shape[1]/2:] = X_tmp[:, X.shape[1]/2:] - X_tmp[:, :X.shape[1]/2]
# self.random_embedding_mdl_.fit(X_tmp)
# indices = self.random_embedding_mdl_.apply(X_tmp)
else:
self.random_embedding_mdl_.fit(X)
#figure out indices
indices = self.random_embedding_mdl_.apply(X)
#prepare ensemble for prediction
self.random_prediction_mdl_ = RandomForestRegressor(
n_estimators=self.n_estimators,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
random_state=self.random_state
)
self.random_prediction_mdl_.fit(X[:, :X.shape[1]/2], X[:, X.shape[1]/2:])
if self.clustering > 0:
#we need to force the data to situate in clusters with the given number and the random embeddings
#first construct affinity
#use extracted indices as sparse features to construct an affinity matrix
if self.n_estimators > 1:
if self.verbose:
print 'Building {0} subset of data depending on their random embedding similarity...'.format(self.clustering)
#it makes sense to use the random embedding to do the clustering if we have ensembled features
aff_mat = _affinity_matrix_from_indices(indices, 'binary')
#using spectral mapping (Laplacian eigenmap)
self.cluster = SpectralClustering(n_clusters=self.clustering, affinity='precomputed')
self.cluster.fit(aff_mat)
else:
if self.verbose:
print 'Building {0} subset of data depending on their Euclidean similarity...'.format(self.clustering)
#otherwise, use euclidean distance, this should be enough when the state space is low dimensional
self.cluster = KMeans(n_clusters=self.clustering, max_iter=200, n_init=5)
self.cluster.fit(X)
partitioned_data = defaultdict(list)
leaf_idx = defaultdict(set)
weight_idx = defaultdict(float)
for d_idx, d, p_idx in zip(range(len(X)), X, self.cluster.labels_):
partitioned_data[0, p_idx].append(d)
leaf_idx[0] |= {p_idx}
for p_idx in range(self.clustering):
weight_idx[0, p_idx] = 1./self.clustering
num_estimators = 1
else:
partitioned_data = defaultdict(list)
leaf_idx = defaultdict(set)
weight_idx = defaultdict(float)
#group data belongs to the same partition and have the weights...
#is weight really necessary for EM steps? Hmm, seems to be for the initialization
#d_idx: data index; p_idx: partition index (comprised of estimator index and leaf index)
for d_idx, d, p_idx in zip(range(len(X)), X, indices):
for e_idx, l_idx in enumerate(p_idx):
partitioned_data[e_idx, l_idx].append(d)
leaf_idx[e_idx] |= {l_idx}
for e_idx, l_idx in enumerate(p_idx):
weight_idx[e_idx, l_idx] = float(len(partitioned_data[e_idx, l_idx])) / len(X)
# weight_idx[e_idx, l_idx] = 1. / len(p_idx)
num_estimators = self.n_estimators
#for each grouped data, solve an easy IOC problem by assuming quadratic cost-to-go function
#note that, if the passive dynamics need to be learned, extra steps is needed to train a regressor with weighted data
#otherwise, just a simply gaussian for each conditional probability distribution model
self.estimators_ = []
#another copy to store the parameters all together, for EM/evaluation on all of the models
self.estimators_full_ = defaultdict(list)
#<hyin/Feb-6th-2016> an estimator and leaf indexed structure to record the passive likelihood of data...
passive_likelihood_dict = defaultdict(list)
for e_idx in range(num_estimators):
#for each estimator
estimator_parms = defaultdict(list)
for l_idx in leaf_idx[e_idx]:
if self.verbose:
print 'Processing {0}-th estimator and {1}-th leaf/partition...'.format(e_idx, l_idx)
#and for each data partition
data_partition=np.array(partitioned_data[e_idx, l_idx])
estimator_parms['means'].append(np.mean(data_partition, axis=0))
estimator_parms['covars'].append(np.cov(data_partition.T) + np.eye(data_partition.shape[1])*self.reg)
#for MaxEnt, uniform passive likelihood
passive_likelihood_dict[e_idx, l_idx] = np.ones(len(data_partition)) / float(len(data_partition))
estimator_parms['weights'].append(weight_idx[e_idx, l_idx])
self.estimators_.append(estimator_parms)
#can stop here or go for expectation maximization for each estimator...
if self.em_itrs > 0:
#prepare em results for each estimator
em_res = [self._em_steps(e_idx, X, y) for e_idx in range(num_estimators)]
self.estimators_ = em_res
#record the gmm approximation
self.gmm_estimators_ = copy.deepcopy(self.estimators_)
self.gmm_estimators_full_ = defaultdict(list)
for est in self.estimators_:
for comp_idx in range(len(est['weights'])):
est['means'][comp_idx] = est['means'][comp_idx][(n_dims/2):]
est['covars'][comp_idx] = est['covars'][comp_idx][(n_dims/2):, (n_dims/2):]
self.estimators_full_['weights'].append(est['weights'][comp_idx]/float(num_estimators))
#for full estimators
self.estimators_full_['means'].append(est['means'][comp_idx])
self.estimators_full_['covars'].append(est['covars'][comp_idx])
if self.passive_dyn_func is not None and self.passive_dyn_ctrl is not None and self.passive_dyn_noise is not None:
X_new = X[:, X.shape[1]/2:]
X_old = X[:, 0:X.shape[1]/2]
#merge the model knowledge if passive dynamics model is available, use MaxEnt assumption otherwise
X_new_passive = np.array([self.passive_dyn_func(X_old[sample_idx]) for sample_idx in range(X.shape[0])])
passive_likelihood = _passive_dyn_likelihood(X_new, X_new_passive, self.passive_dyn_noise, self.passive_dyn_ctrl, self.reg)
weights = passive_likelihood / (np.sum(passive_likelihood) + self.reg)
if np.sum(weights) < 1e-10:
weights = 1./len(weights) * np.ones(len(weights))
#a GMM as a MaxEnt surrogate
tmp_gmm = gmm.GMM( n_components=len(self.estimators_[0]['weights']),
priors=self.estimators_[0]['weights'],
means=self.estimators_[0]['means'],
covariances=self.estimators_[0]['covars'])
for e_idx in range(num_estimators):
tmp_gmm.n_components = len(self.estimators_[e_idx]['weights'])
tmp_gmm.priors = self.estimators_[e_idx]['weights']
tmp_gmm.means = self.estimators_[e_idx]['means']
tmp_gmm.covariances = self.estimators_[e_idx]['covars']
responsibilities = tmp_gmm.to_responsibilities(X_new)
responsibilities = responsibilities / (np.sum(responsibilities, axis=0) + 1e-10)
new_weights = (weights * responsibilities.T).T
new_weights = (new_weights + 1e-10) / (np.sum(new_weights +1e-10, axis=0))
weighted_means = [np.sum((new_weight*X_new.T).T, axis=0) for new_weight in new_weights.T]
weighted_covars =[ _frequency_weighted_covariance(X_new, weighted_mean, new_weight, spherical=False)
for new_weight, weighted_mean in zip(new_weights.T, weighted_means)]
self.estimators_[e_idx]['means'] = weighted_means
self.estimators_[e_idx]['covars'] = weighted_covars
self.prepare_inv_and_constants()
return indices, leaf_idx, partitioned_data, passive_likelihood_dict
def _em_steps(self, estimator_idx, X, y=None):
#use current estimation as initialization to perform expectation-maximization
        #now reuse the procedure implemented by scikit-learn, actually a customized implementation
#is required if the passive dynamics also needs to be learned.
if self.verbose:
if estimator_idx is not None:
print 'EM steps for the estimator {0}'.format(estimator_idx)
else:
print 'EM steps...'
if estimator_idx is not None:
n_partitions=len(self.estimators_[estimator_idx]['weights'])
if self.verbose:
print 'num of partitions:', n_partitions
#use our own initialization
g = gmm.GMM(n_components=n_partitions, priors=np.array(self.estimators_[estimator_idx]['weights']),
means=np.array(self.estimators_[estimator_idx]['means']),
covariances=np.array(self.estimators_[estimator_idx]['covars']),
n_iter=self.em_itrs,
covariance_type='full')
else:
n_partitions=len(self.estimators_full_['weights'])
            # the sklearn mixture module is no longer imported (see the note near the top of this file),
            # so reuse the same gmm.GMM implementation with the full-model parameters
            g = gmm.GMM(n_components=n_partitions, priors=np.array(self.estimators_full_['weights']),
                    means=np.array(self.estimators_full_['means']),
                    covariances=np.array(self.estimators_full_['covars']),
                    n_iter=self.em_itrs,
                    covariance_type='full')
# g.fit(X[:, (X.shape[1]/2):])
g.fit(X)
#prepare to return a defaultdict
res=defaultdict(list)
res['means']=list(g.means)
res['covars']=list(g.covariances)
res['weights']=list(g.priors)
return res
def sample(self, n_samples=1, random_state=None):
'''
return samples that are synthesized from the model
'''
if not hasattr(self, 'estimators_'):
print 'The model has not been trained yet...'
return
else:
pass
return
def score(self, X, y=None):
return self.value_eval_samples(X, y, False, True)
def value_eval_samples(self, X, y=None, average=False, const=True):
scores, grads = self.value_eval_samples_helper(X, y, average, const)
return scores
def value_eval_samples_helper(self, X, y=None, average=False, const=True):
n_samples, n_dim = X.shape
grads = np.zeros((n_samples, n_dim))
if self.clustering > 0:
num_estimators = 1
else:
num_estimators = self.n_estimators
if not average:
res = np.zeros(X.shape[0])
res_mat = np.zeros((X.shape[0], len(self.estimators_full_['means'])))
res_grad_tmp = []
for i, (m, c_inv) in enumerate( zip(self.estimators_full_['means'],
self.estimators_full_['inv_covars'])):
diff_data = X - m
res_mat[:, i] = np.array([e_prod.dot(e)*0.5 + self.estimators_full_['beta'][i]*const for e_prod, e in zip(diff_data.dot(c_inv), diff_data)])
res_grad_tmp.append(c_inv.dot(diff_data.T).T)
for d_idx, r in enumerate(res_mat):
res[d_idx] = -logsumexp(-r, b=np.array(self.estimators_full_['weights']))
resp = ((np.exp(-res_mat)*np.array(self.estimators_full_['weights'])).T / np.exp(-res)).T
for e_idx in range(res_mat.shape[1]):
grads += (res_grad_tmp[e_idx].T * resp[:, e_idx]).T
else:
def value_estimator_eval(d, est_idx):
res = []
for i, (m, c_inv) in enumerate( zip(self.estimators_[est_idx]['means'],
self.estimators_[est_idx]['inv_covars'])):
diff_data = d - m
res.append((.5*diff_data.dot(c_inv).dot(diff_data.T) + self.estimators_[est_idx]['beta'][i]*const)[0])
return np.array(res).T
def value_estimator_grad(d, est_idx, val):
res_grad = 0
for i, (m, c_inv) in enumerate( zip(self.estimators_[est_idx]['means'],
self.estimators_[est_idx]['inv_covars'])):
diff_data = d - m
resp = np.exp(-(.5*diff_data.dot(c_inv).dot(diff_data.T) + self.estimators_[est_idx]['beta'][i]*const)[0]) * self.estimators_[est_idx]['weights'][i]
grad_comp = c_inv.dot(diff_data.T).T
res_grad += (grad_comp.T * (resp / np.exp(-val))).T
return res_grad
res = np.array([-logsumexp(-value_estimator_eval(X, idx), axis=1, b=self.estimators_[idx]['weights']) for idx in range(num_estimators)]).T
res_grad = [value_estimator_grad(X, idx, res[:, idx]) for idx in range(num_estimators)]
res = np.mean(res, axis=1)
grads = np.mean(res_grad, axis=0)
return res, grads
def prepare_inv_and_constants(self):
'''
supplement steps to prepare inverse of variance matrices and constant terms
'''
regularization = self.reg
if self.clustering > 0:
num_estimators = 1
else:
num_estimators = self.n_estimators
for idx in range(num_estimators):
self.estimators_[idx]['inv_covars'] = [ np.linalg.pinv(covar + np.eye(covar.shape[0])*regularization) for covar in self.estimators_[idx]['covars']]
self.estimators_[idx]['beta'] = [.5*np.log(pseudo_determinant(covar + np.eye(covar.shape[0])*regularization)) + .5*np.log(2*np.pi)*covar.shape[0] for covar in self.estimators_[idx]['covars']]
self.estimators_full_['weights'] = []
self.estimators_full_['means'] = []
self.estimators_full_['covars'] = []
self.gmm_estimators_full_['weights'] = []
self.gmm_estimators_full_['means'] = []
self.gmm_estimators_full_['covars'] = []
for e_idx in range(num_estimators):
for leaf_idx in range(len(self.estimators_[e_idx]['weights'])):
self.estimators_full_['weights'].append(self.estimators_[e_idx]['weights'][leaf_idx]/float(num_estimators))
self.estimators_full_['covars'].append(self.estimators_[e_idx]['covars'][leaf_idx])
self.estimators_full_['means'].append(self.estimators_[e_idx]['means'][leaf_idx])
self.estimators_full_['inv_covars'].append(self.estimators_[e_idx]['inv_covars'][leaf_idx])
self.estimators_full_['beta'].append(self.estimators_[e_idx]['beta'][leaf_idx])
self.gmm_estimators_full_['weights'].append(self.gmm_estimators_[e_idx]['weights'][leaf_idx]/float(num_estimators))
self.gmm_estimators_full_['covars'].append(self.gmm_estimators_[e_idx]['covars'][leaf_idx])
self.gmm_estimators_full_['means'].append(self.gmm_estimators_[e_idx]['means'][leaf_idx])
return
from scipy import linalg
def pseudo_determinant(S, thres=1e-3, min_covar=1.e-7):
n_dim = S.shape[0]
try:
S_chol = linalg.cholesky(S, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
S_chol = linalg.cholesky(S + min_covar * np.eye(n_dim),
lower=True)
S_chol_diag = np.diag(S_chol)
return np.prod(S_chol_diag[S_chol_diag>thres]) ** 2
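# Quick sanity check for pseudo_determinant: for a well-conditioned SPD matrix
# whose Cholesky diagonal stays above the threshold, it reduces to the ordinary
# determinant.  The test matrix below is an illustrative assumption.
def _pseudo_determinant_sanity_check():
    A = np.array([[2.0, 0.3], [0.3, 1.0]])
    return np.allclose(pseudo_determinant(A), np.linalg.det(A))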
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""
Log probability for full covariance matrices.
A shameless copy from scikit-learn
"""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diag(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _passive_dyn_likelihood_helper(X_new, X_new_passive, passive_dyn_noise, passive_dyn_ctrl, reg=1e-5):
#regularized sigma
log_grads = np.zeros(X_new.shape)
sigma = passive_dyn_noise*passive_dyn_ctrl + reg*np.eye(X_new.shape[1])
#<hyin/Feb-9th-2016> slightly modify the sequence to prevent potential overflow issue
denom = ((2*np.pi)**(X_new.shape[1]/2.0))*np.linalg.det(sigma)**.5
err = X_new - X_new_passive
err_prod = err.dot(np.linalg.pinv(sigma))
quad_term = np.array([e.dot(ep) for e, ep in zip(err, err_prod)])
num = np.exp(-.5*quad_term)
log_likelihood = -.5*quad_term - np.log(denom)
log_grads = -err_prod
return num/denom, log_likelihood, log_grads
def _passive_dyn_likelihood(X_new, X_new_passive, passive_dyn_noise, passive_dyn_ctrl, reg=1e-5):
likelihoods, _, _ = _passive_dyn_likelihood_helper(X_new, X_new_passive, passive_dyn_noise, passive_dyn_ctrl, reg)
return likelihoods
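# Quick cross-check for the passive-dynamics likelihood: each new state should be
# scored exactly as a Gaussian (scipy.stats.multivariate_normal) centered at its
# passively propagated prediction.  The toy states and noise below are
# illustrative assumptions.
def _passive_dyn_likelihood_sanity_check():
    X_new = np.array([[0.1, 0.2], [0.3, -0.1]])
    X_new_passive = np.array([[0.0, 0.0], [0.25, 0.0]])
    noise, ctrl, reg = 0.1, np.eye(2), 1e-5
    sigma = noise * ctrl + reg * np.eye(2)
    ref = np.array([sps.multivariate_normal.pdf(x, mean=m, cov=sigma)
                    for x, m in zip(X_new, X_new_passive)])
    return np.allclose(_passive_dyn_likelihood(X_new, X_new_passive, noise, ctrl, reg), ref)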
def _frequency_weighted_covariance(X, m, weights, spherical=False):
coeff = np.array(weights) / np.sum(weights)
if spherical:
#need numpy 1.9
diff_data = np.linalg.norm(X - m, axis=1)
sigma = np.sum(coeff * diff_data)
covar = sigma * np.eye(len(m))
else:
diff_data = X - m
covar = (coeff*diff_data.T).dot(diff_data)
#need numpy 1.10
# covar = np.cov(X, aweights=weights)
return covar
def _stratified_weighted_covariance(X, m, weights):
coeff = np.array(weights) / np.sum(weights)
norm_coeff = 1./ (1. - np.sum(coeff**2))
covar = np.zeros((X.shape[1], X.shape[1]))
for j in range(covar.shape[0]):
for k in range(covar.shape[1]):
covar[j, k] = np.sum([c*(d[j]-m[j])*(d[k]-m[k]) for c, d in zip(coeff, X)])
return covar
def _affinity_matrix_from_indices(indices, metric='binary', param=1.0):
#input is an array of data represented by sparse encoding
if metric == 'binary':
#binary metric is parm free
aff_op = lambda a, b: np.mean([int(ind_a==ind_b) for ind_a, ind_b in zip(a, b)])
elif metric == 'gaussian':
aff_op = lambda a, b: np.mean([np.exp(-(a-b).dot(a-b)*param) if ind_a==ind_b else 0 for ind_a, ind_b in zip(a, b)])
elif metric == 'mahalanobis':
aff_op = lambda a, b: np.mean([np.exp(-param.dot(a-b).dot(a-b)) if ind_a==ind_b else 0 for ind_a, ind_b in zip(a, b)])
else:
aff_op = None
if aff_op is not None:
n_samples = indices.shape[0]
aff_mat = [[aff_op(indices[i], indices[j]) for j in range(n_samples)] for i in range(n_samples)]
else:
print 'Invalid metric specified.'
aff_mat = None
return aff_mat
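# A small usage sketch for the affinity construction above: in this module it is
# typically fed with the leaf indices returned by RandomTreesEmbedding.apply, so
# two samples are similar when they share leaves across many trees.  The random
# toy data below is an illustrative assumption.
def _affinity_from_random_embedding_sketch():
    X = np.vstack([np.random.randn(10, 2), np.random.randn(10, 2) + 5.0])
    embedding = RandomTreesEmbedding(n_estimators=10, max_depth=3, random_state=0)
    embedding.fit(X)
    indices = embedding.apply(X)  # (n_samples, n_estimators) array of leaf indices
    return _affinity_matrix_from_indices(indices, 'binary')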
class EnsembleIOCTraj(BaseEstimator, RegressorMixin):
'''
Handling the entire trajectories as the input
'''
def __init__(self, traj_clusters=3, ti=True,
n_estimators=20,
max_depth=5, min_samples_split=10, min_samples_leaf=10, state_n_estimators=100, state_n_clusters=0,
random_state=0,
em_itrs=5,
regularization=0.05,
passive_dyn_func=None,
passive_dyn_ctrl=None,
passive_dyn_noise=None,
verbose=False):
'''
traj_clusters - number of clusters of trajectories
ti - whether or not to extract time invariant states
***The remained parameters are for the state ioc estimators***
n_estimators - number of ensembled models
... - a batch of parameters used for RandomTreesEmbedding, see relevant documents
state_n_estimators - number of state estimators
state_n_clusters - number of clusters for states for each trajectory group
em_itrs - maximum number of EM iterations to take
regularization - small positive scalar to prevent singularity of matrix inversion
passive_dyn_func - function to evaluate passive dynamics; None for MaxEnt model
passive_dyn_ctrl - function to return the control matrix which might depend on the state...
passive_dyn_noise - covariance of a Gaussian noise; only applicable when passive_dyn is Gaussian; None for MaxEnt model
note this implies a dynamical system with constant input gain. It is extendable to have state dependent
input gain then we need covariance for each data point
verbose - output training information
'''
self.n_traj_clusters = traj_clusters
if isinstance(state_n_clusters, int):
state_clusters_lst = [state_n_clusters] * self.n_traj_clusters
else:
state_clusters_lst = state_n_clusters
self.eioc_mdls = [ EnsembleIOC( n_estimators=state_n_estimators,
max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, clustering=state_n_clusters, #let random embedding decides how many clusters we should have
random_state=random_state,
em_itrs=em_itrs,
regularization=regularization,
passive_dyn_func=passive_dyn_func,
passive_dyn_ctrl=passive_dyn_ctrl,
passive_dyn_noise=passive_dyn_noise,
verbose=verbose) for i in range(self.n_traj_clusters) ]
self.ti = ti
self.n_estimators=n_estimators
self.max_depth=max_depth
self.min_samples_split=min_samples_split
self.min_samples_leaf=min_samples_leaf
self.random_state=random_state
self.state_n_estimators = state_n_estimators
self.state_n_clusters = state_n_clusters
self.em_itrs=em_itrs
self.reg=regularization
self.passive_dyn_func=passive_dyn_func
self.passive_dyn_ctrl=passive_dyn_ctrl
self.passive_dyn_noise=passive_dyn_noise
self.verbose=verbose
self.clustered_trajs = None
return
def cluster_trajectories(self, trajs):
#clustering the trajectories according to random embedding parameters and number of clusters
#flatten each trajectories
flattened_trajs = np.array([np.array(traj).T.flatten() for traj in trajs])
#an initial partitioning of data with random forest embedding
self.random_embedding_mdl_ = RandomTreesEmbedding(
n_estimators=self.n_estimators,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
random_state=self.random_state
)
self.random_embedding_mdl_.fit(flattened_trajs)
#figure out indices
indices = self.random_embedding_mdl_.apply(flattened_trajs)
#we need to force the data to situate in clusters with the given number and the random embeddings
#first construct affinity
#use extracted indices as sparse features to construct an affinity matrix
if self.verbose:
print 'Building {0} subset of trajectories depending on their random embedding similarity...'.format(self.n_traj_clusters)
aff_mat = _affinity_matrix_from_indices(indices, 'binary')
#using spectral mapping (Laplacian eigenmap)
self.cluster = SpectralClustering(n_clusters=self.n_traj_clusters, affinity='precomputed')
self.cluster.fit(aff_mat)
clustered_trajs = [[] for i in range(self.n_traj_clusters)]
for d_idx, d, p_idx in zip(range(len(trajs)), trajs, self.cluster.labels_):
clustered_trajs[p_idx].append(d)
#let's see how the DBSCAN works
#here it means at least how many trajectories do we need to form a cluster
#dont know why always assign all of the data as noise...
# self.cluster = DBSCAN(eps=0.5, min_samples=self.n_traj_clusters, metric='euclidean', algorithm='auto')
# flatten_trajs = [traj.T.flatten() for traj in trajs]
# self.cluster.fit(flatten_trajs)
# labels = self.cluster.labels_
# print labels
# # Number of clusters in labels, ignoring noise if present.
# n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
#
# clustered_trajs = [[] for i in range(n_clusters_)]
#
# for d_idx, d, p_idx in zip(range(len(trajs)), trajs, labels):
# clustered_trajs[p_idx].append(d)
return np.array(clustered_trajs)
def fit(self, X, y=None):
'''
X is an array of trajectories
'''
#first cluster these trajectories to locally similar data sets (here 'locally' does not necessarily mean euclidean distance)
clustered_trajs = self.cluster_trajectories(X)
for i in range(len(clustered_trajs)):
#for each clustered trajectories train the sub eioc model
#reform the trajectories if necessary
if not self.ti:
#time varing system, just flatten them
                flattened_trajs = [ np.array(traj).T.flatten() for traj in clustered_trajs[i]]
self.eioc_mdls[i].clustering=1
self.eioc_mdls[i].fit(flattened_trajs)
#note the fit model retains mean and covariance of the flattened trajectories
else:
#time invariant
aug_states = []
for traj in clustered_trajs[i]:
for t_idx in range(len(traj)-1):
aug_states.append(np.array(traj)[t_idx:t_idx+2, :].flatten())
self.eioc_mdls[i].fit(np.array(aug_states))
self.clustered_trajs = clustered_trajs
return
def score(self, X, gamma=1.0, average=False):
#score a query state
if self.clustered_trajs is not None:
#the model ensemble has been trained
# score_ensemble = [np.array(model.score(X)[0]) for model in self.eioc_mdls]
score_ensemble = [np.array(model.value_eval_samples(X,average=average)) for model in self.eioc_mdls]
#average (maximum likelihood) or logsumexp (softmaximum -> maximum posterior)
if gamma is None:
res = np.mean(score_ensemble, axis=0)
else:
# mdl_eval = lambda scores: [logsumexp(x_score) for x_score in scores]
res = np.array([-logsumexp(-gamma*np.array([score[sample_idx] for score in score_ensemble])) for sample_idx, sample in enumerate(X)])
return res
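# A minimal end-to-end sketch of EnsembleIOCTraj on synthetic data: fit the
# MaxEnt variant (no passive dynamics) on a set of noisy 2D trajectories that
# drift toward the origin, then score a few query states.  All sizes and
# parameter values below are illustrative assumptions.
def EnsembleIOCTrajSketch():
    trajs = []
    for _ in range(20):
        x = np.random.randn(2) * 5.0
        traj = [x.copy()]
        for _ in range(14):
            x = 0.8 * x + 0.1 * np.random.randn(2)
            traj.append(x.copy())
        trajs.append(np.array(traj))
    mdl = EnsembleIOCTraj(traj_clusters=2, ti=True, n_estimators=5,
                          state_n_estimators=5, state_n_clusters=2, em_itrs=0)
    mdl.fit(trajs)
    query = np.random.randn(3, 2)
    return mdl.score(query)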
def EnsembleIOCTest():
    '''
    A test to try modeling the occurrences of state visiting
Use the scikit-learn example
'''
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = 1.5*np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
np.random.shuffle(X_train)
model=EnsembleIOC(n_estimators=1, max_depth=3, min_samples_split=10, min_samples_leaf=10, clustering=2,
random_state=10,
em_itrs=0)
#learn
indices, leaf_idx, partitioned_data, passive_likelihood_dict = model.fit(X_train)
# print len(model.estimators_)
# print model.estimators_[0]['means']
# print model.estimators_[0]['covars']
# print model.estimators_[0]['weights']
#visualize the data and heating map
xmin=-20;xmax=30
ymin=-20;ymax=40
fig=plt.figure()
ax=fig.add_subplot(111)
ax.scatter(X_train[:, 0], X_train[:, 1], .8)
ax.hold(True)
ax.set_xlim([xmin, xmax])
ax.set_ylim([ymin, ymax])
#evaluate testing points
grid_dim1=np.linspace(xmin, xmax)
grid_dim2=np.linspace(ymin, ymax)
Sgrid=np.meshgrid(grid_dim1, grid_dim2)
states = np.array([np.reshape(dim, (1, -1))[0] for dim in Sgrid])
costs, _=model.score(states.T)
pcol = ax.pcolormesh(grid_dim1, grid_dim2,
np.reshape(costs, (len(grid_dim1), len(grid_dim2))),
shading='none')
pcol.set_edgecolor('face')
colors = ['b','w']
for idx, c in enumerate(colors):
pnts = np.array(partitioned_data[0, idx])
ax.plot(pnts[:, 0], pnts[:, 1], '*', color=c)
mean = model.estimators_[0]['means'][idx]
ax.plot([mean[0]], [mean[1]], 'o', markersize=24, color=c)
print mean, model.estimators_[0]['covars'][idx]
plt.show()
return
if __name__ == '__main__':
EnsembleIOCTest()
| bsd-2-clause |
eickenberg/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 24 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
import matplotlib.pyplot as plt
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
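# Besides the cross-validation scores plotted above, the fitted RFECV object also
# records which features were kept; printing them is a handy follow-up check.
print("Selected feature mask : %s" % rfecv.support_)
print("Feature ranking (1 = selected) : %s" % rfecv.ranking_)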
| bsd-3-clause |
censusreporter/census-pandas | util.py | 1 | 2251 | import pandas as pd
import requests
API_URL="http://api.censusreporter.org/1.0/data/show/{release}?table_ids={table_ids}&geo_ids={geoids}"
def get_data(tables=None, geoids=None, release='latest'):
if geoids is None:
geoids = ['040|01000US']
elif isinstance(geoids,basestring):
geoids = [geoids]
if tables is None:
tables = ['B01001']
elif isinstance(tables,basestring):
tables=[tables]
url = API_URL.format(table_ids=','.join(tables).upper(),
geoids=','.join(geoids),
release=release)
response = requests.get(url)
return response.json()
def get_dataframe(tables=None, geoids=None, release='latest',geo_names=False,col_names=False,include_moe=False):
response = get_data(tables=tables,geoids=geoids,release=release)
frame = pd.DataFrame.from_dict(prep_for_pandas(response['data'],include_moe),orient='index')
frame = frame[sorted(frame.columns.values)] # data not returned in order
if geo_names:
geo = pd.DataFrame.from_dict(response['geography'],orient='index')
frame.insert(0,'name',geo['name'])
if col_names:
d = {}
for table_id in response['tables']:
columns = response['tables'][table_id]['columns']
for column_id in columns:
d[column_id] = columns[column_id]['name']
frame = frame.rename(columns=d)
return frame
def prep_for_pandas(json_data,include_moe=False):
"""Given a dict of dicts as they come from a Census Reporter API call, set it up to be amenable to pandas.DataFrame.from_dict"""
result = {}
for geoid, tables in json_data.items():
flat = {}
for table,values in tables.items():
for kind, columns in values.items():
if kind == 'estimate':
flat.update(columns)
elif kind == 'error' and include_moe:
renamed = dict((k+"_moe",v) for k,v in columns.items())
flat.update(renamed)
result[geoid] = flat
return result
if __name__ == '__main__':
df = get_dataframe()
print "Top 10 most populous states"
print df.sort('B01001001',ascending=False)['B01001001'].head(10)
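    # The same table can also be pulled with human-readable geography and column
    # names attached; this reuses the default table/geoid strings from this module.
    labeled = get_dataframe(tables='B01001', geoids='040|01000US',
                            geo_names=True, col_names=True)
    print labeled.head()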
| mit |
r-owen/TUI | TUI/Base/BaseFocusScript.py | 1 | 78636 | """A basic focus script for slitviewers
(changes will be required for gcam and instruments).
Subclass for more functionality.
Take a series of exposures at different focus positions to estimate best focus.
Note:
- The script runs in two phases:
1) If a slitviewer:
Move the boresight and take an exposure. Then pause.
The user is expected to acquire a suitable star before resuming.
Once this phase begins (i.e. once you start the script)
changes to boresight offset are ignored.
Other imagers:
Take an exposure and look for the best centroidable star. Then pause.
The user is expected to acquire a suitable star before resuming.
2) Take the focus sweep.
Once this phase begins all inputs are ignored.
History:
2006-11-07 ROwen From DIS:Focus, which was from NICFPS:Focus.
2006-11-09 ROwen Removed use of plotAxis.autoscale_view(scalex=False, scaley=True)
since it was not compatible with older versions of matplotlib.
Stopped using float("nan") since it doesn't work on all pythons.
Modified to always pause before the focus sweep.
Modified to window the exposure.
2006-11-13 ROwen Modified to have user set center focus and range.
Added Expose and Sweep buttons.
2006-12-01 ROwen Refactored to make it easier to use for non-slitviewers:
- Added waitFocusSweep method.
- Modified to use focPosFWHMList instead of two lists.
Improved sanity-checking the best focus fit.
Created SlitviewerFocusScript and OffsetGuiderFocusScript classes;
the latter is not yet fully written.
2006-12-08 ROwen More refactoring. Created ImagerFocusScript class.
Needs extensive testing.
2006-12-13 ROwen Added Find button and changed Centroid to Measure.
Data is always nulled at start of sweep. This is much easier than
trying to be figure out when I can safely keep existing data.
Fit error is logged.
Fit is logged and graphed even if fit is rejected (unless fit is a maximum).
Changed from Numeric to numarray to avoid a bug in matplotlib 0.87.7
Changed test for max fit focus error to a multiple of the focus range.
2006-12-28 ROwen Bug fix: tried to send <inst>Expose time=<time> bin=<binfac>
command for imaging instruments. The correct command is:
<inst>Expose object time=<time>.
Noted that bin factor and window must be configured via special
instrument-specific commands.
ImagerFocusScript no longer makes use of windowing (while centroiding),
though a subclass could do so.
2006-12-28 ROwen ImagerFocusScript.waitExpose now aborts the exposure if the script is aborted.
This change did not get into TUI 1.3a11. Note that this fix only applies to imaging
instruments; there is not yet any documented way to abort a guider exposure.
2007-01-02 ROwen Fixed a bug in waitExpose: <inst> <inst>Expose -> <instExpose>.
Fixed a bug in waitFindStar: centroidRad used but not supplied.
Improved help text for Star Pos entry widgets.
2007-01-03 ROwen Bug fixes:
- Used sr instead of self.sr in two places.
- ImagerFocusScript.getCentroidArgs returned bad
starpos due to wanting to window.
- ImagerFocusScript.waitCentroid failed if no star found
rather than returning self.sr.value = None.
2007-01-12 ROwen Added a threshold for star finding (maxFindAmpl).
Added logging of sky and star amplitude.
2007-01-26 ROwen Tweak various formats:
- All reported and command floats use %0.xf (some used %.xf).
- Focus is rounded to nearest integer for logging and setting.
If new focus found, set Center Focus to the new value.
Increased minimum # of focus positions from 2 to 3.
Bug fix: if only 3 measurements, divided by zero while computing std. dev.
Bug fix: could not restore initial focus (missing = in set focus command).
Minor bug fix: focus interval was computed as int, not float.
2007-01-29 ROwen Improved OffsetGuiderFocusScript to get guider info based on instPos
instead of insisting that the guider be the current instrument.
Modified to take advantage of RO.Wdg.Entry's new label attribute.
2007-01-29 ROwen Fixed ImagerFocusScript (it was giving an illegal arg to OffsetGuiderFocusScript).
Refactored so run is in BaseFocusScript and ImagerFocusScript inherits from that.
Renamed extraSetup method to waitExtraSetup.
2007-02-13 ROwen Added a Clear button.
Never auto-clears the log.
Waits to auto-clear the graph until new data is about to be graphed.
Simplified graph range handling.
2007-04-24 ROwen Modified to use numpy instead of numarray.
2007-06-01 ROwen Hacked in support for sfocus for SPIcam.
2007-06-04 ROwen Added doWindow argument to BaseFocusScript.
2007-07-25 ROwen ImagerFocusScript modified to sending windowing info as part of the expose command
if windowing is being used (due to improvements in spicamExpose).
Pings the gcam actor when it starts. This eliminates the situation where the actor
is dead and the script should halt, but keeps exposing and reporting fwhm=NaN instead.
2007-07-26 ROwen Added user-settable bin factor.
Modified to take a final exposure (after restoring boresight) if boresight moved.
2007-07-27 ROwen Increased the fidelity of debug mode and fixed some bugs.
2007-07-30 ROwen Added windowOrigin and windowIsInclusive arguments.
Bug fix: if the user changed the bin factor during script execution,
it would change the bin factor used in the script (and not necessarily properly).
2007-09-12 ROwen SlitviewerFocusScript bug fix: Cancel would fail if no image ever taken.
2007-12-20 ROwen Moved matplotlib configuration statements to TUI's startup because
in matplotlib 0.91.1 one may not call "use" after importing matplotlib.backends.
2008-01-24 ROwen BaseFocusScript bug fixes:
- PR 686: Find button broken (waitFindStar ran "expose" instead of "findstars"
and so never found anything.).
- recordUserParams didn't round window so relStarPos could be off by a fraction of a pixel.
2008-01-25 ROwen Added a digit after the decimal point for reporting fwhm in arcsec.
Implemented a lower limit on focus increment.
2008-02-01 ROwen Changed configuration constants from globals to class variables of BaseFocusScript
so subclasses can more easily override them.
Fixed debug mode to use proper defaults for number of steps and focus range.
Setting current focus successfully clears the status bar.
2008-03-28 ROwen PR 775: used exposeModel in classes where it did not exist.
Fixed by adding tccInstPrefix argument.
2008-04-02 ROwen PR 781: Many focus scripts fail to start with TypeError...:
BaseFocusScript.getInstInfo was missing () on a string method lower()
2008-04-22 ROwen Modified to use new Log.addMsg method.
2008-04-23 ROwen Added some diagnostic output for PR 777 and its kin.
2008-04-29 ROwen Open guide image window *after* checking for correct instrument.
2008-08-14 ROwen CR 818: take a final full-frame exposure if script windows
(or, as before, if boresight was restored).
2009-03-02 ROwen Added a brief header for PR 777 diagnostic output.
2009-11-23 ROwen Reduced MinFocusIncr from 50 to 25 um.
2010-10-20 ROwen Bug fix: end did not call moveBoresight properly because it was a generator.
Changed moveBoresight to return the cmdVar and never wait; removed the doWait argument.
Modified to wait for the cleanup commands for most failures in waitFocusSweep
and to log such errors in the log window.
Print cleanup tasks to the log window.
Print a "please wait" message if the cleanup tasks are run by end.
Call setCurrFocus after setting focus to best estimated focus (why was it done the other way?).
2010-12-09 ROwen PR 1211: if a focus sweep failed to converge, restored secondary focus by commanding
the TCC directly instead of calling waitSetFocus. This is bad for the focus script
that moves the NA2 guider focus, since it should never alter secondary focus.
2011-07-29 ROwen Made taking a final image more reliable. Formerly if the script was cancelled
during the first exposure it would not take a final exposure.
Reduced the minimum focus step size from 25um to 10um.
2011-10-25 ROwen Added getBinFactor and isFinalExposureWanted methods.
Added isFinal argument to various methods.
Added finalBinFactor argument as an ugly hack until guide actors report bin factor.
Modified ImagerFocusScript to record initial bin factor, if it is adjustable.
2011-11-04 ROwen Bug fix: SlitviewerFocusScript and OffsetGuiderFocusScript final exposure not full frame.
Bug fix: ImagerFocusScript did not set exposeModel soon enough for spicam.
2012-02-16 ROwen Bug fix: BaseFocusScript.isFinalExposureWanted used undefined variable doRestoreBoresight;
fixed by adding a method doRestoreBoresight.
Changed to not log diagnostic information when sr.ScriptError is raised.
2014-04-22 ROwen Updated to use instInfo.centroidActor. Removed a few unused imports.
"""
import inspect
import traceback
import sys
import math
import random # for debug
import numpy
import Tkinter
import RO.Wdg
import RO.Constants
import RO.StringUtil
import TUI.TUIModel
import TUI.TCC.TCCModel
import TUI.Inst.ExposeModel
import TUI.Guide.GuideModel
import matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
MicronStr = RO.StringUtil.MuStr + "m"
def formatNum(val, fmt="%0.1f"):
"""Convert a number into a string
None is returned as NaN
"""
if val is None:
return "NaN"
try:
return fmt % (val,)
except TypeError:
raise TypeError("formatNum failed on fmt=%r, val=%r" % (fmt, val))
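# A few sample calls, for reference (values are arbitrary):
#     formatNum(None)            # -> "NaN"
#     formatNum(3.14159)         # -> "3.1"   (default format "%0.1f")
#     formatNum(1024, "%0.0f")   # -> "1024"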
class Extremes(object):
"""Class to keep track of minimum and maximum value.
"""
def __init__(self, val=None):
self.minVal = None
self.maxVal = None
if val is not None:
self.addVal(val)
def addVal(self, val):
if val is None:
return
if self.isOK():
self.minVal = min(self.minVal, val)
self.maxVal = max(self.maxVal, val)
else:
self.minVal = val
self.maxVal = val
def isOK(self):
return self.minVal is not None
def __eq__(self, other):
return (self.minVal == other.minVal) and (self.maxVal == other.maxVal)
def __str__(self):
return "[%s, %s]" % (self.minVal, self.maxVal)
def __repr__(self):
return "Extremes(%s, %s)" % (self.minVal, self.maxVal)
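# Quick usage sketch (arbitrary values): Extremes tracks a running min/max and
# ignores None values.
#     ext = Extremes(5.0)
#     ext.addVal(2.0)
#     ext.addVal(None)   # ignored
#     ext.addVal(7.5)
#     str(ext)           # -> "[2.0, 7.5]"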
class StarMeas(object):
def __init__(self,
xyPos = None,
sky = None,
ampl = None,
fwhm = None,
):
self.xyPos = xyPos
self.sky = sky
self.ampl = ampl
self.fwhm = fwhm
@classmethod
def fromStarKey(cls, starKeyData):
"""Create an instance from star keyword data.
"""
return cls(
fwhm = starKeyData[8],
sky = starKeyData[13],
ampl = starKeyData[14],
xyPos = starKeyData[2:4],
)
def makeStarData(
typeChar = "f",
xyPos = (10.0, 10.0),
sky = 200,
ampl = 1500,
fwhm = 2.5,
):
"""Make a list containing one star data list for debug mode"""
xyPos = [float(xyPos[ii]) for ii in range(2)]
fwhm = float(fwhm)
return [[typeChar, 1, xyPos[0], xyPos[1], 1.0, 1.0, fwhm * 5, 1, fwhm, fwhm, 0, 0, ampl, sky, ampl]]
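# Usage sketch (arbitrary values): makeStarData builds a fake "star" keyword reply
# whose field layout matches StarMeas.fromStarKey (FWHM at index 8, sky at 13,
# amplitude at 14).
#     starKey = makeStarData("c", xyPos=(12.0, 34.0), fwhm=3.0)[0]
#     meas = StarMeas.fromStarKey(starKey)
#     meas.xyPos, meas.fwhm, meas.sky, meas.ampl   # -> ([12.0, 34.0], 3.0, 200, 1500)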
class BaseFocusScript(object):
"""Basic focus script object.
This is a virtual base class. The inheritor must:
- Provide widgets
- Provide a "run" method
"""
cmd_Find = "find"
cmd_Measure = "measure"
cmd_Sweep = "sweep"
# constants
#DefRadius = 5.0 # centroid radius, in arcsec
#NewStarRad = 2.0 # amount of star position change to be considered a new star
DefFocusNPos = 5 # number of focus positions
DefFocusRange = 200 # default focus range around current focus
FocusWaitMS = 1000 # time to wait after every focus adjustment (ms)
BacklashComp = 0 # amount of backlash compensation, in microns (0 for none)
WinSizeMult = 2.5 # window radius = centroid radius * WinSizeMult
FocGraphMargin = 5 # margin on graph for x axis limits, in um
MaxFocSigmaFac = 0.5 # maximum allowed sigma of best fit focus as a multiple of focus range
MinFocusIncr = 10 # minimum focus increment, in um
def __init__(self,
sr,
gcamActor,
instName,
tccInstPrefix = None,
imageViewerTLName = None,
defRadius = 5.0,
defBinFactor = 1,
finalBinFactor = None,
canSetStarPos = True,
maxFindAmpl = None,
doWindow = True,
windowOrigin = 0,
windowIsInclusive = True,
helpURL = None,
debug = False,
):
"""The setup script; run once when the script runner
window is created.
Inputs:
- gcamActor: name of guide camera actor (e.g. "dcam")
- instName: name of instrument (e.g. "DIS"); must be a name known to TUI.Inst.ExposeModel.
- tccInstPrefix: instrument name as known by the TCC; defaults to instName;
if the instrument has multiple names in the TCC then supply the common prefix
- imageViewerTLName: name of image viewer toplevel (e.g. "Guide.DIS Slitviewer")
- defRadius: default centroid radius, in arcsec
- defBinFactor: default bin factor; if None then bin factor cannot be set
- finalBinFactor: the final bin factor; this is an ugly hack that works around the problem
that guiders do not provide a bin factor keyword so there's no way to tell what
bin factor the guider started out with.
None means: if you can tell what the starting bin factor is then restore that; otherwise
leave the bin factor as it was set by this script.
- canSetStarPos: if True the user can set the star position;
if False then the Star Pos entries and Find button are not shown.
- maxFindAmpl: maximum star amplitude for finding stars (peak - sky in ADUs);
if None then star finding is disabled.
- doWindow: if True, subframe images during focus sequence
- windowOrigin: index of left or lower pixel for window (0 or 1 unless very weird);
this is not used for star positions, which all have the same convention
- windowIsInclusive: is the upper-right window coord included in the image?
- helpURL: URL of help file
- debug: if True, run in debug mode, which uses fake data and does not communicate with the hub.
"""
self.sr = sr
self.sr.debug = bool(debug)
self.gcamActor = gcamActor
self.instName = instName
self.tccInstPrefix = tccInstPrefix or self.instName
self.imageViewerTLName = imageViewerTLName
if defBinFactor is None:
self.defBinFactor = None
self.binFactor = 1
self.dispBinFactor = 1
else:
self.defBinFactor = int(defBinFactor)
self.binFactor = self.defBinFactor
self.dispBinFactor = self.defBinFactor
self.finalBinFactor = finalBinFactor
self.defRadius = defRadius
self.helpURL = helpURL
self.canSetStarPos = canSetStarPos
self.maxFindAmpl = maxFindAmpl
self.doWindow = bool(doWindow)
self.windowOrigin = int(windowOrigin)
self.windowIsInclusive = bool(windowIsInclusive)
# fake data for debug mode
self.debugIterFWHM = None
# get various models
self.tccModel = TUI.TCC.TCCModel.getModel()
self.tuiModel = TUI.TUIModel.getModel()
self.guideModel = TUI.Guide.GuideModel.getModel(self.gcamActor)
# create and grid widgets
self.gr = RO.Wdg.Gridder(self.sr.master, sticky="ew")
self.createSpecialWdg()
self.createStdWdg()
self.initAll()
# try to get GUI's focus away from graph widget (but it doesn't work; why?)
self.expTimeWdg.focus_set()
self.setCurrFocus()
def createSpecialWdg(self):
"""Create script-specific widgets.
"""
pass
def createStdWdg(self):
"""Create the standard widgets.
"""
self.expTimeWdg = RO.Wdg.FloatEntry(
self.sr.master,
label = "Exposure Time",
minValue = self.guideModel.gcamInfo.minExpTime,
maxValue = self.guideModel.gcamInfo.maxExpTime,
defValue = self.guideModel.gcamInfo.defExpTime,
defFormat = "%0.1f",
defMenu = "Default",
minMenu = "Minimum",
helpText = "Exposure time",
helpURL = self.helpURL,
)
self.gr.gridWdg(self.expTimeWdg.label, self.expTimeWdg, "sec")
self.binFactorWdg = RO.Wdg.IntEntry(
master = self.sr.master,
label = "Bin Factor",
minValue = 1,
maxValue = 1024,
defValue = self.defBinFactor or 1,
defMenu = "Default",
callFunc = self.updBinFactor,
helpText = "Bin factor (for rows and columns)",
helpURL = self.helpURL,
)
if self.defBinFactor is not None:
self.gr.gridWdg(self.binFactorWdg.label, self.binFactorWdg)
self.starPosWdgSet = []
for ii in range(2):
letter = ("X", "Y")[ii]
starPosWdg = RO.Wdg.FloatEntry(
master = self.sr.master,
label = "Star Pos %s" % (letter,),
minValue = 0,
maxValue = 5000,
helpText = "Star %s position (binned, full frame)" % (letter,),
helpURL = self.helpURL,
)
if self.canSetStarPos:
self.gr.gridWdg(starPosWdg.label, starPosWdg, "pix")
self.starPosWdgSet.append(starPosWdg)
self.centroidRadWdg = RO.Wdg.IntEntry(
master = self.sr.master,
label = "Centroid Radius",
minValue = 5,
maxValue = 1024,
defValue = self.defRadius,
defMenu = "Default",
helpText = "Centroid radius; don't skimp",
helpURL = self.helpURL,
)
self.gr.gridWdg(self.centroidRadWdg.label, self.centroidRadWdg, "arcsec", sticky="ew")
setCurrFocusWdg = RO.Wdg.Button(
master = self.sr.master,
text = "Center Focus",
callFunc = self.setCurrFocus,
helpText = "Set to current focus",
helpURL = self.helpURL,
)
self.centerFocPosWdg = RO.Wdg.IntEntry(
master = self.sr.master,
label = "Center Focus",
defValue = 0,
defMenu = "Default",
helpText = "Center of focus sweep",
helpURL = self.helpURL,
)
self.gr.gridWdg(setCurrFocusWdg, self.centerFocPosWdg, MicronStr)
self.focusRangeWdg = RO.Wdg.IntEntry(
master = self.sr.master,
label = "Focus Range",
maxValue = self.DefFocusRange * 10,
defValue = self.DefFocusRange,
defMenu = "Default",
helpText = "Range of focus sweep",
helpURL = self.helpURL,
)
self.gr.gridWdg(self.focusRangeWdg.label, self.focusRangeWdg, MicronStr)
self.numFocusPosWdg = RO.Wdg.IntEntry(
master = self.sr.master,
label = "Focus Positions",
minValue = 3,
defValue = self.DefFocusNPos,
defMenu = "Default",
helpText = "Number of focus positions for sweep",
helpURL = self.helpURL,
)
self.gr.gridWdg(self.numFocusPosWdg.label, self.numFocusPosWdg, "")
self.focusIncrWdg = RO.Wdg.FloatEntry(
master = self.sr.master,
label = "Focus Increment",
defFormat = "%0.1f",
readOnly = True,
relief = "flat",
helpText = "Focus step size; must be at least %s %s" % (self.MinFocusIncr, MicronStr),
helpURL = self.helpURL,
)
self.gr.gridWdg(self.focusIncrWdg.label, self.focusIncrWdg, MicronStr)
# create the move to best focus checkbox
self.moveBestFocus = RO.Wdg.Checkbutton(
master = self.sr.master,
text = "Move to Best Focus",
defValue = True,
relief = "flat",
helpText = "Move to estimated best focus and measure FWHM after sweep?",
helpURL = self.helpURL,
)
self.gr.gridWdg(None, self.moveBestFocus, colSpan = 3, sticky="w")
graphCol = self.gr.getNextCol()
graphRowSpan = self.gr.getNextRow()
# table of measurements (including separate unscrolled header)
TableWidth = 32
self.logHeader = RO.Wdg.Text(
master = self.sr.master,
readOnly = True,
height = 2,
width = TableWidth,
helpText = "Measured and fit results",
helpURL = self.helpURL,
relief = "sunken",
bd = 0,
)
self.logHeader.insert("0.0", """\tfocus\tFWHM\tFWHM\tsky\tampl\tsky+ampl
\t%s\tpixels\tarcsec\tADUs\tADUs\tADUs""" % MicronStr)
self.logHeader.setEnable(False)
self.gr.gridWdg(False, self.logHeader, sticky="ew", colSpan = 10)
self.logWdg = RO.Wdg.LogWdg(
master = self.sr.master,
height = 10,
width = TableWidth,
helpText = "Measured and fit results",
helpURL = self.helpURL,
relief = "sunken",
bd = 2,
)
self.gr.gridWdg(False, self.logWdg, sticky="ew", colSpan = 10)
# graph of measurements
plotFig = matplotlib.figure.Figure(figsize=(4, 1), frameon=True)
self.figCanvas = FigureCanvasTkAgg(plotFig, self.sr.master)
self.figCanvas.get_tk_widget().grid(row=0, column=graphCol, rowspan=graphRowSpan, sticky="news")
self.plotAxis = plotFig.add_subplot(1, 1, 1)
self.focusRangeWdg.addCallback(self.updFocusIncr, callNow=False)
self.numFocusPosWdg.addCallback(self.updFocusIncr, callNow=True)
# add command buttons
cmdBtnFrame = Tkinter.Frame(self.sr.master)
self.findBtn = RO.Wdg.Button(
master = cmdBtnFrame,
text = "Find",
callFunc = RO.Alg.GenericCallback(self.doCmd, self.cmd_Find),
helpText = "Update focus, expose and find best star",
helpURL = self.helpURL,
)
if self.maxFindAmpl is not None:
self.findBtn.pack(side="left")
self.measureBtn = RO.Wdg.Button(
master = cmdBtnFrame,
text = "Measure",
callFunc = RO.Alg.GenericCallback(self.doCmd, self.cmd_Measure),
helpText = "Update focus, expose and measure FWHM",
helpURL = self.helpURL,
)
self.measureBtn.pack(side="left")
self.sweepBtn = RO.Wdg.Button(
master = cmdBtnFrame,
text = "Sweep",
callFunc = RO.Alg.GenericCallback(self.doCmd, self.cmd_Sweep),
helpText = "Start focus sweep",
helpURL = self.helpURL,
)
self.sweepBtn.pack(side="left")
self.clearBtn = RO.Wdg.Button(
master = cmdBtnFrame,
text = "Clear",
callFunc = self.doClear,
helpText = "Clear table and graph",
helpURL = self.helpURL,
)
self.clearBtn.pack(side="right")
nCol = self.gr.getMaxNextCol()
self.gr.gridWdg(False, cmdBtnFrame, colSpan=nCol)
if self.sr.debug:
self.expTimeWdg.set("1")
self.centerFocPosWdg.set(0)
def clearGraph(self):
self.plotAxis.clear()
self.plotAxis.grid(True)
# start with autoscale disabled due to bug in matplotlib
self.plotAxis.set_autoscale_on(False)
self.figCanvas.draw()
self.plotLine = None
def doClear(self, wdg=None):
self.logWdg.clearOutput()
self.clearGraph()
def doCmd(self, cmdMode, wdg=None):
if cmdMode not in (
self.cmd_Measure,
self.cmd_Find,
self.cmd_Sweep,
):
raise RuntimeError("Unknown command mode %r" % (cmdMode,))
self.cmdMode = cmdMode
self.sr.resumeUser()
def enableCmdBtns(self, doEnable):
"""Enable or disable command buttons (e.g. Expose and Sweep).
"""
self.findBtn.setEnable(doEnable)
self.measureBtn.setEnable(doEnable)
self.sweepBtn.setEnable(doEnable)
self.clearBtn.setEnable(doEnable)
def end(self, sr):
"""Run when script exits (normally or due to error)
"""
try:
self.enableCmdBtns(False)
doAskWait = False
if self.focPosToRestore is not None:
tccCmdStr = "set focus=%0.0f" % (self.focPosToRestore,)
self.logWdg.addMsg("Setting focus to %0.0f %s" % (self.focPosToRestore, MicronStr))
doAskWait = True
self.focPosToRestore = None
self.sr.startCmd(actor="tcc", cmdStr=tccCmdStr)
if self.doRestoreBoresight():
self.currBoreXYDeg = self.begBoreXYDeg
self.logWdg.addMsg("Restoring boresight to %0.7f, %0.7f deg" % (self.begBoreXYDeg[0], self.begBoreXYDeg[1]))
doAskWait = True
self.moveBoresight(self.begBoreXYDeg)
if self.isFinalExposureWanted():
self.doTakeFinalImage = False
doAskWait = True
exposeCmdDict = self.getExposeCmdDict(doWindow=False, isFinal=True)
self.logWdg.addMsg("Taking a final exposure")
self.sr.startCmd(**exposeCmdDict)
if doAskWait:
self.logWdg.addMsg("Wait for these tasks to finish before running again", severity=RO.Constants.sevWarning)
except sr.ScriptError:
raise # no diagnostics needed
except Exception:
traceback.print_exc(file=sys.stderr)
self._printDiagnostics()
raise
def formatBinFactorArg(self, isFinal):
"""Return bin factor argument for expose/centroid/findstars command
Inputs:
- isFinal: if True then return parameters for final exposure
"""
#print "defBinFactor=%r, binFactor=%r" % (self.defBinFactor, self.binFactor)
binFactor = self.getBinFactor(isFinal=isFinal)
if binFactor is None:
return ""
return "bin=%d" % (binFactor,)
def formatExposeArgs(self, doWindow=True, isFinal=False):
"""Format arguments for exposure command.
Inputs:
- doWindow: if true, window the exposure (if permitted)
- isFinal: if True then return parameters for final exposure
"""
argList = [
"time=%s" % (self.expTime,),
self.formatBinFactorArg(isFinal=isFinal),
self.formatWindowArg(doWindow),
]
argList = [arg for arg in argList if arg]
return " ".join(argList)
def formatWindowArg(self, doWindow=True):
"""Format window argument for expose/centroid/findstars command.
Inputs:
- doWindow: if true, window the exposure (if permitted)
"""
if not doWindow or not self.doWindow:
return ""
if self.windowIsInclusive:
urOffset = self.windowOrigin
else:
urOffset = self.windowOrigin + 1
windowLL = [self.window[ii] + self.windowOrigin for ii in range(2)]
windowUR = [self.window[ii+2] + urOffset for ii in range(2)]
return "window=%d,%d,%d,%d" % (windowLL[0], windowLL[1], windowUR[0], windowUR[1])
def getBinFactor(self, isFinal):
"""Get bin factor (as a single int), or None if not relevant
Inputs:
- isFinal: if True then return parameters for final exposure
"""
if self.defBinFactor is None:
return None
if isFinal and self.finalBinFactor is not None:
return self.finalBinFactor
return self.binFactor
def getInstInfo(self):
"""Obtains instrument data.
Verifies the correct instrument and sets these attributes:
- instScale: x,y image scale in unbinned pixels/degree
- instCtr: x,y image center in unbinned pixels
- instLim: xmin, ymin, xmax, ymax image limits, inclusive, in unbinned pixels
- arcsecPerPixel: image scale in arcsec/unbinned pixel;
average of x and y scales
Raises ScriptError if wrong instrument.
"""
if self.tccInstPrefix and not self.sr.debug:
# Make sure current instrument is correct
try:
currInstName = self.sr.getKeyVar(self.tccModel.instName)
except self.sr.ScriptError:
raise self.sr.ScriptError("current instrument unknown")
if not currInstName.lower().startswith(self.tccInstPrefix.lower()):
raise self.sr.ScriptError("%s is not the current instrument (%s)!" % (self.instName, currInstName))
self.instScale = self.sr.getKeyVar(self.tccModel.iimScale, ind=None)
self.instCtr = self.sr.getKeyVar(self.tccModel.iimCtr, ind=None)
self.instLim = self.sr.getKeyVar(self.tccModel.iimLim, ind=None)
else:
# data from tcc tinst:I_NA2_DIS.DAT 18-OCT-2006
self.instScale = [-12066.6, 12090.5] # unbinned pixels/deg
self.instCtr = [240, 224]
self.instLim = [0, 0, 524, 511]
self.arcsecPerPixel = 3600.0 * 2 / (abs(self.instScale[0]) + abs(self.instScale[1]))
def getEntryNum(self, wdg):
"""Return the numeric value of a widget, or raise ScriptError if blank.
"""
numVal = wdg.getNumOrNone()
if numVal is not None:
return numVal
raise self.sr.ScriptError(wdg.label + " not specified")
def getExposeCmdDict(self, doWindow=True, isFinal=False):
"""Get basic command argument dict for an expose command
This includes actor, cmdStr, abortCmdStr
Inputs:
- doWindow: if true, window the exposure (if permitted)
- isFinal: if True then return parameters for final exposure
"""
return dict(
actor = self.gcamActor,
cmdStr = "expose " + self.formatExposeArgs(doWindow, isFinal=isFinal),
abortCmdStr = "abort",
)
def graphFocusMeas(self, focPosFWHMList, extremeFocPos=None, extremeFWHM=None):
"""Graph measured fwhm vs focus.
Inputs:
- focPosFWHMList: list of data items:
- focus position (um)
- measured FWHM (binned pixels)
- extremeFocPos: extremes of focus position
- extremeFWHM: extremes of FWHM
- setFocRange: adjust displayed focus range?
extremes are an Extremes object with .minVal and .maxVal
"""
# "graphFocusMeas(focPosFWHMList=%s, extremeFocPos=%r, extremeFWHM=%r)" % (focPosFWHMList, extremeFocPos, extremeFWHM)
numMeas = len(focPosFWHMList)
if numMeas == 0:
return
focList, fwhmList = zip(*focPosFWHMList)
if not self.plotLine:
self.plotLine = self.plotAxis.plot(focList, fwhmList, 'bo')[0]
else:
self.plotLine.set_data(focList[:], fwhmList[:])
self.setGraphRange(extremeFocPos=extremeFocPos, extremeFWHM=extremeFWHM)
def initAll(self):
"""Initialize variables, table and graph.
"""
# initialize shared variables
self.doTakeFinalImage = False
self.focDir = None
self.currBoreXYDeg = None
self.begBoreXYDeg = None
self.instScale = None
self.arcsecPerPixel = None
self.instCtr = None
self.instLim = None
self.cmdMode = None
self.focPosToRestore = None
self.expTime = None
self.absStarPos = None
self.relStarPos = None
self.binFactor = None
self.window = None # LL pixel is 0, UL pixel is included
self.enableCmdBtns(False)
def isFinalExposureWanted(self):
"""Return True if a final exposure is wanted, else False
"""
return self.doTakeFinalImage and (self.doWindow or self.doRestoreBoresight() or self.finalBinFactor is not None)
def logFitFWHM(self, name, focPos, fwhm):
"""Log a fit value of FWHM or FWHM error.
"""
if fwhm is not None:
fwhmArcSec = fwhm * self.arcsecPerPixel * self.binFactor
else:
fwhmArcSec = None
dataStrs = (
formatNum(focPos, "%0.0f"),
formatNum(fwhm, "%0.1f"),
formatNum(fwhmArcSec, "%0.2f"),
)
outStr = "%s\t%s" % (name, "\t".join(dataStrs))
self.logWdg.addMsg(outStr)
def logStarMeas(self, name, focPos, starMeas):
"""Log a star measurement.
The name should be less than 8 characters long.
Any or all data fields in starMeas may be None.
Inputs:
- focPos: focus position, in um
- starMeas: StarMeas object
If fwhm is None, it is reported as NaN.
"""
fwhm = starMeas.fwhm
if fwhm is not None:
fwhmArcSec = fwhm * self.arcsecPerPixel * self.binFactor
else:
fwhmArcSec = None
if None not in (starMeas.ampl, starMeas.sky):
skyPlusAmpl = starMeas.ampl + starMeas.sky
else:
skyPlusAmpl = None
dataStrs = (
formatNum(focPos, "%0.0f"),
formatNum(fwhm, "%0.1f"),
formatNum(fwhmArcSec, "%0.2f"),
formatNum(starMeas.sky, "%0.0f"),
formatNum(starMeas.ampl, "%0.0f"),
formatNum(skyPlusAmpl, "%0.0f"),
)
outStr = "%s\t%s" % (name, "\t".join(dataStrs))
self.logWdg.addMsg(outStr)
def recordUserParams(self, doStarPos=True):
"""Record user-set parameters relating to exposures but not to focus
Inputs:
- doStarPos: if true: save star position and related information;
warning: if doStarPos true then there must *be* a valid star position
Set the following instance variables:
- expTime
- centroidRadPix
The following are set to None if doStarPos false:
- absStarPos
- relStarPos
- window
"""
self.expTime = self.getEntryNum(self.expTimeWdg)
self.binFactor = self.dispBinFactor
centroidRadArcSec = self.getEntryNum(self.centroidRadWdg)
self.centroidRadPix = centroidRadArcSec / (self.arcsecPerPixel * self.binFactor)
if doStarPos:
winRad = self.centroidRadPix * self.WinSizeMult
self.absStarPos = [None, None]
for ii in range(2):
wdg = self.starPosWdgSet[ii]
self.absStarPos[ii] = self.getEntryNum(wdg)
if self.doWindow:
windowMinXY = [max(self.instLim[ii], int(0.5 + self.absStarPos[ii] - winRad)) for ii in range(2)]
windowMaxXY = [min(self.instLim[ii-2], int(0.5 + self.absStarPos[ii] + winRad)) for ii in range(2)]
self.window = windowMinXY + windowMaxXY
self.relStarPos = [self.absStarPos[ii] - windowMinXY[ii] for ii in range(2)]
#print "winRad=%s, windowMinXY=%s, relStarPos=%s" % (winRad, windowMinXY, self.relStarPos)
else:
self.window = None
self.relStarPos = self.absStarPos[:]
else:
self.absStarPos = None
self.relStarPos = None
self.window = None
def run(self, sr):
"""Run the focus script.
"""
try:
self.initAll()
# fake data for debug mode
# iteration #, FWHM
self.debugIterFWHM = (1, 2.0)
self.getInstInfo()
yield self.waitExtraSetup()
# open image viewer window, if any
if self.imageViewerTLName:
self.tuiModel.tlSet.makeVisible(self.imageViewerTLName)
self.sr.master.winfo_toplevel().lift()
focPosFWHMList = []
extremeFocPos = Extremes()
extremeFWHM = Extremes()
# check that the gcam actor is alive. This is important because
# centroid commands can fail due to no actor or no star
# so we want to halt in the former case
yield self.sr.waitCmd(
actor = self.gcamActor,
cmdStr = "ping",
)
# command loop; repeat until error or user explicitly presses Stop
if self.maxFindAmpl is None:
btnStr = "Measure or Sweep"
else:
btnStr = "Find, Measure or Sweep"
waitMsg = "Press %s to continue" % (btnStr,)
testNum = 0
while True:
# wait for user to press the Expose or Sweep button
# note: the only time they should be enabled is during this wait
self.enableCmdBtns(True)
self.sr.showMsg(waitMsg, RO.Constants.sevWarning)
yield self.sr.waitUser()
self.enableCmdBtns(False)
if self.cmdMode == self.cmd_Sweep:
break
if testNum == 0:
self.clearGraph()
if self.maxFindAmpl is None:
self.logWdg.addMsg("===== Measure =====")
else:
self.logWdg.addMsg("===== Find/Measure =====")
testNum += 1
focPos = float(self.centerFocPosWdg.get())
if focPos is None:
raise self.sr.ScriptError("must specify center focus")
yield self.waitSetFocus(focPos, False)
if self.cmdMode == self.cmd_Measure:
cmdName = "Meas"
self.recordUserParams(doStarPos=True)
yield self.waitCentroid()
elif self.cmdMode == self.cmd_Find:
cmdName = "Find"
self.recordUserParams(doStarPos=False)
yield self.waitFindStar()
starData = self.sr.value
if starData.xyPos is not None:
self.sr.showMsg("Found star at %0.1f, %0.1f" % tuple(starData.xyPos))
self.setStarPos(starData.xyPos)
else:
raise RuntimeError("Unknown command mode: %r" % (self.cmdMode,))
starMeas = self.sr.value
self.logStarMeas("%s %d" % (cmdName, testNum,), focPos, starMeas)
fwhm = starMeas.fwhm
if fwhm is None:
waitMsg = "No star found! Fix and then press %s" % (btnStr,)
self.setGraphRange(extremeFocPos=extremeFocPos)
else:
extremeFocPos.addVal(focPos)
extremeFWHM.addVal(starMeas.fwhm)
focPosFWHMList.append((focPos, fwhm))
self.graphFocusMeas(focPosFWHMList, extremeFocPos, extremeFWHM)
waitMsg = "%s done; press %s to continue" % (cmdName, btnStr,)
self.recordUserParams(doStarPos=True)
yield self.waitFocusSweep()
except sr.ScriptError:
raise # no diagnostics needed
except Exception:
traceback.print_exc(file=sys.stderr)
self._printDiagnostics()
raise
def setCurrFocus(self, *args):
"""Set center focus to current focus.
"""
currFocus = self.sr.getKeyVar(self.tccModel.secFocus, defVal=None)
if currFocus is None:
self.sr.showMsg("Current focus not known",
severity=RO.Constants.sevWarning,
)
return
self.centerFocPosWdg.set(currFocus)
self.sr.showMsg("")
def setGraphRange(self, extremeFocPos=None, extremeFWHM=None):
"""Sets the displayed range of the graph.
Inputs:
- extremeFocPos: focus extremes
- extremeFWHM: FWHM extremes
"""
# "setGraphRange(extremeFocPos=%s, extremeFWHM=%s)" % (extremeFocPos, extremeFWHM)
if extremeFocPos and extremeFocPos.isOK():
minFoc = extremeFocPos.minVal - self.FocGraphMargin
maxFoc = extremeFocPos.maxVal + self.FocGraphMargin
if maxFoc - minFoc < 50:
minFoc -= 25
maxFoc += 25
self.plotAxis.set_xlim(minFoc, maxFoc)
if extremeFWHM and extremeFWHM.isOK():
minFWHM = extremeFWHM.minVal * 0.95
maxFWHM = extremeFWHM.maxVal * 1.05
self.plotAxis.set_ylim(minFWHM, maxFWHM)
self.figCanvas.draw()
def setStarPos(self, starXYPix):
"""Set star position widgets.
Inputs:
- starXYPix: star x, y position (binned pixels)
"""
for ii in range(2):
wdg = self.starPosWdgSet[ii]
wdg.set(starXYPix[ii])
def updBinFactor(self, *args, **kargs):
"""Called when the user changes the bin factor"""
newBinFactor = self.binFactorWdg.getNum()
if newBinFactor <= 0:
return
oldBinFactor = self.dispBinFactor
if oldBinFactor == newBinFactor:
return
self.dispBinFactor = newBinFactor
# adjust displayed star position
posFactor = float(oldBinFactor) / float(newBinFactor)
for ii in range(2):
oldStarPos = self.starPosWdgSet[ii].getNum()
if oldStarPos == 0:
continue
newStarPos = oldStarPos * posFactor
self.starPosWdgSet[ii].set(newStarPos)
def updFocusIncr(self, *args):
"""Update focus increment widget.
"""
focusRange = self.focusRangeWdg.getNumOrNone()
numPos = self.numFocusPosWdg.getNumOrNone()
if None in (focusRange, numPos):
self.focusIncrWdg.set(None, isCurrent = False)
return
focusIncr = focusRange / float(numPos - 1)
isOK = focusIncr >= self.MinFocusIncr
if not isOK:
errMsg = "Focus increment too small (< %s %s)" % (self.MinFocusIncr, MicronStr)
self.sr.showMsg(errMsg, RO.Constants.sevWarning)
self.focusIncrWdg.set(focusIncr, isCurrent = isOK)
def waitCentroid(self):
"""Take an exposure and centroid using 1x1 binning.
If the centroid is found, sets self.sr.value to the FWHM.
Otherwise sets self.sr.value to None.
"""
centroidCmdStr = "centroid on=%0.1f,%0.1f cradius=%0.1f %s" % \
(self.relStarPos[0], self.relStarPos[1], self.centroidRadPix, self.formatExposeArgs())
self.doTakeFinalImage = True
yield self.sr.waitCmd(
actor = self.gcamActor,
cmdStr = centroidCmdStr,
keyVars = (self.guideModel.files, self.guideModel.star),
checkFail = False,
)
cmdVar = self.sr.value
if self.sr.debug:
starData = makeStarData("c", self.relStarPos)
else:
starData = cmdVar.getKeyVarData(self.guideModel.star)
if starData:
self.sr.value = StarMeas.fromStarKey(starData[0])
return
else:
self.sr.value = StarMeas()
if not cmdVar.getKeyVarData(self.guideModel.files):
raise self.sr.ScriptError("exposure failed")
def waitExtraSetup(self):
"""Executed once at the start of each run
after calling initAll and getInstInfo but before doing anything else.
Override to do things such as move the boresight or put the instrument into a particular mode.
"""
yield self.sr.waitMS(1)
def waitFindStar(self):
"""Take a full-frame exposure and find the best star that can be centroided.
Sets self.sr.value to StarMeas.
Displays a warning if no star found.
"""
if self.maxFindAmpl is None:
raise RuntimeError("Find disabled; maxFindAmpl=None")
self.sr.showMsg("Exposing %s sec to find best star" % (self.expTime,))
findStarCmdStr = "findstars " + self.formatExposeArgs(doWindow=False)
self.doTakeFinalImage = True
yield self.sr.waitCmd(
actor = self.gcamActor,
cmdStr = findStarCmdStr,
keyVars = (self.guideModel.files, self.guideModel.star),
checkFail = False,
)
cmdVar = self.sr.value
if self.sr.debug:
filePath = "debugFindFile"
else:
if not cmdVar.getKeyVarData(self.guideModel.files):
raise self.sr.ScriptError("exposure failed")
fileInfo = cmdVar.getKeyVarData(self.guideModel.files)[0]
filePath = "".join(fileInfo[2:4])
if self.sr.debug:
starDataList = makeStarData("f", (50.0, 75.0))
else:
starDataList = cmdVar.getKeyVarData(self.guideModel.star)
if not starDataList:
self.sr.value = StarMeas()
self.sr.showMsg("No stars found", severity=RO.Constants.sevWarning)
return
yield self.waitFindStarInList(filePath, starDataList)
def waitFindStarInList(self, filePath, starDataList):
"""Find best centroidable star in starDataList.
If a suitable star is found: set starXYPos to position
and self.sr.value to the star FWHM.
Otherwise log a warning and set self.sr.value to None.
Inputs:
- filePath: image file path on hub, relative to image root
(e.g. concatenate items 2:4 of the guider Files keyword)
- starDataList: list of star keyword data
"""
if self.maxFindAmpl is None:
raise RuntimeError("Find disabled; maxFindAmpl=None")
for starData in starDataList:
starXYPos = starData[2:4]
starAmpl = starData[14]
if (starAmpl is None) or (starAmpl > self.maxFindAmpl):
continue
self.sr.showMsg("Centroiding star at %0.1f, %0.1f" % tuple(starXYPos))
centroidCmdStr = "centroid file=%s on=%0.1f,%0.1f cradius=%0.1f" % \
(filePath, starXYPos[0], starXYPos[1], self.centroidRadPix)
yield self.sr.waitCmd(
actor = self.gcamActor,
cmdStr = centroidCmdStr,
keyVars = (self.guideModel.star,),
checkFail = False,
)
cmdVar = self.sr.value
if self.sr.debug:
starData = makeStarData("f", starXYPos)
else:
starData = cmdVar.getKeyVarData(self.guideModel.star)
if starData:
self.sr.value = StarMeas.fromStarKey(starData[0])
return
self.sr.showMsg("No usable star fainter than %s ADUs found" % self.maxFindAmpl,
severity=RO.Constants.sevWarning)
self.sr.value = StarMeas()
def waitFocusSweep(self):
"""Conduct a focus sweep.
Sets self.sr.value to True if successful.
"""
scriptException = None
try:
focPosFWHMList = []
self.logWdg.addMsg("===== Sweep =====")
self.clearGraph()
centerFocPos = float(self.getEntryNum(self.centerFocPosWdg))
focusRange = float(self.getEntryNum(self.focusRangeWdg))
startFocPos = centerFocPos - (focusRange / 2.0)
endFocPos = startFocPos + focusRange
numFocPos = self.getEntryNum(self.numFocusPosWdg)
if numFocPos < 3:
raise self.sr.ScriptError("need at least three focus positions")
focusIncr = self.focusIncrWdg.getNum()
if focusIncr < self.MinFocusIncr:
raise self.sr.ScriptError("focus increment too small (< %s %s)" % (self.MinFocusIncr, MicronStr))
self.focDir = (endFocPos > startFocPos)
extremeFocPos = Extremes(startFocPos)
extremeFocPos.addVal(endFocPos)
extremeFWHM = Extremes()
self.setGraphRange(extremeFocPos=extremeFocPos)
numMeas = 0
self.focPosToRestore = centerFocPos
for focInd in range(numFocPos):
focPos = float(startFocPos + (focInd*focusIncr))
doBacklashComp = (focInd == 0)
yield self.waitSetFocus(focPos, doBacklashComp)
self.sr.showMsg("Exposing for %s sec at focus %0.0f %s" % \
(self.expTime, focPos, MicronStr))
yield self.waitCentroid()
starMeas = self.sr.value
if self.sr.debug:
starMeas.fwhm = 0.0001 * (focPos - centerFocPos) ** 2
starMeas.fwhm += random.gauss(1.0, 0.25)
extremeFWHM.addVal(starMeas.fwhm)
self.logStarMeas("Sw %d" % (focInd+1,), focPos, starMeas)
if starMeas.fwhm is not None:
focPosFWHMList.append((focPos, starMeas.fwhm))
self.graphFocusMeas(focPosFWHMList, extremeFWHM=extremeFWHM)
# Fit a curve to the data
numMeas = len(focPosFWHMList)
if numMeas < 3:
raise self.sr.ScriptError("need at least 3 measurements to fit best focus")
focList, fwhmList = zip(*focPosFWHMList)
focPosArr = numpy.array(focList, dtype=float)
fwhmArr = numpy.array(fwhmList, dtype=float)
weightArr = numpy.ones(numMeas, dtype=float)
if numMeas > 3:
coeffs, dumYFit, dumYBand, fwhmSigma, dumCorrMatrix = polyfitw(focPosArr, fwhmArr, weightArr, 2, True)
elif numMeas == 3:
# too few points to measure fwhmSigma
coeffs = polyfitw(focPosArr, fwhmArr, weightArr, 2, False)
fwhmSigma = None
# Make sure fit curve has a minimum
if coeffs[2] <= 0.0:
raise self.sr.ScriptError("could not find minimum focus")
# find the best focus position
bestEstFocPos = (-1.0*coeffs[1])/(2.0*coeffs[2])
bestEstFWHM = coeffs[0]+coeffs[1]*bestEstFocPos+coeffs[2]*bestEstFocPos*bestEstFocPos
extremeFocPos.addVal(bestEstFocPos)
extremeFWHM.addVal(bestEstFWHM)
self.logFitFWHM("Fit", bestEstFocPos, bestEstFWHM)
# compute and log standard deviation, if possible
if fwhmSigma is not None:
focSigma = math.sqrt(fwhmSigma / coeffs[2])
self.logFitFWHM(u"Fit \N{GREEK SMALL LETTER SIGMA}", focSigma, fwhmSigma)
else:
focSigma = None
self.logWdg.addMsg(u"Warning: too few points to compute \N{GREEK SMALL LETTER SIGMA}")
# plot fit as a curve and best fit focus as a point
fitFocArr = numpy.arange(min(focPosArr), max(focPosArr), 1)
fitFWHMArr = coeffs[0] + coeffs[1]*fitFocArr + coeffs[2]*(fitFocArr**2.0)
self.plotAxis.plot(fitFocArr, fitFWHMArr, '-k', linewidth=2)
self.plotAxis.plot([bestEstFocPos], [bestEstFWHM], 'go')
self.setGraphRange(extremeFocPos=extremeFocPos, extremeFWHM=extremeFWHM)
# check fit error
if focSigma is not None:
maxFocSigma = self.MaxFocSigmaFac * focusRange
if focSigma > maxFocSigma:
raise self.sr.ScriptError("focus std. dev. too large: %0.0f > %0.0f" % (focSigma, maxFocSigma))
# check that estimated best focus is in sweep range
if not startFocPos <= bestEstFocPos <= endFocPos:
raise self.sr.ScriptError("best focus=%0.0f out of sweep range" % (bestEstFocPos,))
# move to best focus if "Move to best Focus" checked
moveBest = self.moveBestFocus.getBool()
if not moveBest:
return
yield self.waitSetFocus(bestEstFocPos, doBacklashComp=True)
self.setCurrFocus()
self.sr.showMsg("Exposing for %s sec at estimated best focus %d %s" % \
(self.expTime, bestEstFocPos, MicronStr))
yield self.waitCentroid()
finalStarMeas = self.sr.value
if self.sr.debug:
finalStarMeas.fwhm = 1.1
extremeFWHM.addVal(finalStarMeas.fwhm)
self.logStarMeas("Meas", bestEstFocPos, finalStarMeas)
finalFWHM = finalStarMeas.fwhm
if finalFWHM is not None:
self.plotAxis.plot([bestEstFocPos], [finalFWHM], 'ro')
self.setGraphRange(extremeFocPos=extremeFocPos, extremeFWHM=extremeFWHM)
else:
raise self.sr.ScriptError("could not measure FWHM at estimated best focus")
# A new best focus was picked; don't restore the original focus
# and do set Center Focus to the new focus
self.focPosToRestore = None
self.centerFocPosWdg.set(int(round(bestEstFocPos)))
except self.sr.ScriptError as e:
scriptException = e
self.logWdg.addMsg(str(e), severity=RO.Constants.sevError)
if self.focPosToRestore is not None:
focPosToRestore, self.focPosToRestore = self.focPosToRestore, None
self.logWdg.addMsg("Setting focus to %0.0f %s" % (focPosToRestore, MicronStr))
yield self.waitSetFocus(focPosToRestore)
if self.doRestoreBoresight():
self.currBoreXYDeg = self.begBoreXYDeg
self.logWdg.addMsg("Restoring boresight to %0.7f, %0.7f deg" % (self.begBoreXYDeg[0], self.begBoreXYDeg[1]))
yield self.sr.waitCmdVars(self.moveBoresight(self.begBoreXYDeg, msgStr="Restoring original boresight position"))
if self.isFinalExposureWanted():
self.doTakeFinalImage = False
exposeCmdDict = self.getExposeCmdDict(doWindow=False, isFinal=True)
self.logWdg.addMsg("Taking a final exposure")
yield self.sr.waitCmd(**exposeCmdDict)
if scriptException:
raise scriptException
def doRestoreBoresight(self):
"""Has the boresight changed?
"""
return self.currBoreXYDeg != self.begBoreXYDeg
def waitSetFocus(self, focPos, doBacklashComp=False):
"""Adjust focus.
To use: yield waitSetFocus(...)
Inputs:
- focPos: new focus position in um
- doBacklashComp: if True, perform backlash compensation
"""
focPos = float(focPos)
# to try to eliminate the backlash in the secondary mirror drive move back 1/2 the
# distance between the start and end position from the bestEstFocPos
if doBacklashComp and self.BacklashComp:
backlashFocPos = focPos - (abs(self.BacklashComp) * self.focDir)
self.sr.showMsg("Backlash comp: moving focus to %0.0f %s" % (backlashFocPos, MicronStr))
yield self.sr.waitCmd(
actor = "tcc",
cmdStr = "set focus=%0.0f" % (backlashFocPos,),
)
yield self.sr.waitMS(self.FocusWaitMS)
# move to desired focus position
self.sr.showMsg("Moving focus to %0.0f %s" % (focPos, MicronStr))
yield self.sr.waitCmd(
actor = "tcc",
cmdStr = "set focus=%0.0f" % (focPos,),
)
yield self.sr.waitMS(self.FocusWaitMS)
def _printDiagnostics(self):
"""Print diagnostics to stderr in an attempt to diagnose a rare problem
"""
sys.stderr.write("self=%r; class hierarchy=%s\n" % (self, inspect.getclasstree([type(self)])))
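# Sketch of the sweep arithmetic used by BaseFocusScript, pulled out as standalone
# helpers for illustration (the names below are hypothetical and not used elsewhere
# in this module). Focus positions are spaced focusRange/(numPos - 1) apart, and the
# best focus is the vertex -c1/(2*c2) of the fitted parabola
# FWHM = c0 + c1*foc + c2*foc**2 (compare updFocusIncr and waitFocusSweep).
def _exampleSweepPositions(centerFocPos=0.0, focusRange=200.0, numFocPos=5):
    focusIncr = focusRange / float(numFocPos - 1)   # 50 um for these defaults
    startFocPos = centerFocPos - (focusRange / 2.0)
    return [startFocPos + (ind * focusIncr) for ind in range(numFocPos)]

def _exampleBestFocus(coeffs):
    # coeffs = (c0, c1, c2) with c2 > 0, e.g. from polyfitw(focPosArr, fwhmArr, w, 2)
    bestFocPos = -coeffs[1] / (2.0 * coeffs[2])
    bestFWHM = coeffs[0] + coeffs[1] * bestFocPos + coeffs[2] * bestFocPos ** 2
    return bestFocPos, bestFWHM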
class SlitviewerFocusScript(BaseFocusScript):
"""Focus script for slitviewers
"""
def __init__(self,
sr,
gcamActor,
instName,
imageViewerTLName,
defBoreXY,
defRadius = 5.0,
defBinFactor = 1,
finalBinFactor = None,
doWindow = True,
windowOrigin = 0,
windowIsInclusive = True,
helpURL = None,
debug = False,
):
"""The setup script; run once when the script runner
window is created.
Inputs:
- gcamActor: name of guide camera actor (e.g. "dcam")
- instName: name of instrument (e.g. "DIS"); must be a name known to TUI.Inst.ExposeModel.
- imageViewerTLName: name of image viewer toplevel (e.g. "Guide.DIS Slitviewer")
- defBoreXY: default boresight position in [x, y] arcsec;
If an entry is None then no offset widget is shown for that axis
and 0 is used.
- defRadius: default centroid radius, in arcsec
- defBinFactor: default bin factor; if None then bin factor cannot be set
- finalBinFactor: the final bin factor; this is an ugly hack that works around the problem
that guiders do not provide a bin factor keyword so there's no way to tell what
bin factor the guider started out with.
None means: if you can tell what the starting bin factor is then restore that; otherwise
leave the bin factor as it was set by this script.
- doWindow: if True, subframe images during focus sequence
- windowOrigin: index of left or lower pixel for window (0 or 1 unless very weird);
this is not used for star positions, which all have the same convention
- windowIsInclusive: is the upper-right window coord included in the image?
- helpURL: URL of help file
- debug: if True, run in debug mode, which uses fake data and does not communicate with the hub.
"""
if len(defBoreXY) != 2:
raise ValueError("defBoreXY=%s must be a pair of values" % defBoreXY)
self.defBoreXY = defBoreXY
BaseFocusScript.__init__(self,
sr = sr,
gcamActor = gcamActor,
instName = instName,
imageViewerTLName = imageViewerTLName,
defRadius = defRadius,
defBinFactor = defBinFactor,
finalBinFactor = finalBinFactor,
canSetStarPos = False,
maxFindAmpl = None,
doWindow = doWindow,
windowOrigin = windowOrigin,
windowIsInclusive = windowIsInclusive,
helpURL = helpURL,
debug = debug,
)
def createSpecialWdg(self):
"""Create boresight widget(s).
"""
self.boreNameWdgSet = []
for ii in range(2):
showWdg = (self.defBoreXY[ii] is not None)
if showWdg:
defVal = float(self.defBoreXY[ii])
else:
defVal = 0.0
letter = ("X", "Y")[ii]
wdgLabel = "Boresight %s" % (letter,)
boreWdg = RO.Wdg.FloatEntry(
master = self.sr.master,
label = wdgLabel,
minValue = -60.0,
maxValue = 60.0,
defValue = defVal,
defMenu = "Default",
helpText = wdgLabel + " position",
helpURL = self.helpURL,
)
if showWdg:
self.gr.gridWdg(boreWdg.label, boreWdg, "arcsec")
self.boreNameWdgSet.append(boreWdg)
def moveBoresight(self, boreXYDeg, msgStr="Moving the boresight"):
"""Move the boresight to the specified position and set starPos accordingly.
Inputs:
- boreXYDeg: new boresight position (x, y deg)
Returns the cmdVar
Other effects:
- If self.begBoreXYDeg is None then sets it to the current value as reported by the TCC
- Sets self.currBoreXYDeg to boreXYDeg
- Shifts the amount in the star position entry fields by boreXYDeg
"""
cmdStr = "offset boresight %0.7f, %0.7f/pabs/computed" % (boreXYDeg[0], boreXYDeg[1])
# save the initial boresight position, if not already done
if self.begBoreXYDeg is None:
begBorePVTs = self.sr.getKeyVar(self.tccModel.boresight, ind=None)
if not self.sr.debug:
begBoreXYDeg = [pvt.getPos() for pvt in begBorePVTs]
if None in begBoreXYDeg:
raise self.sr.ScriptError("current boresight position unknown")
self.begBoreXYDeg = begBoreXYDeg
else:
self.begBoreXYDeg = [0.0, 0.0]
# "self.begBoreXYDeg=%r" % self.begBoreXYDeg
# move boresight and adjust star position accordingly
starXYPix = [(boreXYDeg[ii] * self.instScale[ii]) + self.instCtr[ii] for ii in range(2)]
if msgStr:
self.sr.showMsg(msgStr)
self.currBoreXYDeg = boreXYDeg
self.setStarPos(starXYPix)
return self.sr.startCmd(actor = "tcc", cmdStr = cmdStr)
def waitExtraSetup(self):
"""Executed once at the start of each run
after calling initAll and getInstInfo but before doing anything else.
Override to do things such as put the instrument into a particular mode.
"""
# set boresight and star position and shift boresight
boreXYDeg = [self.getEntryNum(wdg) / 3600.0 for wdg in self.boreNameWdgSet]
yield self.sr.waitCmdVars(self.moveBoresight(boreXYDeg))
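# Sketch of the bookkeeping done in moveBoresight above (hypothetical helper, not
# used elsewhere): a boresight offset in degrees maps to a pixel position via the
# instrument scale and center. The defaults are the debug-mode values from getInstInfo.
def _exampleBoreToPix(boreXYDeg=(0.0, -0.0015),
                      instScale=(-12066.6, 12090.5), instCtr=(240, 224)):
    return [(boreXYDeg[ii] * instScale[ii]) + instCtr[ii] for ii in range(2)]
    # e.g. (0.0, -0.0015) deg -> [240.0, ~205.86] pix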
class OffsetGuiderFocusScript(BaseFocusScript):
"""Focus script for offset guiders
"""
def __init__(self,
sr,
gcamActor,
instPos,
imageViewerTLName,
defRadius = 5.0,
defBinFactor = 1,
finalBinFactor = None,
maxFindAmpl = None,
doWindow = True,
windowOrigin = 0,
windowIsInclusive = True,
helpURL = None,
debug = False,
):
"""The setup script; run once when the script runner
window is created.
Inputs:
- gcamActor: name of guide camera actor (e.g. "dcam")
- instPos: name of instrument position (e.g. "NA2"); case doesn't matter
- imageViewerTLName: name of image viewer toplevel (e.g. "Guide.DIS Slitviewer")
- defBoreXY: default boresight position in [x, y] arcsec;
If an entry is None then no offset widget is shown for that axis
and 0 is used.
- defRadius: default centroid radius, in arcsec
- defBinFactor: default bin factor; if None then bin factor cannot be set
- finalBinFactor: the final bin factor; this is an ugly hack that works around the problem
that guiders do not provide a bin factor keyword so there's no way to tell what
bin factor the guider started out with.
None means: if you can tell what the starting bin factor is then restore that; otherwise
leave the bin factor as it was set by this script.
- maxFindAmpl: maximum star amplitude for finding stars (peak - sky in ADUs);
if None then star finding is disabled.
- doWindow: if True, subframe images during focus sequence
- windowOrigin: index of left or lower pixel for window (0 or 1 unless very weird);
this is not used for star positions, which all have the same convention
- windowIsInclusive: is the upper-right window coord included in the image?
- helpURL: URL of help file
- debug: if True, run in debug mode, which uses fake data and does not communicate with the hub.
"""
BaseFocusScript.__init__(self,
sr = sr,
gcamActor = gcamActor,
instName = None,
imageViewerTLName = imageViewerTLName,
defRadius = defRadius,
defBinFactor = defBinFactor,
finalBinFactor = finalBinFactor,
maxFindAmpl = maxFindAmpl,
doWindow = doWindow,
windowOrigin = windowOrigin,
windowIsInclusive = windowIsInclusive,
helpURL = helpURL,
debug = debug,
)
self.instPos = instPos
def getInstInfo(self):
"""Obtains instrument data (in this case guider data).
Verifies the correct instrument and sets these attributes:
- instScale: x,y image scale in unbinned pixels/degree
- instCtr: x,y image center in unbinned pixels
- instLim: xmin, ymin, xmax, ymax image limits, inclusive, in unbinned pixels
- arcsecPerPixel: image scale in arcsec/unbinned pixel;
average of x and y scales
Raises ScriptError if wrong instrument.
"""
if not self.sr.debug:
# Make sure current instrument is correct
try:
currInstPosName = self.sr.getKeyVar(self.tccModel.instPos)
except self.sr.ScriptError:
raise self.sr.ScriptError("current instrument position unknown")
if not currInstPosName.lower() == self.instPos.lower():
raise self.sr.ScriptError("%s is not the current instrument position (%s)!" % (self.instPos, currInstPosName))
self.instScale = self.sr.getKeyVar(self.tccModel.gimScale, ind=None)
self.instCtr = self.sr.getKeyVar(self.tccModel.gimCtr, ind=None)
self.instLim = self.sr.getKeyVar(self.tccModel.gimLim, ind=None)
else:
# data from tcc tinst:I_NA2_DIS.DAT 18-OCT-2006
self.instScale = [-12066.6, 12090.5] # unbinned pixels/deg
self.instCtr = [240, 224]
self.instLim = [0, 0, 524, 511]
self.arcsecPerPixel = 3600.0 * 2 / (abs(self.instScale[0]) + abs(self.instScale[1]))
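# For reference: with the debug-mode guider scale above, the derived image scale is
#     3600.0 * 2 / (abs(-12066.6) + abs(12090.5))   # ~0.298 arcsec per unbinned pixel
# which is the factor recordUserParams uses to turn the centroid radius (arcsec)
# into pixels.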
class ImagerFocusScript(BaseFocusScript):
"""Focus script for imaging instrument.
This is like an Offset Guider but the exposure commands
are sent to the instrument actor and centroid and findstars commands
are sent to nexpose using the image just taken.
For now there is no standard way to handle windowing and binning
so each instrument must override waitExpose to use windowing.
As a result the default value of doWindow is false.
However, if the exposure command gets arguments for windowing
then this will all change.
"""
def __init__(self,
sr,
instName,
imageViewerTLName = None,
defRadius = 5.0,
defBinFactor = 1,
maxFindAmpl = None,
doWindow = False,
windowOrigin = 1,
windowIsInclusive = True,
doZeroOverscan = False,
helpURL = None,
debug = False,
):
"""The setup script; run once when the script runner
window is created.
Inputs:
- instName: name of instrument (e.g. "DIS"); must be a name known to TUI.Inst.ExposeModel.
- imageViewerTLName: name of image viewer toplevel (e.g. "Guide.DIS Slitviewer")
- defRadius: default centroid radius, in arcsec
- defBinFactor: default bin factor; if None then bin factor cannot be set
- maxFindAmpl: maximum star amplitude for finding stars (peak - sky in ADUs);
if None then star finding is disabled.
- doWindow: if True, subframe images during focus sequence
- windowOrigin: index of left or lower pixel for window (0 or 1 unless very weird);
this is not used for star positions, which all have the same convention
- windowIsInclusive: is the upper-right window coord included in the image?
- doZeroOverscan: if True then set overscan to zero
- helpURL: URL of help file
- debug: if True, run in debug mode, which uses fake data and does not communicate with the hub.
"""
self.exposeModel = TUI.Inst.ExposeModel.getModel(instName)
gcamActor = self.exposeModel.instInfo.centroidActor
if not gcamActor:
raise RuntimeError("Instrument %r has no centroidActor" % (instName,))
BaseFocusScript.__init__(self,
sr = sr,
gcamActor = gcamActor,
instName = instName,
imageViewerTLName = imageViewerTLName,
defRadius = defRadius,
defBinFactor = defBinFactor,
maxFindAmpl = maxFindAmpl,
doWindow = doWindow,
windowOrigin = windowOrigin,
windowIsInclusive = windowIsInclusive,
helpURL = helpURL,
debug = debug,
)
self.doZeroOverscan = bool(doZeroOverscan)
def formatBinFactorArg(self, isFinal):
"""Return bin factor argument for expose/centroid/findstars command
Inputs:
- isFinal: if True then return parameters for final exposure
"""
binFactor = self.getBinFactor(isFinal=isFinal)
if binFactor is None:
return ""
return "bin=%d,%d" % (binFactor, binFactor)
def formatExposeArgs(self, doWindow=True, isFinal=False):
"""Format arguments for exposure command.
Inputs:
- doWindow: if true, window the exposure (if permitted)
- isFinal: if True then return parameters for final exposure
"""
retStr = BaseFocusScript.formatExposeArgs(self, doWindow=doWindow, isFinal=isFinal)
retStr += " name=%s_focus" % (self.exposeModel.instInfo.instActor,)
if self.doZeroOverscan:
retStr += " overscan=0,0"
return retStr
def initAll(self):
"""Override the default initAll to record initial bin factor, if relevant
"""
BaseFocusScript.initAll(self)
if self.exposeModel.instInfo.numBin > 0:
self.finalBinFactor = self.exposeModel.bin.getInd(0)[0]
def waitCentroid(self):
"""Take an exposure and centroid using 1x1 binning.
If the centroid is found, sets self.sr.value to the FWHM.
Otherwise sets self.sr.value to None.
"""
yield self.waitExpose()
filePath = self.sr.value
centroidCmdStr = "centroid file=%s on=%0.1f,%0.1f cradius=%0.1f" % \
(filePath, self.relStarPos[0], self.relStarPos[1], self.centroidRadPix)
yield self.sr.waitCmd(
actor = self.gcamActor,
cmdStr = centroidCmdStr,
keyVars = (self.guideModel.star,),
checkFail = False,
)
cmdVar = self.sr.value
if self.sr.debug:
starData = makeStarData("c", self.relStarPos)
else:
starData = cmdVar.getKeyVarData(self.guideModel.star)
if starData:
self.sr.value = StarMeas.fromStarKey(starData[0])
else:
self.sr.value = StarMeas()
def getExposeCmdDict(self, doWindow=True, isFinal=False):
"""Get basic command argument dict for an expose command
This includes actor, cmdStr, abortCmdStr
Inputs:
- doWindow: if true, window the exposure (if permitted)
- isFinal: if True then return parameters for final exposure
"""
return dict(
actor = self.exposeModel.actor,
cmdStr = "object " + self.formatExposeArgs(doWindow, isFinal=isFinal),
abortCmdStr = "abort",
)
def waitExpose(self, doWindow=True):
"""Take an exposure.
Return the file path of the exposure in self.sr.value.
Raise ScriptError if the exposure fails.
"""
self.sr.showMsg("Exposing for %s sec" % (self.expTime,))
basicCmdDict = self.getExposeCmdDict(doWindow)
yield self.sr.waitCmd(
keyVars = (self.exposeModel.files,),
checkFail = False,
**basicCmdDict
)
cmdVar = self.sr.value
fileInfoList = cmdVar.getKeyVarData(self.exposeModel.files)
if self.sr.debug:
fileInfoList = [("me", "localhost", "tmp", "debug", "me", "test.fits")]
if not fileInfoList:
raise self.sr.ScriptError("exposure failed")
filePath = "".join(fileInfoList[0][2:6])
self.sr.value = filePath
def waitFindStar(self):
"""Take a full-frame exposure and find the best star that can be centroided.
Set self.sr.value to StarMeas for found star.
If no star found displays a warning and sets self.sr.value to empty StarMeas.
"""
yield self.waitExpose(doWindow=False)
filePath = self.sr.value
findStarCmdStr = "findstars file=%s" % (filePath,)
self.doTakeFinalImage = True
yield self.sr.waitCmd(
actor = self.gcamActor,
cmdStr = findStarCmdStr,
keyVars = (self.guideModel.star,),
checkFail = False,
)
cmdVar = self.sr.value
if self.sr.debug:
starDataList = makeStarData("f", (50.0, 75.0))
else:
starDataList = cmdVar.getKeyVarData(self.guideModel.star)
if not starDataList:
self.sr.value = StarMeas()
self.sr.showMsg("No stars found", severity=RO.Constants.sevWarning)
return
yield self.waitFindStarInList(filePath, starDataList)
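# Note the two bin-factor formats used above: BaseFocusScript.formatBinFactorArg
# produces a single value (e.g. "bin=2") for guider commands, while the override in
# ImagerFocusScript produces one value per axis (e.g. "bin=2,2") for the instrument
# expose command.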
def polyfitw(x, y, w, ndegree, return_fit=False):
"""
Performs a weighted least-squares polynomial fit with optional error estimates.
Inputs:
x:
The independent variable vector.
y:
The dependent variable vector. This vector should be the same
length as X.
w:
The vector of weights. This vector should be same length as
X and Y.
ndegree:
The degree of polynomial to fit.
Outputs:
If return_fit is false (the default) then polyfitw returns only C, a vector of
coefficients of length ndegree+1.
If return_fit is true then polyfitw returns a tuple (c, yfit, yband, sigma, a)
yfit:
The vector of calculated Y's. Has an error of + or - yband.
yband:
Error estimate for each point = 1 sigma.
sigma:
The standard deviation in Y units.
a:
Correlation matrix of the coefficients.
Written by: George Lawrence, LASP, University of Colorado,
December, 1981 in IDL.
Weights added, April, 1987, G. Lawrence
Fixed bug with checking number of params, November, 1998,
Mark Rivers.
Python version, May 2002, Mark Rivers
"""
n = min(len(x), len(y)) # size = smaller of x,y
m = ndegree + 1 # number of elements in coeff vector
a = numpy.zeros((m,m), float) # least square matrix, weighted matrix
b = numpy.zeros(m, float) # will contain sum w*y*x^j
z = numpy.ones(n, float) # basis vector for constant term
a[0,0] = numpy.sum(w)
b[0] = numpy.sum(w*y)
for p in range(1, 2*ndegree+1): # power loop
z = z*x # z is now x^p
if (p < m): b[p] = numpy.sum(w*y*z) # b is sum w*y*x^j
sum = numpy.sum(w*z)
for j in range(max(0,(p-ndegree)), min(ndegree,p)+1):
a[j,p-j] = sum
a = numpy.linalg.inv(a)
c = numpy.dot(b, a)
if not return_fit:
return c # exit if only fit coefficients are wanted
# compute optional output parameters.
yfit = numpy.zeros(n, float)+c[0] # initialize fitted y with the constant term
for k in range(1, ndegree +1):
yfit = yfit + c[k]*(x**k) # sum basis vectors
var = numpy.sum((yfit-y)**2 )/(n-m) # variance estimate, unbiased
sigma = numpy.sqrt(var)
yband = numpy.zeros(n, float) + a[0,0]
z = numpy.ones(n, float)
for p in range(1,2*ndegree+1): # compute correlated error estimates on y
z = z*x # z is now x^p
sum = 0.
for j in range(max(0, (p - ndegree)), min(ndegree, p)+1):
sum = sum + a[j,p-j]
yband = yband + sum * z # add in all the error sources
yband = yband*var
yband = numpy.sqrt(yband)
return c, yfit, yband, sigma, a
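# --- Added illustration (not part of the original script) --------------------
# Minimal usage sketch for polyfitw with synthetic, made-up data: fit a
# weighted quadratic to exact samples of y = 1 + 2*x + 3*x**2.
def _polyfitw_example():
    """Return (coefficients, sigma) for a quadratic fit to synthetic data."""
    import numpy
    x = numpy.linspace(-1.0, 1.0, 21)
    y = 1.0 + 2.0*x + 3.0*x**2
    w = numpy.ones_like(x)                  # equal weights
    c = polyfitw(x, y, w, 2)                # constant term first: ~[1, 2, 3]
    c, yfit, yband, sigma, a = polyfitw(x, y, w, 2, return_fit=True)
    return c, sigma                         # sigma ~ 0 for noise-free data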
| bsd-3-clause |
samuelefiorini/minimal | scripts/mini_train.py | 1 | 4500 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Minimal training script.
This script performs trace-norm penalized vector-valued regression (VVR) on a
given input dataset.
"""
######################################################################
# Copyright (C) 2016 Samuele Fiorini, Annalisa Barla
#
# FreeBSD License
######################################################################
import imp
import shutil
import argparse
import os
import cPickle as pkl
import minimal as mini
import pandas as pd
def main(config_file):
"""Import configuration file and train Minimal."""
# Load the configuration file
config_path = os.path.abspath(config_file)
config = imp.load_source('mini_config', config_path)
# Extract the needed information from config
data = config.X # (n, d) data matrix
labels = config.Y # (n, T) labels matrix
tau_range = config.tau_range
minimization = config.minimization_algorithm
penalty = config.penalty
loss = config.loss
cv_split = config.cross_validation_split
print("-------------- Minimal training --------------")
print("* Data matrix:\t\t {} x {}".format(*data.shape))
print("* Labels matrix:\t {} x {}".format(*labels.shape))
print("* Loss function:\t {}".format(loss))
print("* Regularization penalty: {}".format(penalty))
print("* Minimization algorithm: {}".format(minimization))
print("* Number of tau:\t {}".format(len(tau_range)))
print("* Cross-validation splits: {}".format(cv_split))
out = mini.core.model_selection(data=data, labels=labels,
tau_range=tau_range,
algorithm=minimization,
loss=loss,
penalty=penalty,
cv_split=cv_split)
# Initialize output folder
root = config.output_root_folder
folder = os.path.join(root, '_'.join(('mini', config.exp_tag,
mini.extra.get_time())))
if not os.path.exists(folder):
os.makedirs(folder)
# Copy the configuration file into the output folder
shutil.copy(config_path, os.path.join(folder, 'mini_config.py'))
# Save the training data
dfX = pd.DataFrame(data=data, index=config.index,
columns=config.feat_names[0])
dfX.to_csv(os.path.join(folder, 'training_data'))
dfY = pd.DataFrame(data=labels, index=config.index,
columns=config.feat_names[1])
dfY.to_csv(os.path.join(folder, 'training_labels'))
# Dump results
filename = os.path.join(folder, 'results.pkl')
with open(filename, 'w') as f:
pkl.dump(out, f)
print("* Results dumped in {}".format(filename))
# Save simple cross-validation error plots
filename = os.path.join(folder, 'cv-errors')
mini.plotting.errors(results=out, cv_split=cv_split, filename=filename,
file_format=config.file_format,
context=config.plotting_context)
print("* Plot generated in {}".format(filename+'.'+config.file_format))
print("----------------------------------------------")
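# Illustrative sketch (added; not shipped with this script) of the attributes a
# mini_config.py is expected to define, inferred from the reads in main() above.
# All file names and option values below are hypothetical placeholders; consult
# the Minimal documentation for the accepted strings.
#
#   import numpy as np
#   X = np.load('data.npy')                      # (n, d) data matrix
#   Y = np.load('labels.npy')                    # (n, T) labels matrix
#   index = ['sample_%d' % i for i in range(X.shape[0])]
#   feat_names = (['x_%d' % j for j in range(X.shape[1])],
#                 ['y_%d' % t for t in range(Y.shape[1])])
#   tau_range = np.logspace(-3, 0, 20)           # regularization path
#   minimization_algorithm = '...'               # see minimal.core for choices
#   penalty = '...'
#   loss = '...'
#   cross_validation_split = 5
#   output_root_folder = 'results'
#   exp_tag = 'example'
#   file_format = 'png'
#   plotting_context = 'notebook'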
######################################################################
if __name__ == '__main__':
__version__ = mini.__version__
parser = argparse.ArgumentParser(description='Minimal script for '
'trace-norm penalized '
'vector-valued regression.')
parser.add_argument('-v', '--version', action='version',
version='%(prog)s v' + __version__)
parser.add_argument("-c", "--create", dest="create", action="store_true",
help="create config file", default=False)
parser.add_argument("configuration_file", help="specify config file",
default='mini_config.py')
args = parser.parse_args()
if args.create:
std_config_path = os.path.join(mini.__path__[0], 'mini_config.py')
# Check for .pyc
if std_config_path.endswith('.pyc'):
std_config_path = std_config_path[:-1]
# Check if the file already exists
if os.path.exists(args.configuration_file):
parser.error("Minimal configuration file already exists")
# Copy the config file
shutil.copy(std_config_path, args.configuration_file)
else:
main(args.configuration_file)
| bsd-2-clause |
michalnand/motoko_uprising | src/tools/filter_design/resonant_filter.py | 1 | 1483 |
from scipy import signal
import numpy
import random
import matplotlib.pyplot as plt
fs = 1000.0 # sampling frequency [Hz]
fc = 250.0 # resonant frequency [Hz]
q = 0.95 #Q factor
a1_ = 2.0*q*numpy.cos(2*numpy.pi*fc/fs)
a2_ = -q*q
b_ = (1.0 - q*q)/2.0
b = [b_, 0.0, -b_]
a = [1.0, -a1_, -a2_]
print()
print()
print("a1 = ", round(a1_, 6))
print("a2 = ", round(a2_, 6))
print("b0 = ", round(b_, 6))
print("b1 = ", round(0.0, 6))
print("b2 = ", round(-b_, 6))
print()
print()
print("y(n) = ", end = "")
print(round(a1_, 6),"*y(n-1) + ", end = "")
print(round(a2_, 6),"*y(n-2) + ", end = "")
print(round(b_, 6) ,"*x(n) + ", end = "")
print(round(-b_, 6),"*x(n-2)", end = "")
print()
print()
w, h = signal.freqz(b, a, fs = fs)
seq_length = 100
period = seq_length//2
imp = numpy.zeros(seq_length)
for i in range(0, seq_length):
if i%period < period//2:
imp[i] = 1.0
response = signal.lfilter(b, a, imp)
response_abs = numpy.abs(response)
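# Added sanity check (illustrative): implement the difference equation printed
# above directly and confirm it matches scipy.signal.lfilter.
def direct_form_filter(x):
    y = numpy.zeros(len(x))
    for n in range(len(x)):
        y[n] = b_*x[n]                      # b0*x(n)
        if n >= 1:
            y[n] += a1_*y[n-1]              # a1*y(n-1)
        if n >= 2:
            y[n] += a2_*y[n-2] - b_*x[n-2]  # a2*y(n-2) + b2*x(n-2), b2 = -b0
    return y
print("direct form matches lfilter:", numpy.allclose(direct_form_filter(imp), response))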
input_response = plt.plot(numpy.arange(0, seq_length), imp, label = "input")
filter_response = plt.plot(numpy.arange(0, seq_length), response, label = "filter output")
amplitude_response = plt.plot(numpy.arange(0, seq_length), response_abs, label = "amplitude output")
plt.margins(0.1, 0.1)
plt.xlabel('Time [samples]')
plt.ylabel('Amplitude')
plt.grid(True)
plt.legend()
plt.show()
h_abs = numpy.abs(h)
h_abs_log = 20.0*numpy.log10(h_abs)
plt.plot(w, h_abs_log)
plt.xlabel('Frequency [Hz]')
plt.ylabel('Amplitude [dB]')
plt.show()
| gpl-3.0 |
rochus/cython-odesolver-nD-shootout | shootout.py | 1 | 2043 | #!/usr/bin/env python
import os, sys, timeit, platform
# make sure the modules are compiled # {{{
try:
print("building cython modules if necessary")
assert(os.system("python setup.py build_ext -i") == 0)
except:
print("unable to build the cython modules")
sys.exit(1)
# }}}
import numpy as np
import matplotlib.pyplot as plt
def shootout():
setup = '''
from math import sin
from odesolver_%s import test
'''
stmt = '''
test(100)
'''
repeat = 10
number = 1000
methods = ['pure python', 'pure cythonized', 'sliced cython', 'no-slice cython', 'pointer cython']
imports = ['py', 'cy_pure', 'cy', 'cy_noslice', 'cy_ptr']
# time all variants
ts = np.zeros(len(imports))
for i, s in enumerate(imports):
print("running method '%s'" % methods[i])
ts[i] = min(timeit.repeat(stmt, setup % s, repeat=repeat, number=number))
# get speed comparisons
ds = ts[0] / ts
# emit hardware information and statistics
print("platform: ", platform.processor())
print("method\t\t\truntime\tspeed")
print("----------------------------------------")
for i, m in enumerate(methods):
print("%s\t\t%6.4f\t%6.4f" % (m, ts[i], ds[i]))
# show nice little figure
ind = np.arange(len(methods))
width = 0.65
fig = plt.figure()
ax = fig.add_subplot(111)
fig.suptitle("Comparison of ODE solver optimizations", fontsize=13, fontweight="bold")
ax.set_title(platform.processor())
ax.set_xlabel('Implementation', fontweight="bold")
ax.set_ylabel('Relative Speed', fontweight="bold")
bars = ax.bar(ind, ds, width, color="#00719a")
i = 0
for b in bars:
h = b.get_height()
ax.text(b.get_x() + b.get_width() / 2, 1.02 * h, '%4.2f' % ds[i], ha='center', va='bottom')
i += 1
ax.set_xticks(ind + width/2)
ax.set_xticklabels(methods, rotation=70, ha='center')
ax.axhline(1.0, color='#8a8a8a')
fig.subplots_adjust(bottom=0.25)
plt.show()
if __name__ == "__main__":
shootout()
| mit |
kmacinnis/sympy | sympy/plotting/plot.py | 3 | 62261 | """Plotting module for Sympy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from sympy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module gives only the essential. For all the fancy stuff use directly
the backend. You can get the backend wrapper for every plot from the
``_backend`` attribute. Moreover the data series classes have various useful
methods like ``get_points``, ``get_segments``, ``get_meshes``, etc., that may
come in handy if you wish to use another plotting library.
Especially if you need publication ready graphs and this module is not enough
for you - just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the common way to graph data in
python) just copy ``_backend.fig`` which is the figure and ``_backend.ax``
which is the axis and work on them as you would on any other matplotlib object.
Simplicity of code takes much greater importance than performance. Don't use it
if you care at all about performance. A new backend instance is initialized
every time you call ``show()`` and the old one is left to the garbage collector.
"""
from __future__ import print_function, division
from inspect import getargspec
from itertools import chain
from collections import Callable
import warnings
from sympy import sympify, Expr, Tuple, Dummy
from sympy.external import import_module
from sympy.utilities.decorator import doctest_depends_on
from .experimental_lambdify import (vectorized_lambdify, lambdify)
# N.B.
# When changing the minimum module version for matplotlib, please change
# the same in the `SymPyDocTestFinder`` in `sympy/utilities/runtests.py`
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
global _show
_show = False
##############################################################################
# The public interface
##############################################################################
class Plot(object):
"""The central class of the plotting module.
For interactive work the function ``plot`` is better suited.
This class permits the plotting of sympy expressions using numerous
backends (matplotlib, textplot, the old pyglet module for sympy, Google
charts api, etc).
The figure can contain an arbitrary number of plots of sympy expressions,
lists of coordinates of points, etc. Plot has a private attribute _series that
contains all data series to be plotted (expressions for lines or surfaces,
lists of points, etc (all subclasses of BaseSeries)). Those data series are
instances of classes not imported by ``from sympy import *``.
The customization of the figure is on two levels. Global options that
concern the figure as a whole (eg title, xlabel, scale, etc) and
per-data series options (eg name) and aesthetics (eg. color, point shape,
line type, etc.).
The difference between options and aesthetics is that an aesthetic can be
a function of the coordinates (or parameters in a parametric plot). The
supported values for an aesthetic are:
- None (the backend uses default values)
- a constant
- a function of one variable (the first coordinate or parameter)
- a function of two variables (the first and second coordinate or
parameters)
- a function of three variables (only in nonparametric 3D plots)
Their implementation depends on the backend so they may not work in some
backends.
If the plot is parametric and the arity of the aesthetic function permits
it the aesthetic is calculated over parameters and not over coordinates.
If the arity does not permit calculation over parameters the calculation is
done over coordinates.
Only cartesian coordinates are supported for the moment, but you can use
the parametric plots to plot in polar, spherical and cylindrical
coordinates.
The arguments for the constructor Plot must be subclasses of BaseSeries.
Any global option can be specified as a keyword argument.
The global options for a figure are:
- title : str
- xlabel : str
- ylabel : str
- legend : bool
- xscale : {'linear', 'log'}
- yscale : {'linear', 'log'}
- axis : bool
- axis_center : tuple of two floats or {'center', 'auto'}
- xlim : tuple of two floats
- ylim : tuple of two floats
- aspect_ratio : tuple of two floats or {'auto'}
- autoscale : bool
- margin : float in [0, 1]
The per data series options and aesthetics are:
There are none in the base series. See below for options for subclasses.
Some data series support additional aesthetics or options:
ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
Parametric3DLineSeries support the following:
Aesthetics:
- line_color : function which returns a float.
options:
- label : str
- steps : bool
- integers_only : bool
SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:
aesthetics:
- surface_color : function which returns a float.
"""
def __init__(self, *args, **kwargs):
super(Plot, self).__init__()
# Options for the graph as a whole.
# The possible values for each option are described in the docstring of
# Plot. They are based purely on convention, no checking is done.
self.title = None
self.xlabel = None
self.ylabel = None
self.aspect_ratio = 'auto'
self.xlim = None
self.ylim = None
self.axis_center = 'auto'
self.axis = True
self.xscale = 'linear'
self.yscale = 'linear'
self.legend = False
self.autoscale = True
self.margin = 0
# Contains the data objects to be plotted. The backend should be smart
# enough to iterate over this list.
self._series = []
self._series.extend(args)
# The backend type. On every show() a new backend instance is created
# in self._backend which is tightly coupled to the Plot instance
# (thanks to the parent attribute of the backend).
self.backend = DefaultBackend
# The keyword arguments should only contain options for the plot.
for key, val in kwargs.items():
if hasattr(self, key):
setattr(self, key, val)
def show(self):
# TODO move this to the backend (also for save)
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
series_strs = [('[%d]: ' % i) + str(s)
for i, s in enumerate(self._series)]
return 'Plot object containing:\n' + '\n'.join(series_strs)
def __getitem__(self, index):
return self._series[index]
def __setitem__(self, index, *args):
if len(args) == 1 and isinstance(args[0], BaseSeries):
self._series[index] = args
def __delitem__(self, index):
del self._series[index]
def append(self, *args):
"""Adds one more graph to the figure."""
if len(args) == 1 and isinstance(args[0], BaseSeries):
self._series.append(*args)
else:
raise TypeError("append() expects a single argument of type BaseSeries")
def extend(self, arg):
"""Adds the series from another plot or a list of series."""
if isinstance(arg, Plot):
self._series.extend(arg._series)
else:
self._series.extend(arg)
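# Added illustration: a per-series aesthetic may be a callable, in which case it
# is evaluated over the coordinates (or over the parameter, for parametric
# plots), e.g.::
#
#     from sympy.abc import x
#     plot(x**2, line_color=lambda a: a)   # color varies with the x coordinate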
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries(object):
"""Base class for the data objects containing stuff to be plotted.
The backend should check if it supports the data series that it's given.
(eg TextBackend supports only LineOver1DRange).
It's the backend responsibility to know how to use the class of
data series that it's given.
Some data series classes are grouped (using a class attribute like is_2Dline)
according to the api they present (based only on convention). The backend is
not obliged to use that api (eg. The LineOver1DRange belongs to the
is_2Dline group and presents the get_points method, but the
TextBackend does not use the get_points method).
"""
# Some flags follow. The rationale for using flags instead of checking base
# classes is that setting multiple flags is simpler than multiple
# inheritance.
is_2Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dsurface = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_contour = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_implicit = False
# Some of the backends expect:
# - get_meshes returning mesh_x (1D array), mesh_y(1D array,
# mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
#Different from is_contour as the colormap in backend will be
#different
is_parametric = False
# The calculation of aesthetics expects:
# - get_parameter_points returning one or two np.arrays (1D or 2D)
# used for calculation aesthetics
def __init__(self):
super(BaseSeries, self).__init__()
@property
def is_3D(self):
flags3D = [
self.is_3Dline,
self.is_3Dsurface
]
return any(flags3D)
@property
def is_line(self):
flagslines = [
self.is_2Dline,
self.is_3Dline
]
return any(flagslines)
### 2D lines
class Line2DBaseSeries(BaseSeries):
"""A base class for 2D lines.
- adding the label, steps and only_integers options
- making is_2Dline true
- defining get_segments and get_color_array
"""
is_2Dline = True
_dim = 2
def __init__(self):
super(Line2DBaseSeries, self).__init__()
self.label = None
self.steps = False
self.only_integers = False
self.line_color = None
def get_segments(self):
np = import_module('numpy')
points = self.get_points()
if self.steps is True:
x = np.array((points[0], points[0])).T.flatten()[1:]
y = np.array((points[1], points[1])).T.flatten()[:-1]
points = (x, y)
points = np.ma.array(points).T.reshape(-1, 1, self._dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
def get_color_array(self):
np = import_module('numpy')
c = self.line_color
if hasattr(c, '__call__'):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if arity == 1 and self.is_parametric:
x = self.get_parameter_points()
return f(centers_of_segments(x))
else:
variables = list(map(centers_of_segments, self.get_points()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else: # only if the line is 3D (otherwise raises an error)
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class List2DSeries(Line2DBaseSeries):
"""Representation for a line consisting of list of points."""
def __init__(self, list_x, list_y):
np = import_module('numpy')
super(List2DSeries, self).__init__()
self.list_x = np.array(list_x)
self.list_y = np.array(list_y)
self.label = 'list'
def __str__(self):
return 'list plot'
def get_points(self):
return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
"""Representation for a line consisting of a sympy expression over a range."""
def __init__(self, expr, var_start_end, **kwargs):
super(LineOver1DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.label = str(self.expr)
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'cartesian line: %s for %s over %s' % (
str(self.expr), str(self.var), str((self.start, self.end)))
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if self.only_integers or not self.adaptive:
return super(LineOver1DRangeSeries, self).get_segments()
else:
f = lambdify([self.var], self.expr)
list_segments = []
def sample(p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
np = import_module('numpy')
#Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
xnew = p[0] + random * (q[0] - p[0])
ynew = f(xnew)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif p[1] is None and q[1] is None:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray))
if any(y is not None for y in yarray):
for i in range(len(yarray) - 1):
if yarray[i] is not None or yarray[i + 1] is not None:
sample([xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]], depth + 1)
#Sample further if one of the end points is None (i.e. a complex
#value) or the three points are not almost collinear.
elif (p[1] is None or q[1] is None or new_point[1] is None
or not flat(p, new_point, q)):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start = f(self.start)
f_end = f(self.end)
sample([self.start, f_start], [self.end, f_end], 0)
return list_segments
def get_points(self):
np = import_module('numpy')
if self.only_integers is True:
list_x = np.linspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
f = vectorized_lambdify([self.var], self.expr)
list_y = f(list_x)
return (list_x, list_y)
class Parametric2DLineSeries(Line2DBaseSeries):
"""Representation for a line consisting of two parametric sympy expressions
over a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
super(Parametric2DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'parametric cartesian line: (%s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.var),
str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
list_x = fx(param)
list_y = fy(param)
return (list_x, list_y)
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if not self.adaptive:
return super(Parametric2DLineSeries, self).get_segments()
f_x = lambdify([self.var], self.expr_x)
f_y = lambdify([self.var], self.expr_y)
list_segments = []
def sample(param_p, param_q, p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
#Randomly sample to avoid aliasing.
np = import_module('numpy')
random = 0.45 + np.random.rand() * 0.1
param_new = param_p + random * (param_q - param_p)
xnew = f_x(param_new)
ynew = f_y(param_new)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif ((p[0] is None and q[0] is None) or
(p[1] is None and q[1] is None)):
param_array = np.linspace(param_p, param_q, 10)
x_array = list(map(f_x, param_array))
y_array = list(map(f_y, param_array))
if any(x is not None and y is not None
for x, y in zip(x_array, y_array)):
for i in range(len(y_array) - 1):
if ((x_array[i] is not None and y_array[i] is not None) or
(x_array[i + 1] is not None and y_array[i + 1] is not None)):
point_a = [x_array[i], y_array[i]]
point_b = [x_array[i + 1], y_array[i + 1]]
sample(param_array[i], param_array[i + 1], point_a,
point_b, depth + 1)
#Sample further if one of the end points is None (i.e. a complex
#value) or the three points are not almost collinear.
elif (p[0] is None or p[1] is None
or q[1] is None or q[0] is None
or not flat(p, new_point, q)):
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start_x = f_x(self.start)
f_start_y = f_y(self.start)
start = [f_start_x, f_start_y]
f_end_x = f_x(self.end)
f_end_y = f_y(self.end)
end = [f_end_x, f_end_y]
sample(self.start, self.end, start, end, 0)
return list_segments
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
"""A base class for 3D lines.
Most of the stuff is derived from Line2DBaseSeries."""
is_2Dline = False
is_3Dline = True
_dim = 3
def __init__(self):
super(Line3DBaseSeries, self).__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
"""Representation for a 3D line consisting of two parametric sympy
expressions and a range."""
def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
super(Parametric3DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.expr_z),
str(self.var), str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
fz = vectorized_lambdify([self.var], self.expr_z)
list_x = fx(param)
list_y = fy(param)
list_z = fz(param)
return (list_x, list_y, list_z)
### Surfaces
class SurfaceBaseSeries(BaseSeries):
"""A base class for 3D surfaces."""
is_3Dsurface = True
def __init__(self):
super(SurfaceBaseSeries, self).__init__()
self.surface_color = None
def get_color_array(self):
np = import_module('numpy')
c = self.surface_color
if isinstance(c, Callable):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if self.is_parametric:
variables = list(map(centers_of_faces, self.get_parameter_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables)
variables = list(map(centers_of_faces, self.get_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else:
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of a sympy expression and 2D
range."""
def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
super(SurfaceOver2DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('cartesian surface: %s for'
' %s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
class ParametricSurfaceSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of three parametric sympy
expressions and a range."""
is_parametric = True
def __init__(
self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v,
**kwargs):
super(ParametricSurfaceSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.var_u = sympify(var_start_end_u[0])
self.start_u = float(var_start_end_u[1])
self.end_u = float(var_start_end_u[2])
self.var_v = sympify(var_start_end_v[0])
self.start_v = float(var_start_end_v[1])
self.end_v = float(var_start_end_v[2])
self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('parametric cartesian surface: (%s, %s, %s) for'
' %s over %s and %s over %s') % (
str(self.expr_x),
str(self.expr_y),
str(self.expr_z),
str(self.var_u),
str((self.start_u, self.end_u)),
str(self.var_v),
str((self.start_v, self.end_v)))
def get_parameter_meshes(self):
np = import_module('numpy')
return np.meshgrid(np.linspace(self.start_u, self.end_u,
num=self.nb_of_points_u),
np.linspace(self.start_v, self.end_v,
num=self.nb_of_points_v))
def get_meshes(self):
mesh_u, mesh_v = self.get_parameter_meshes()
fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x)
fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y)
fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z)
return (fx(mesh_u, mesh_v), fy(mesh_u, mesh_v), fz(mesh_u, mesh_v))
### Contours
class ContourSeries(BaseSeries):
"""Representation for a contour plot."""
#The code is mostly repetition of SurfaceOver2DRange.
#XXX: Presently not used in any of those functions.
#XXX: Add contour plot and use this series.
is_contour = True
def __init__(self, expr, var_start_end_x, var_start_end_y):
super(ContourSeries, self).__init__()
self.nb_of_points_x = 50
self.nb_of_points_y = 50
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_meshes
def __str__(self):
return ('contour: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
##############################################################################
# Backends
##############################################################################
class BaseBackend(object):
def __init__(self, parent):
super(BaseBackend, self).__init__()
self.parent = parent
## don't have to check for the success of importing matplotlib in each case;
## we will only be using this backend if we can successfully import matploblib
class MatplotlibBackend(BaseBackend):
def __init__(self, parent):
super(MatplotlibBackend, self).__init__(parent)
are_3D = [s.is_3D for s in self.parent._series]
self.matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
self.plt = self.matplotlib.pyplot
self.cm = self.matplotlib.cm
self.LineCollection = self.matplotlib.collections.LineCollection
if any(are_3D) and not all(are_3D):
raise ValueError('The matplotlib backend can not mix 2D and 3D.')
elif not any(are_3D):
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111)
self.ax.spines['left'].set_position('zero')
self.ax.spines['right'].set_color('none')
self.ax.spines['bottom'].set_position('zero')
self.ax.spines['top'].set_color('none')
self.ax.spines['left'].set_smart_bounds(True)
self.ax.spines['bottom'].set_smart_bounds(True)
self.ax.xaxis.set_ticks_position('bottom')
self.ax.yaxis.set_ticks_position('left')
elif all(are_3D):
## mpl_toolkits.mplot3d is necessary for
## projection='3d'
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111, projection='3d')
def process_series(self):
parent = self.parent
for s in self.parent._series:
# Create the collections
if s.is_2Dline:
collection = self.LineCollection(s.get_segments())
self.ax.add_collection(collection)
elif s.is_contour:
self.ax.contour(*s.get_meshes())
elif s.is_3Dline:
# TODO too complicated, I blame matplotlib
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
art3d = mpl_toolkits.mplot3d.art3d
collection = art3d.Line3DCollection(s.get_segments())
self.ax.add_collection(collection)
x, y, z = s.get_points()
self.ax.set_xlim((min(x), max(x)))
self.ax.set_ylim((min(y), max(y)))
self.ax.set_zlim((min(z), max(z)))
elif s.is_3Dsurface:
x, y, z = s.get_meshes()
collection = self.ax.plot_surface(x, y, z, cmap=self.cm.jet,
rstride=1, cstride=1,
linewidth=0.1)
elif s.is_implicit:
#Smart bounds have to be set to False for implicit plots.
self.ax.spines['left'].set_smart_bounds(False)
self.ax.spines['bottom'].set_smart_bounds(False)
points = s.get_raster()
if len(points) == 2:
#interval math plotting
x, y = _matplotlib_list(points[0])
self.ax.fill(x, y, facecolor='b', edgecolor='None' )
else:
# use contourf or contour depending on whether it is
# an inequality or equality.
#XXX: ``contour`` plots multiple lines. Should be fixed.
ListedColormap = self.matplotlib.colors.ListedColormap
colormap = ListedColormap(["white", "blue"])
xarray, yarray, zarray, plot_type = points
if plot_type == 'contour':
self.ax.contour(xarray, yarray, zarray,
contours=(0, 0), fill=False, cmap=colormap)
else:
self.ax.contourf(xarray, yarray, zarray, cmap=colormap)
else:
raise ValueError('The matplotlib backend supports only '
'is_2Dline, is_3Dline, is_3Dsurface and '
'is_contour objects.')
# Customise the collections with the corresponding per-series
# options.
if hasattr(s, 'label'):
collection.set_label(s.label)
if s.is_line and s.line_color:
if isinstance(s.line_color, (float, int)) or isinstance(s.line_color, Callable):
color_array = s.get_color_array()
collection.set_array(color_array)
else:
collection.set_color(s.line_color)
if s.is_3Dsurface and s.surface_color:
if self.matplotlib.__version__ < "1.2.0": # TODO in the distant future remove this check
warnings.warn('The version of matplotlib is too old to use surface coloring.')
elif isinstance(s.surface_color, (float, int)) or isinstance(s.surface_color, Callable):
color_array = s.get_color_array()
color_array = color_array.reshape(color_array.size)
collection.set_array(color_array)
else:
collection.set_color(s.surface_color)
# Set global options.
# TODO The 3D stuff
# XXX The order of those is important.
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
Axes3D = mpl_toolkits.mplot3d.Axes3D
if parent.xscale and not isinstance(self.ax, Axes3D):
self.ax.set_xscale(parent.xscale)
if parent.yscale and not isinstance(self.ax, Axes3D):
self.ax.set_yscale(parent.yscale)
if parent.xlim:
self.ax.set_xlim(parent.xlim)
if parent.ylim:
self.ax.set_ylim(parent.ylim)
if not isinstance(self.ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check
self.ax.set_autoscale_on(parent.autoscale)
if parent.axis_center:
val = parent.axis_center
if isinstance(self.ax, Axes3D):
pass
elif val == 'center':
self.ax.spines['left'].set_position('center')
self.ax.spines['bottom'].set_position('center')
elif val == 'auto':
xl, xh = self.ax.get_xlim()
yl, yh = self.ax.get_ylim()
pos_left = ('data', 0) if xl*xh <= 0 else 'center'
pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
self.ax.spines['left'].set_position(pos_left)
self.ax.spines['bottom'].set_position(pos_bottom)
else:
self.ax.spines['left'].set_position(('data', val[0]))
self.ax.spines['bottom'].set_position(('data', val[1]))
if not parent.axis:
self.ax.set_axis_off()
if parent.legend:
self.ax.legend()
self.ax.legend_.set_visible(parent.legend)
if parent.margin:
self.ax.set_xmargin(parent.margin)
self.ax.set_ymargin(parent.margin)
if parent.title:
self.ax.set_title(parent.title)
if parent.xlabel:
self.ax.set_xlabel(parent.xlabel, position=(1, 0))
if parent.ylabel:
self.ax.set_ylabel(parent.ylabel, position=(0, 1))
def show(self):
self.process_series()
#TODO after fixing https://github.com/ipython/ipython/issues/1255
# you can uncomment the next line and remove the pyplot.show() call
#self.fig.show()
if _show:
self.plt.show()
def save(self, path):
self.process_series()
self.fig.savefig(path)
def close(self):
self.plt.close(self.fig)
class TextBackend(BaseBackend):
def __init__(self, parent):
super(TextBackend, self).__init__(parent)
def show(self):
if len(self.parent._series) != 1:
raise ValueError(
'The TextBackend supports only one graph per Plot.')
elif not isinstance(self.parent._series[0], LineOver1DRangeSeries):
raise ValueError(
'The TextBackend supports only expressions over a 1D range')
else:
ser = self.parent._series[0]
textplot(ser.expr, ser.start, ser.end)
def close(self):
pass
class DefaultBackend(BaseBackend):
def __new__(cls, parent):
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
return MatplotlibBackend(parent)
else:
return TextBackend(parent)
plot_backends = {
'matplotlib': MatplotlibBackend,
'text': TextBackend,
'default': DefaultBackend
}
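# Added illustration: a specific backend can be forced by replacing the
# ``backend`` attribute of a ``Plot`` before showing it, e.g. for a quick
# ASCII rendering in a terminal::
#
#     from sympy.abc import x
#     p = plot(x**2, show=False)
#     p.backend = plot_backends['text']
#     p.show()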
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
np = import_module('numpy')
return np.average(np.vstack((array[:-1], array[1:])), 0)
def centers_of_faces(array):
np = import_module('numpy')
return np.average(np.dstack((array[:-1, :-1],
array[1:, :-1],
array[:-1, 1: ],
array[:-1, :-1],
)), 2)
def flat(x, y, z, eps=1e-3):
"""Checks whether three points are almost collinear"""
np = import_module('numpy')
vector_a = x - y
vector_b = z - y
dot_product = np.dot(vector_a, vector_b)
vector_a_norm = np.linalg.norm(vector_a)
vector_b_norm = np.linalg.norm(vector_b)
cos_theta = dot_product / (vector_a_norm * vector_b_norm)
return abs(cos_theta + 1) < eps
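# Added illustration: three collinear points give cos(theta) = -1 between the
# two edge vectors, so ``flat`` returns True::
#
#     >>> import numpy as np
#     >>> flat(np.array([0.0, 0.0]), np.array([1.0, 1.0]), np.array([2.0, 2.0]))
#     True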
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start,
intervalx.end, intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end,
intervaly.end, intervaly.start, None])
else:
#XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist
####New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot(*args, **kwargs):
"""
Plots a function of a single variable and returns an instance of
the ``Plot`` class (also, see the description of the
``show`` keyword argument below).
The plotting uses an adaptive algorithm which samples recursively to
accurately plot the function. The adaptive algorithm uses a random point near
the midpoint of two points that has to be further sampled. Hence the same
plots can appear slightly different.
Usage
=====
Single Plot
``plot(expr, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot(expr1, expr2, ..., range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot((expr1, range), (expr2, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of single variable
``range``: (x, 0, 5), A 3-tuple denoting the range of the free variable.
Keyword Arguments
=================
Arguments for ``plot`` function:
``show``: Boolean. The default value is set to ``True``. Set show to
``False`` and the function will not display the plot. The returned
instance of the ``Plot`` class can then be used to save or display
the plot by calling the ``save()`` and ``show()`` methods
respectively.
Arguments for ``LineOver1DRangeSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to False and
specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of value ``n``
samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when ``adaptive`` is set to False. The function
is uniformly sampled at ``nb_of_points`` points.
Aesthetics options:
``line_color``: float. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the ``Plot`` object returned and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot. It is set to the latex representation of
the expression, if the plot has only one expression.
``xlabel`` : str. Label for the x - axis.
``ylabel`` : str. Label for the y - axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x - axis.
``yscale``: {'linear', 'log'} Sets the scaling of the y - axis.
``axis_center``: tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x - axis limits.
``ylim`` : tuple of two floats, denoting the y - axis limits.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
Single Plot
>>> plot(x**2, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x**2 for x over (-5.0, 5.0)
Multiple plots with single range.
>>> plot(x, x**2, x**3, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
No adaptive sampling.
>>> plot(x**2, adaptive=False, nb_of_points=400)
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
See Also
========
Plot, LineOver1DRangeSeries.
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 1)
series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot_parametric(*args, **kwargs):
"""
Plots a 2D parametric plot.
The plotting uses an adaptive algorithm which samples recursively to
accurately plot the functions. The adaptive algorithm uses a random point near
the midpoint of two points that has to be further sampled. Hence the same
plots can appear slightly different.
Usage
=====
Single plot.
``plot_parametric(expr_x, expr_y, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot_parametric((expr1_x, expr1_y), (expr2_x, expr2_y), range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot_parametric((expr_x, expr_y, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``range``: (u, 0, 5), A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric2DLineSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to
False and specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of
value ``n`` samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when ``adaptive`` is set to False. The
function is uniformly sampled at ``nb_of_points`` points.
Aesthetics
----------
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same Series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``xlabel`` : str. Label for the x - axis.
``ylabel`` : str. Label for the y - axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x - axis.
``yscale``: {'linear', 'log'} Sets the scaling of the y - axis.
``axis_center``: tuple of two floats denoting the coordinates of the center
or {'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x - axis limits.
``ylim`` : tuple of two floats, denoting the y - axis limits.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot_parametric
>>> u = symbols('u')
Single Parametric plot
>>> plot_parametric(cos(u), sin(u), (u, -5, 5))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
Multiple parametric plots with single range.
>>> plot_parametric((cos(u), sin(u)), (u, cos(u)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0)
[1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0)
Multiple parametric plots.
>>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
... (cos(u), u, (u, -5, 5)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
[1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric2DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 2, 1)
series = [Parametric2DLineSeries(*arg) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_line(*args, **kwargs):
"""
Plots a 3D parametric line plot.
Usage
=====
Single plot:
``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``expr_z`` : Expression representing the function along z.
``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric3DLineSeries`` class.
``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
number of points.
Aesthetics:
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class.
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_line
>>> u = symbols('u')
Single plot.
>>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
Multiple plots.
>>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
... (sin(u), u**2, u, (u, -5, 5)))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
[1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric3DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 1)
series = [Parametric3DLineSeries(*arg) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d(*args, **kwargs):
"""
Plots a 3D surface plot.
Usage
=====
Single plot
``plot3d(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with the same range.
``plot3d(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the surface as a function of x and y.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``SurfaceOver2DRangeSeries`` class:
``nb_of_points_x``: int. The x range is sampled uniformly at
``nb_of_points_x`` points.
``nb_of_points_y``: int. The y range is sampled uniformly at
``nb_of_points_y`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot3d
>>> x, y = symbols('x y')
Single plot
>>> plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with same range
>>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
... (x*y, (x, -3, 3), (y, -3, 3)))
Plot object containing:
[0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0)
See Also
========
Plot, SurfaceOver2DRangeSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 2)
series = [SurfaceOver2DRangeSeries(*arg) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_surface(*args, **kwargs):
"""
Plots a 3D parametric surface plot.
Usage
=====
Single plot.
``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x``: Expression representing the function along ``x``.
``expr_y``: Expression representing the function along ``y``.
``expr_z``: Expression representing the function along ``z``.
``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
variable.
``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the v
variable.
Keyword Arguments
=================
Arguments for ``ParametricSurfaceSeries`` class:
``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
``nb_of_points_u`` points.
``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
``nb_of_points_v`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied for
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_surface
>>> u, v = symbols('u v')
Single plot.
>>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
... (u, -5, 5), (v, -5, 5))
Plot object containing:
[0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0)
See Also
========
Plot, ParametricSurfaceSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 2)
series = [ParametricSurfaceSeries(*arg) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def check_arguments(args, expr_len, nb_of_free_symbols):
"""
Checks the arguments and converts into tuples of the
form (exprs, ranges)
>>> from sympy import plot, cos, sin, symbols
>>> from sympy.plotting.plot import check_arguments
>>> x,y,u,v = symbols('x y u v')
>>> check_arguments([cos(x), sin(x)], 2, 1)
[(cos(x), sin(x), (x, -10, 10))]
>>> check_arguments([x, x**2], 1, 1)
[(x, (x, -10, 10)), (x**2, (x, -10, 10))]
"""
if expr_len > 1 and isinstance(args[0], Expr):
# Multiple expressions same range.
# The arguments are tuples when the expression length is
# greater than 1.
assert len(args) >= expr_len
for i in range(len(args)):
if isinstance(args[i], Tuple):
break
else:
i = len(args) + 1
exprs = Tuple(*args[:i])
free_symbols = list(set.union(*[e.free_symbols for e in exprs]))
if len(args) == expr_len + nb_of_free_symbols:
#Ranges given
plots = [exprs + Tuple(*args[expr_len:])]
else:
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
plots = [exprs + Tuple(*ranges)]
return plots
if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
len(args[0]) == expr_len and
expr_len != 3):
# Cannot handle the case of exactly 3 expressions here, because it is
# not possible to distinguish expressions from ranges in that case.
#Series of plots with same range
for i in range(len(args)):
if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
break
if not isinstance(args[i], Tuple):
args[i] = Tuple(args[i])
else:
i = len(args) + 1
exprs = args[:i]
assert all(isinstance(e, Expr) for expr in exprs for e in expr)
free_symbols = list(set.union(*[e.free_symbols for expr in exprs
for e in expr]))
if len(free_symbols) > nb_of_free_symbols:
raise ValueError("The number of free_symbols in the expression "
"is greater than %d" % nb_of_free_symbols)
if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
ranges = Tuple(*[range_expr for range_expr in args[
i:i + nb_of_free_symbols]])
plots = [expr + ranges for expr in exprs]
return plots
else:
#Use default ranges.
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
ranges = Tuple(*ranges)
plots = [expr + ranges for expr in exprs]
return plots
elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
#Multiple plots with different ranges.
for arg in args:
for i in range(expr_len):
if not isinstance(arg[i], Expr):
raise ValueError("Expected an expression, given %s" %
str(arg[i]))
for i in range(nb_of_free_symbols):
if not len(arg[i + expr_len]) == 3:
raise ValueError("The ranges should be a tuple of "
"length 3, got %s" % str(arg[i + expr_len]))
return args
| bsd-3-clause |
google/gps_building_blocks | py/gps_building_blocks/ml/preprocessing/keyword_clustering.py | 1 | 5137 | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to cluster words/phrase/sentences using embedding."""
from typing import List, Optional, Text, Tuple
import importlib_resources
import numpy as np
from numpy import linalg
import pandas as pd
from sklearn import cluster
import tensorflow as tf
import tensorflow_hub as hub
from gps_building_blocks.ml.preprocessing import data as preprocess_data
class KeywordClustering(object):
"""Class to cluster text using embeddings of word/phrase or sentences."""
def __init__(self,
model: Optional[tf.keras.Model] = None,
stopwords: Optional[List[Text]] = None) -> None:
"""Initialize embed model and list of stopwords.
Args:
model: Pretrained model object for text embedding.
All pre-trained TF Hub text embeddings:
https://tfhub.dev/s?module-type=text-embedding
stopwords: Stopwords to remove from embedding.
Attributes:
k_means: cluster.KMeans object used to cluster keywords.
"""
if model is None:
self.model = hub.load("https://tfhub.dev/google/nnlm-en-dim50/2")
else:
self.model = model
if stopwords is None:
stopwords_default = importlib_resources.read_text(preprocess_data,
"stopwords_eng.txt")
stopwords_default = stopwords_default.split("\n")[1:]
self.stopwords_to_remove = list(
filter(lambda word: word, stopwords_default))
else:
self.stopwords_to_remove = stopwords
self.k_means = cluster.KMeans
def extract_embedding(self, phrase: str) -> np.ndarray:
"""Extracts embedding of phrase using pretrained embedding model.
Args:
phrase: Word, phrase or sentence input for embedding model.
Returns:
Array of embedding for each word in phrase.
"""
phrase_input = [
i.lower()
for i in phrase.split(" ")
if i.lower() not in self.stopwords_to_remove
]
embed_phrase = self.model(phrase_input).numpy()
return embed_phrase
def get_average_embedding(self,
phrase_embed: np.ndarray) -> np.ndarray:
"""Calculates average embedding from embeddings of each word.
Args:
phrase_embed: Array of each word's embedding in phrase, output from
extract_embedding.
Returns:
Array containing the mean embedding of the phrase.
"""
return np.mean(phrase_embed, axis=0)
def cluster_keywords(
self,
data: Optional[pd.DataFrame] = None,
colname_real: Optional[str] = None,
colname_mean_embed: Optional[str] = None,
n_clusters: Optional[int] = None,
num_of_closest_words: int = 2) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Clusters words using K-Means into num_clusters clusters.
Args:
data: A pd.DataFrame with words and average embedding.
colname_real: Column name for column of original keywords.
colname_mean_embed: Column name for column of average text embeddings.
n_clusters: Number of clusters.
num_of_closest_words: Number of words selected for cluster description.
Returns:
Two dataframes
First dataframe is original data with cluster label column and distance
to center column.
Second dataframe contains cluster label and num_of_closest_words for each
cluster.
"""
entityname_matrix = pd.DataFrame.from_records(data[colname_mean_embed])
k_means = self.k_means()
k_means.n_clusters = n_clusters
k_means = k_means.fit(entityname_matrix)
data["labels"] = k_means.labels_ # pytype: disable=unsupported-operands
# Calculate normalized distance of each point from its cluster center
data["center_diff"] = np.nan # pytype: disable=unsupported-operands
for i in range(0, n_clusters):
dist_from_cluster_center = data[data["labels"] == i][
colname_mean_embed].apply(lambda x: x - k_means.cluster_centers_[i])
data.loc[data["labels"] == i, "center_diff"] = linalg.norm(
dist_from_cluster_center.to_list(), axis=1) # pytype: disable=attribute-error
# pick out num_of_closest_words closest words to center to describe cluster
closest = data.groupby("labels")["center_diff"].nsmallest( # pytype: disable=attribute-error
num_of_closest_words)
data_cluster_description = data.loc[closest.index.get_level_values(level=1)] # pytype: disable=attribute-error
data_cluster_description = data_cluster_description.groupby(
["labels"], as_index=False).agg({colname_real: ", ".join})
return data, data_cluster_description
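# Minimal usage sketch. The keyword list, column names, and cluster count
# below are made-up example values, and instantiating the class with no
# arguments downloads the default TF-Hub embedding model.
#
#     >>> import pandas as pd
#     >>> kc = KeywordClustering()
#     >>> phrases = ["running shoes", "trail sneakers",
#     ...            "coffee beans", "espresso grinder"]
#     >>> df = pd.DataFrame({"keyword": phrases})
#     >>> df["avg_embed"] = df["keyword"].apply(
#     ...     lambda p: kc.get_average_embedding(kc.extract_embedding(p)))
#     >>> clustered, descriptions = kc.cluster_keywords(
#     ...     data=df, colname_real="keyword", colname_mean_embed="avg_embed",
#     ...     n_clusters=2, num_of_closest_words=1)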
| apache-2.0 |
isrohutamahopetechnik/MissionPlanner | Lib/site-packages/scipy/signal/filter_design.py | 53 | 63381 | """Filter design.
"""
import types
import warnings
import numpy
from numpy import atleast_1d, poly, polyval, roots, real, asarray, allclose, \
resize, pi, absolute, logspace, r_, sqrt, tan, log10, arctan, arcsinh, \
cos, exp, cosh, arccosh, ceil, conjugate, zeros, sinh
from numpy import mintypecode
from scipy import special, optimize
from scipy.misc import comb
class BadCoefficients(UserWarning):
pass
abs = absolute
def findfreqs(num, den, N):
ep = atleast_1d(roots(den))+0j
tz = atleast_1d(roots(num))+0j
if len(ep) == 0:
ep = atleast_1d(-1000)+0j
ez = r_['-1',numpy.compress(ep.imag >=0, ep,axis=-1), numpy.compress((abs(tz) < 1e5) & (tz.imag >=0),tz,axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3*abs(ez.real + integ)+1.5*ez.imag))+0.5)
lfreq = numpy.around(numpy.log10(0.1*numpy.min(abs(real(ez+integ))+2*ez.imag))-0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the numerator (b) and denominator (a) of a filter compute its
frequency response::
H(w) = (b[0]*(jw)**(nb-1) + b[1]*(jw)**(nb-2) + ... + b[nb-1]) /
(a[0]*(jw)**(na-1) + a[1]*(jw)**(na-2) + ... + a[na-1])
Parameters
----------
b : ndarray
Numerator of a linear filter.
a : ndarray
Denominator of a linear filter.
worN : {None, int}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at frequencies given in worN.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The frequencies at which h was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, since it plots the real part of the complex transfer
function rather than the magnitude.
"""
if worN is None:
w = findfreqs(b,a,200)
elif isinstance(worN, types.IntType):
N = worN
w = findfreqs(b,a,N)
else:
w = worN
w = atleast_1d(w)
s = 1j*w
h = polyval(b, s) / polyval(a, s)
if not plot is None:
plot(w, h)
return w, h
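# Minimal sketch: evaluating the analog low-pass H(s) = 1 / (s + 1) at three
# example angular frequencies; the expected magnitudes are 1 / sqrt(1 + w**2).
#
#     >>> import numpy as np
#     >>> w, h = freqs([1.0], [1.0, 1.0], worN=np.array([0.1, 1.0, 10.0]))
#     >>> np.abs(h)  # approximately [0.995, 0.707, 0.0995]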
def freqz(b, a=1, worN=None, whole=0, plot=None):
"""
Compute the frequency response of a digital filter.
Given the numerator ``b`` and denominator ``a`` of a digital filter compute
its frequency response::
H(e^(jw)) = B(e^(jw)) / A(e^(jw))
= (b[0] + b[1]*e^(-jw) + ... + b[m]*e^(-jmw))
/ (a[0] + a[1]*e^(-jw) + ... + a[n]*e^(-jnw))
Parameters
----------
b : ndarray
numerator of a linear filter
a : ndarray
denominator of a linear filter
worN : {None, int}, optional
If None, then compute at 512 frequencies around the unit circle.
If a single integer, then compute at that many frequencies.
Otherwise, compute the response at frequencies given in worN
whole : bool, optional
Normally, frequencies are computed from 0 to pi (the upper half of the
unit circle). If `whole` is True, compute frequencies from 0 to 2*pi.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The frequencies at which h was computed.
h : ndarray
The frequency response.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, since it plots the real part of the complex transfer
function rather than the magnitude.
Examples
--------
>>> b = firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.semilogy(w, np.abs(h), 'b')
>>> plt.ylabel('Amplitude (dB)', color='b')
>>> plt.xlabel('Frequency (rad/sample)')
>>> plt.grid()
>>> plt.legend()
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.show()
"""
b, a = map(atleast_1d, (b,a))
if whole:
lastpoint = 2*pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.arange(0,lastpoint,lastpoint/N)
elif isinstance(worN, types.IntType):
N = worN
w = numpy.arange(0,lastpoint,lastpoint/N)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j*w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if not plot is None:
plot(w, h)
return w, h
def tf2zpk(b, a):
"""Return zero, pole, gain (z,p,k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : ndarray
Numerator polynomial.
a : ndarray
Denominator polynomial.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
If some values of b are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
"""
b,a = normalize(b,a)
b = (b+0.0) / a[0]
a = (a+0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
def zpk2tf(z, p, k):
"""Return polynomial transfer function representation from zeros
and poles
Parameters
----------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial.
a : ndarray
Denominator polynomial.
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1]+1), temp.dtype.char)
if len(k) == 1:
k = [k[0]]*z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
return b, a
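# Minimal sketch: tf2zpk and zpk2tf round-trip a simple transfer function
# (s + 2) / (s**2 + 3*s + 2); the coefficients are example values.
#
#     >>> z, p, k = tf2zpk([1.0, 2.0], [1.0, 3.0, 2.0])
#     >>> b, a = zpk2tf(z, p, k)  # recovers approximately [1, 2] and [1, 3, 2]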
def normalize(b, a):
"""Normalize polynomial representation of a transfer function.
If values of b are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
"""
b,a = map(atleast_1d,(b,a))
if len(a.shape) != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if len(b.shape) > 2:
raise ValueError("Numerator polynomial must be rank-1 or rank-2 array.")
if len(b.shape) == 1:
b = asarray([b],b.dtype.char)
while a[0] == 0.0 and len(a) > 1:
a = a[1:]
outb = b * (1.0) / a[0]
outa = a * (1.0) / a[0]
if allclose(outb[:,0], 0, rtol=1e-14):
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
while allclose(outb[:,0], 0, rtol=1e-14) and (outb.shape[-1] > 1):
outb = outb[:,1:]
if outb.shape[0] == 1:
outb = outb[0]
return outb, outa
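# Minimal sketch: normalize() rescales both polynomials so the leading
# denominator coefficient is 1 (coefficients below are example values).
#
#     >>> b, a = normalize([2.0, 4.0], [2.0, 6.0, 4.0])  # b -> [1, 2], a -> [1, 3, 2]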
def lp2lp(b, a, wo=1.0):
"""Return a low-pass filter with cutoff frequency `wo`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d,n))
pwo = pow(wo,numpy.arange(M-1,-1,-1))
start1 = max((n-d,0))
start2 = max((d-n,0))
b = b * pwo[start1]/pwo[start2:]
a = a * pwo[start1]/pwo[start1:]
return normalize(b, a)
def lp2hp(b, a, wo=1.0):
"""Return a high-pass filter with cutoff frequency `wo`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo,numpy.arange(max((d,n))))
else:
pwo = numpy.ones(max((d,n)),b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b,(d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a,(n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
"""Return a band-pass filter with center frequency `wo` and bandwidth `bw`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a,b))
ma = max([N,D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np+1,artype)
aprime = numpy.zeros(Dp+1,artype)
wosq = wo*wo
for j in range(Np+1):
val = 0.0
for i in range(0,N+1):
for k in range(0,i+1):
if ma-i+2*k == j:
val += comb(i,k)*b[N-i]*(wosq)**(i-k) / bw**i
bprime[Np-j] = val
for j in range(Dp+1):
val = 0.0
for i in range(0,D+1):
for k in range(0,i+1):
if ma-i+2*k == j:
val += comb(i,k)*a[D-i]*(wosq)**(i-k) / bw**i
aprime[Dp-j] = val
return normalize(bprime, aprime)
def lp2bs(b, a, wo=1, bw=1):
"""Return a band-stop filter with center frequency `wo` and bandwidth `bw`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a,b))
M = max([N,D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np+1,artype)
aprime = numpy.zeros(Dp+1,artype)
wosq = wo*wo
for j in range(Np+1):
val = 0.0
for i in range(0,N+1):
for k in range(0,M-i+1):
if i+2*k == j:
val += comb(M-i,k)*b[N-i]*(wosq)**(M-i-k) * bw**i
bprime[Np-j] = val
for j in range(Dp+1):
val = 0.0
for i in range(0,D+1):
for k in range(0,M-i+1):
if i+2*k == j:
val += comb(M-i,k)*a[D-i]*(wosq)**(M-i-k) * bw**i
aprime[Dp-j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog filter using the bilinear transform.
The bilinear transform substitutes ``2*fs*(z-1) / (z+1)`` for ``s``.
"""
fs = float(fs)
a,b = map(atleast_1d,(a,b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N,D])
Np = M
Dp = M
bprime = numpy.zeros(Np+1,artype)
aprime = numpy.zeros(Dp+1,artype)
for j in range(Np+1):
val = 0.0
for i in range(N+1):
for k in range(i+1):
for l in range(M-i+1):
if k+l == j:
val += comb(i,k)*comb(M-i,l)*b[N-i]*pow(2*fs,i)*(-1)**k
bprime[j] = real(val)
for j in range(Dp+1):
val = 0.0
for i in range(D+1):
for k in range(i+1):
for l in range(M-i+1):
if k+l == j:
val += comb(i,k)*comb(M-i,l)*a[D-i]*pow(2*fs,i)*(-1)**k
aprime[j] = real(val)
return normalize(bprime, aprime)
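# Minimal sketch: discretizing the analog low-pass H(s) = 1 / (s + 1) at an
# arbitrary example sampling rate of 10 Hz yields a first-order digital filter.
#
#     >>> bz, az = bilinear([1.0], [1.0, 1.0], fs=10.0)
#     >>> len(bz), len(az)
#     (2, 2)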
def iirdesign(wp, ws, gpass, gstop, analog=0, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba') or pole-zero ('zpk') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
ftype : str, optional
The type of IIR filter to design:
- elliptic : 'ellip'
- Butterworth : 'butter',
- Chebyshev I : 'cheby1',
- Chebyshev II: 'cheby2',
- Bessel : 'bessel'
output : ['ba', 'zpk'], optional
Type of output: numerator/denominator ('ba') or pole-zero ('zpk').
Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator and denominator of the IIR filter. Only returned if
``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and gain of the IIR filter. Only returned if
``output='zpk'``.
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError("%s does not have order selection use iirfilter function." % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
band_type = 2*(len(wp)-1)
band_type +=1
if wp[0] >= ws[0]:
band_type += 1
btype = {1:'lowpass', 2:'highpass', 3:'bandstop', 4:'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype, ftype=ftype, output=output)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=0, ftype='butter', output='ba'):
"""IIR digital and analog filter design given order and critical points.
Design an Nth order lowpass digital or analog filter and return the filter
coefficients in (B,A) (numerator, denominator) or (Z,P,K) form.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
rp : float, optional
For Chebyshev and elliptic filters provides the maximum ripple
in the passband.
rs : float, optional
For chebyshev and elliptic filters provides the minimum attenuation in
the stop band.
btype : str, optional
The type of filter (lowpass, highpass, bandpass, bandstop).
Default is bandpass.
analog : int, optional
Non-zero to return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- elliptic : 'ellip'
- Butterworth : 'butter',
- Chebyshev I : 'cheby1',
- Chebyshev II: 'cheby2',
- Bessel : 'bessel'
output : ['ba', 'zpk'], optional
Type of output: numerator/denominator ('ba') or pole-zero ('zpk').
Default is 'ba'.
See Also
--------
buttord, cheb1ord, cheb2ord, ellipord
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("%s is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("%s is not a valid basic iir filter." % ftype)
if output not in ['ba', 'zpk']:
raise ValueError("%s is not a valid output form." % output)
#pre-warp frequencies for digital filter design
if not analog:
fs = 2.0
warped = 2*fs*tan(pi*Wn/fs)
else:
warped = Wn
# convert to low-pass prototype
if btype in ['lowpass', 'highpass']:
wo = warped
else:
bw = warped[1] - warped[0]
wo = sqrt(warped[0]*warped[1])
# Get analog lowpass prototype
if typefunc in [buttap, besselap]:
z, p, k = typefunc(N)
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband atteunatuion (rs) must be provided to design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
else: # Elliptic filters
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an elliptic filter.")
z, p, k = typefunc(N, rp, rs)
b, a = zpk2tf(z,p,k)
# transform to lowpass, bandpass, highpass, or bandstop
if btype == 'lowpass':
b, a = lp2lp(b,a,wo=wo)
elif btype == 'highpass':
b, a = lp2hp(b,a,wo=wo)
elif btype == 'bandpass':
b, a = lp2bp(b,a,wo=wo,bw=bw)
else: # 'bandstop'
b, a = lp2bs(b,a,wo=wo,bw=bw)
# Find discrete equivalent if necessary
if not analog:
b, a = bilinear(b, a, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return tf2zpk(b,a)
else:
return b,a
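# Minimal sketch: a 4th-order elliptic band-pass between normalized
# frequencies 0.2 and 0.5 with 1 dB passband ripple and 60 dB stopband
# attenuation (all example values), returned as numerator/denominator arrays.
#
#     >>> b, a = iirfilter(4, [0.2, 0.5], rp=1, rs=60,
#     ...                  btype='band', ftype='ellip')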
def butter(N, Wn, btype='low', analog=0, output='ba'):
"""Butterworth digital and analog filter design.
Design an Nth order lowpass digital or analog Butterworth filter and return
the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
buttord.
"""
return iirfilter(N, Wn, btype=btype, analog=analog, output=output, ftype='butter')
def cheby1(N, rp, Wn, btype='low', analog=0, output='ba'):
"""Chebyshev type I digital and analog filter design.
Design an Nth order lowpass digital or analog Chebyshev type I filter and
return the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
cheb1ord.
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog, output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=0, output='ba'):
"""Chebyshev type I digital and analog filter design.
Design an Nth order lowpass digital or analog Chebyshev type I filter and
return the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
cheb2ord.
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog, output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=0, output='ba'):
"""Elliptic (Cauer) digital and analog filter design.
Design an Nth order lowpass digital or analog elliptic filter and return
the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
ellipord.
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog, output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=0, output='ba'):
"""Bessel digital and analog filter design.
Design an Nth order lowpass digital or analog Bessel filter and return the
filter coefficients in (B,A) or (Z,P,K) form.
"""
return iirfilter(N, Wn, btype=btype, analog=analog, output=output, ftype='bessel')
def maxflat():
pass
def yulewalk():
pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp :
Edge of passband `passb`.
ind : int
Index specifying which `passb` edge to vary (0 or 1).
passb : array_like
Two element sequence of fixed passband edges.
stopb : array_like
Two element sequence of fixed stopband edges.
gpass : float
Amount of ripple in the passband in dB.
gstop : float
Amount of attenuation in the stopband in dB.
type : ['butter', 'cheby', 'ellip']
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
passbC = passb.copy()
passbC[ind] = wp
nat = stopb*(passbC[0]-passbC[1]) / (stopb**2 - passbC[0]*passbC[1])
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
n = (log10((GSTOP-1.0)/(GPASS-1.0)) / (2*log10(nat)))
elif type == 'cheby':
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
n = arccosh(sqrt((GSTOP-1.0)/(GPASS-1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10**(0.1*gstop)
GPASS = 10**(0.1*gpass)
arg1 = sqrt( (GPASS-1.0) / (GSTOP-1.0) )
arg0 = 1.0 / nat
d0 = special.ellipk([arg0**2, 1-arg0**2])
d1 = special.ellipk([arg1**2, 1-arg1**2])
n = (d0[0]*d1[1] / (d0[1]*d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
def buttord(wp, ws, gpass, gstop, analog=0):
"""Butterworth filter order selection.
Return the order of the lowest order digital Butterworth filter that loses
no more than `gpass` dB in the passband and has at least `gstop` dB
attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
filter_type +=1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies
if not analog:
passb = tan(wp*pi/2.0)
stopb = tan(ws*pi/2.0)
else:
passb = wp*1.0
stopb = ws*1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'butter'),
disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
ord = int(ceil( log10((GSTOP-1.0)/(GPASS-1.0)) / (2*log10(nat))))
# Find the Butterworth natural frequency W0 (or the "3dB frequency")
# to give exactly gstop at nat. W0 will be between 1 and nat
try:
W0 = nat / ( ( 10**(0.1*abs(gstop))-1)**(1.0/(2.0*ord)))
except ZeroDivisionError:
W0 = nat
print "Warning, order is zero...check input parametegstop."
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0*passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2,float)
WN[0] = ((passb[1] - passb[0]) + sqrt((passb[1] - passb[0])**2 + \
4*W0**2 * passb[0] * passb[1])) / (2*W0)
WN[1] = ((passb[1] - passb[0]) - sqrt((passb[1] - passb[0])**2 + \
4*W0**2 * passb[0] * passb[1])) / (2*W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0],float)
WN = -W0 * (passb[1]-passb[0]) / 2.0 + sqrt(W0**2 / 4.0 * \
(passb[1]-passb[0])**2 + \
passb[0]*passb[1])
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0/pi)*arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
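# Minimal sketch: pick the minimum Butterworth order for an example low-pass
# spec (pass edge 0.2, stop edge 0.3, 3 dB passband loss, 40 dB stopband
# attenuation), then design the filter with butter().
#
#     >>> N, Wn = buttord(0.2, 0.3, 3, 40)
#     >>> b, a = butter(N, Wn)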
def cheb1ord(wp, ws, gpass, gstop, analog=0):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital Chebyshev Type I filter that
loses no more than `gpass` dB in the passband and has at least `gstop` dB
attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies
if not analog:
passb = tan(pi*wp/2.)
stopb = tan(pi*ws/2.)
else:
passb = wp*1.0
stopb = ws*1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'cheby'), disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'cheby'), disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP-1.0) / (GPASS-1.0))) / arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0/pi)*arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=0):
"""Chebyshev type II filter order selection.
Return the order of the lowest order digital Chebyshev Type II filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies
if not analog:
passb = tan(pi*wp/2.0)
stopb = tan(pi*ws/2.0)
else:
passb = wp*1.0
stopb = ws*1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'cheby'),
disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP-1.0) / (GPASS-1.0))) / arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0/ord * arccosh(sqrt((GSTOP-1.0)/(GPASS-1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2,float)
nat[0] = new_freq / 2.0 * (passb[0]-passb[1]) + \
sqrt(new_freq**2 * (passb[1]-passb[0])**2 / 4.0 + \
passb[1] * passb[0])
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2,float)
nat[0] = 1.0/(2.0*new_freq) * (passb[0] - passb[1]) + \
sqrt((passb[1]-passb[0])**2 / (4.0*new_freq**2) + \
passb[1] * passb[0])
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0/pi)*arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=0):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital elliptic filter that loses no
more than gpass dB in the passband and has at least gstop dB attenuation in
the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`ellip` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies
if analog:
passb = wp*1.0
stopb = ws*1.0
else:
passb = tan(wp*pi/2.0)
stopb = tan(ws*pi/2.0)
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'ellip'),
disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*gstop)
GPASS = 10**(0.1*gpass)
arg1 = sqrt( (GPASS-1.0) / (GSTOP-1.0) )
arg0 = 1.0 / nat
d0 = special.ellipk([arg0**2, 1-arg0**2])
d1 = special.ellipk([arg1**2, 1-arg1**2])
ord = int(ceil(d0[0]*d1[1] / (d0[1]*d1[0])))
if not analog:
wn = arctan(passb)*2.0/pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def buttap(N):
"""Return (z,p,k) zero, pole, gain for analog prototype of an Nth
order Butterworth filter."""
z = []
n = numpy.arange(1,N+1)
p = numpy.exp(1j*(2*n-1)/(2.0*N)*pi)*1j
k = 1
return z, p, k
def cheb1ap(N, rp):
"""Return (z,p,k) zero, pole, gain for Nth order Chebyshev type I lowpass
analog filter prototype with `rp` decibels of ripple in the passband.
"""
z = []
eps = numpy.sqrt(10**(0.1*rp)-1.0)
n = numpy.arange(1,N+1)
mu = 1.0/N * numpy.log((1.0+numpy.sqrt(1+eps*eps)) / eps)
theta = pi/2.0 * (2*n-1.0)/N
p = -numpy.sinh(mu)*numpy.sin(theta) + 1j*numpy.cosh(mu)*numpy.cos(theta)
k = numpy.prod(-p,axis=0).real
if N % 2 == 0:
k = k / sqrt((1+eps*eps))
return z, p, k
def cheb2ap(N, rs):
"""Return (z,p,k) zero, pole, gain for Nth order Chebyshev type II lowpass
analog filter prototype with `rs` decibels of ripple in the stopband.
"""
de = 1.0/sqrt(10**(0.1*rs)-1)
mu = arcsinh(1.0/de)/N
if N % 2:
m = N - 1
n = numpy.concatenate((numpy.arange(1,N-1,2),numpy.arange(N+2,2*N,2)))
else:
m = N
n = numpy.arange(1,2*N,2)
z = conjugate(1j / cos(n*pi/(2.0*N)))
p = exp(1j*(pi*numpy.arange(1,2*N,2)/(2.0*N) + pi/2.0))
p = sinh(mu) * p.real + 1j*cosh(mu)*p.imag
p = 1.0 / p
k = (numpy.prod(-p,axis=0)/numpy.prod(-z,axis=0)).real
return z, p, k
EPSILON = 2e-16
def vratio(u, ineps, mp):
[s,c,d,phi] = special.ellipj(u,mp)
ret = abs(ineps - s/c)
return ret
def kratio(m, k_ratio):
m = float(m)
if m < 0:
m = 0.0
if m > 1:
m = 1.0
if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
k = special.ellipk([m,1-m])
r = k[0] / k[1] - k_ratio
elif abs(m) > EPSILON:
r = -k_ratio
else:
r = 1e20
return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) zeros, poles, and gain of an Nth order normalized
prototype elliptic analog lowpass filter with `rp` decibels of ripple in
the passband and a stopband `rs` decibels down.
References
----------
Lutova, Tosic, and Evans, "Filter Design for Signal Processing", Chapters 5
and 12.
"""
if N == 1:
p = -sqrt(1.0/(10**(0.1*rp)-1.0))
k = -p
z = []
return z, p, k
eps = numpy.sqrt(10**(0.1*rp)-1)
ck1 = eps / numpy.sqrt(10**(0.1*rs)-1)
ck1p = numpy.sqrt(1-ck1*ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs specifications.")
wp = 1
val = special.ellipk([ck1*ck1,ck1p*ck1p])
if abs(1-ck1p*ck1p) < EPSILON:
krat = 0
else:
krat = N*val[0] / val[1]
m = optimize.fmin(kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(kratio, 0, 1, args=(krat,), maxfun=250,
maxiter=250, disp=0)
capk = special.ellipk(m)
ws = wp / sqrt(m)
m1 = 1-m
j = numpy.arange(1-N%2,N,2)
jj = len(j)
[s,c,d,phi] = special.ellipj(j*capk/N,m*numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s,axis=-1)
z = 1.0 / (sqrt(m)*snew)
z = 1j*z
z = numpy.concatenate((z,conjugate(z)))
r = optimize.fmin(vratio, special.ellipk(m), args=(1./eps, ck1p*ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N*val[0])
[sv,cv,dv,phi] = special.ellipj(v0,1-m)
p = -(c*d*sv*cv + 1j*s*dv) / (1-(d*sv)**2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON*numpy.sqrt(numpy.sum(p*numpy.conjugate(p),axis=0).real), p,axis=-1)
p = numpy.concatenate((p,conjugate(newp)))
else:
p = numpy.concatenate((p,conjugate(p)))
k = (numpy.prod(-p,axis=0) / numpy.prod(-z,axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1+eps*eps))
return z, p, k
def besselap(N):
"""Return (z,p,k) zero, pole, gain for analog prototype of an Nth order
Bessel filter."""
z = []
k = 1
if N == 0:
p = [];
elif N == 1:
p = [-1]
elif N == 2:
p = [-.8660254037844386467637229+.4999999999999999999999996*1j,
-.8660254037844386467637229-.4999999999999999999999996*1j]
elif N == 3:
p = [-.9416000265332067855971980,
-.7456403858480766441810907-.7113666249728352680992154*1j,
-.7456403858480766441810907+.7113666249728352680992154*1j]
elif N == 4:
p = [-.6572111716718829545787781-.8301614350048733772399715*1j,
-.6572111716718829545787788+.8301614350048733772399715*1j,
-.9047587967882449459642637-.2709187330038746636700923*1j,
-.9047587967882449459642624+.2709187330038746636700926*1j]
elif N == 5:
p = [-.9264420773877602247196260,
-.8515536193688395541722677-.4427174639443327209850002*1j,
-.8515536193688395541722677+.4427174639443327209850002*1j,
-.5905759446119191779319432-.9072067564574549539291747*1j,
-.5905759446119191779319432+.9072067564574549539291747*1j]
elif N == 6:
p = [-.9093906830472271808050953-.1856964396793046769246397*1j,
-.9093906830472271808050953+.1856964396793046769246397*1j,
-.7996541858328288520243325-.5621717346937317988594118*1j,
-.7996541858328288520243325+.5621717346937317988594118*1j,
-.5385526816693109683073792-.9616876881954277199245657*1j,
-.5385526816693109683073792+.9616876881954277199245657*1j]
elif N == 7:
p = [-.9194871556490290014311619,
-.8800029341523374639772340-.3216652762307739398381830*1j,
-.8800029341523374639772340+.3216652762307739398381830*1j,
-.7527355434093214462291616-.6504696305522550699212995*1j,
-.7527355434093214462291616+.6504696305522550699212995*1j,
-.4966917256672316755024763-1.002508508454420401230220*1j,
-.4966917256672316755024763+1.002508508454420401230220*1j]
elif N == 8:
p = [-.9096831546652910216327629-.1412437976671422927888150*1j,
-.9096831546652910216327629+.1412437976671422927888150*1j,
-.8473250802359334320103023-.4259017538272934994996429*1j,
-.8473250802359334320103023+.4259017538272934994996429*1j,
-.7111381808485399250796172-.7186517314108401705762571*1j,
-.7111381808485399250796172+.7186517314108401705762571*1j,
-.4621740412532122027072175-1.034388681126901058116589*1j,
-.4621740412532122027072175+1.034388681126901058116589*1j]
elif N == 9:
p = [-.9154957797499037686769223,
-.8911217017079759323183848-.2526580934582164192308115*1j,
-.8911217017079759323183848+.2526580934582164192308115*1j,
-.8148021112269012975514135-.5085815689631499483745341*1j,
-.8148021112269012975514135+.5085815689631499483745341*1j,
-.6743622686854761980403401-.7730546212691183706919682*1j,
-.6743622686854761980403401+.7730546212691183706919682*1j,
-.4331415561553618854685942-1.060073670135929666774323*1j,
-.4331415561553618854685942+1.060073670135929666774323*1j]
elif N == 10:
p = [-.9091347320900502436826431-.1139583137335511169927714*1j,
-.9091347320900502436826431+.1139583137335511169927714*1j,
-.8688459641284764527921864-.3430008233766309973110589*1j,
-.8688459641284764527921864+.3430008233766309973110589*1j,
-.7837694413101441082655890-.5759147538499947070009852*1j,
-.7837694413101441082655890+.5759147538499947070009852*1j,
-.6417513866988316136190854-.8175836167191017226233947*1j,
-.6417513866988316136190854+.8175836167191017226233947*1j,
-.4083220732868861566219785-1.081274842819124562037210*1j,
-.4083220732868861566219785+1.081274842819124562037210*1j]
elif N == 11:
p = [-.9129067244518981934637318,
-.8963656705721166099815744-.2080480375071031919692341*1j
-.8963656705721166099815744+.2080480375071031919692341*1j,
-.8453044014712962954184557-.4178696917801248292797448*1j,
-.8453044014712962954184557+.4178696917801248292797448*1j,
-.7546938934722303128102142-.6319150050721846494520941*1j,
-.7546938934722303128102142+.6319150050721846494520941*1j,
-.6126871554915194054182909-.8547813893314764631518509*1j,
-.6126871554915194054182909+.8547813893314764631518509*1j,
-.3868149510055090879155425-1.099117466763120928733632*1j,
-.3868149510055090879155425+1.099117466763120928733632*1j]
elif N == 12:
p = [-.9084478234140682638817772-95506365213450398415258360.0e-27*1j,
-.9084478234140682638817772+95506365213450398415258360.0e-27*1j,
-.8802534342016826507901575-.2871779503524226723615457*1j,
-.8802534342016826507901575+.2871779503524226723615457*1j,
-.8217296939939077285792834-.4810212115100676440620548*1j,
-.8217296939939077285792834+.4810212115100676440620548*1j,
-.7276681615395159454547013-.6792961178764694160048987*1j,
-.7276681615395159454547013+.6792961178764694160048987*1j,
-.5866369321861477207528215-.8863772751320727026622149*1j,
-.5866369321861477207528215+.8863772751320727026622149*1j,
-.3679640085526312839425808-1.114373575641546257595657*1j,
-.3679640085526312839425808+1.114373575641546257595657*1j]
elif N == 13:
p = [-.9110914665984182781070663,
-.8991314665475196220910718-.1768342956161043620980863*1j,
-.8991314665475196220910718+.1768342956161043620980863*1j,
-.8625094198260548711573628-.3547413731172988997754038*1j,
-.8625094198260548711573628+.3547413731172988997754038*1j,
-.7987460692470972510394686-.5350752120696801938272504*1j,
-.7987460692470972510394686+.5350752120696801938272504*1j,
-.7026234675721275653944062-.7199611890171304131266374*1j,
-.7026234675721275653944062+.7199611890171304131266374*1j,
-.5631559842430199266325818-.9135900338325109684927731*1j,
-.5631559842430199266325818+.9135900338325109684927731*1j,
-.3512792323389821669401925-1.127591548317705678613239*1j,
-.3512792323389821669401925+1.127591548317705678613239*1j]
elif N == 14:
p = [-.9077932138396487614720659-82196399419401501888968130.0e-27*1j,
-.9077932138396487614720659+82196399419401501888968130.0e-27*1j,
-.8869506674916445312089167-.2470079178765333183201435*1j,
-.8869506674916445312089167+.2470079178765333183201435*1j,
-.8441199160909851197897667-.4131653825102692595237260*1j,
-.8441199160909851197897667+.4131653825102692595237260*1j,
-.7766591387063623897344648-.5819170677377608590492434*1j,
-.7766591387063623897344648+.5819170677377608590492434*1j,
-.6794256425119233117869491-.7552857305042033418417492*1j,
-.6794256425119233117869491+.7552857305042033418417492*1j,
-.5418766775112297376541293-.9373043683516919569183099*1j,
-.5418766775112297376541293+.9373043683516919569183099*1j,
-.3363868224902037330610040-1.139172297839859991370924*1j,
-.3363868224902037330610040+1.139172297839859991370924*1j]
elif N == 15:
p = [-.9097482363849064167228581,
-.9006981694176978324932918-.1537681197278439351298882*1j,
-.9006981694176978324932918+.1537681197278439351298882*1j,
-.8731264620834984978337843-.3082352470564267657715883*1j,
-.8731264620834984978337843+.3082352470564267657715883*1j,
-.8256631452587146506294553-.4642348752734325631275134*1j,
-.8256631452587146506294553+.4642348752734325631275134*1j,
-.7556027168970728127850416-.6229396358758267198938604*1j,
-.7556027168970728127850416+.6229396358758267198938604*1j,
-.6579196593110998676999362-.7862895503722515897065645*1j,
-.6579196593110998676999362+.7862895503722515897065645*1j,
-.5224954069658330616875186-.9581787261092526478889345*1j,
-.5224954069658330616875186+.9581787261092526478889345*1j,
-.3229963059766444287113517-1.149416154583629539665297*1j,
-.3229963059766444287113517+1.149416154583629539665297*1j]
elif N == 16:
p = [-.9072099595087001356491337-72142113041117326028823950.0e-27*1j,
-.9072099595087001356491337+72142113041117326028823950.0e-27*1j,
-.8911723070323647674780132-.2167089659900576449410059*1j,
-.8911723070323647674780132+.2167089659900576449410059*1j,
-.8584264231521330481755780-.3621697271802065647661080*1j,
-.8584264231521330481755780+.3621697271802065647661080*1j,
-.8074790293236003885306146-.5092933751171800179676218*1j,
-.8074790293236003885306146+.5092933751171800179676218*1j,
-.7356166304713115980927279-.6591950877860393745845254*1j,
-.7356166304713115980927279+.6591950877860393745845254*1j,
-.6379502514039066715773828-.8137453537108761895522580*1j,
-.6379502514039066715773828+.8137453537108761895522580*1j,
-.5047606444424766743309967-.9767137477799090692947061*1j,
-.5047606444424766743309967+.9767137477799090692947061*1j,
-.3108782755645387813283867-1.158552841199330479412225*1j,
-.3108782755645387813283867+1.158552841199330479412225*1j]
elif N == 17:
p = [-.9087141161336397432860029,
-.9016273850787285964692844-.1360267995173024591237303*1j,
-.9016273850787285964692844+.1360267995173024591237303*1j,
-.8801100704438627158492165-.2725347156478803885651973*1j,
-.8801100704438627158492165+.2725347156478803885651973*1j,
-.8433414495836129204455491-.4100759282910021624185986*1j,
-.8433414495836129204455491+.4100759282910021624185986*1j,
-.7897644147799708220288138-.5493724405281088674296232*1j,
-.7897644147799708220288138+.5493724405281088674296232*1j,
-.7166893842372349049842743-.6914936286393609433305754*1j,
-.7166893842372349049842743+.6914936286393609433305754*1j,
-.6193710717342144521602448-.8382497252826992979368621*1j,
-.6193710717342144521602448+.8382497252826992979368621*1j,
-.4884629337672704194973683-.9932971956316781632345466*1j,
-.4884629337672704194973683+.9932971956316781632345466*1j,
-.2998489459990082015466971-1.166761272925668786676672*1j,
-.2998489459990082015466971+1.166761272925668786676672*1j]
elif N == 18:
p = [-.9067004324162775554189031-64279241063930693839360680.0e-27*1j,
-.9067004324162775554189031+64279241063930693839360680.0e-27*1j,
-.8939764278132455733032155-.1930374640894758606940586*1j,
-.8939764278132455733032155+.1930374640894758606940586*1j,
-.8681095503628830078317207-.3224204925163257604931634*1j,
-.8681095503628830078317207+.3224204925163257604931634*1j,
-.8281885016242836608829018-.4529385697815916950149364*1j,
-.8281885016242836608829018+.4529385697815916950149364*1j,
-.7726285030739558780127746-.5852778162086640620016316*1j,
-.7726285030739558780127746+.5852778162086640620016316*1j,
-.6987821445005273020051878-.7204696509726630531663123*1j,
-.6987821445005273020051878+.7204696509726630531663123*1j,
-.6020482668090644386627299-.8602708961893664447167418*1j,
-.6020482668090644386627299+.8602708961893664447167418*1j,
-.4734268069916151511140032-1.008234300314801077034158*1j,
-.4734268069916151511140032+1.008234300314801077034158*1j,
-.2897592029880489845789953-1.174183010600059128532230*1j,
-.2897592029880489845789953+1.174183010600059128532230*1j]
elif N == 19:
p = [-.9078934217899404528985092,
-.9021937639390660668922536-.1219568381872026517578164*1j,
-.9021937639390660668922536+.1219568381872026517578164*1j,
-.8849290585034385274001112-.2442590757549818229026280*1j,
-.8849290585034385274001112+.2442590757549818229026280*1j,
-.8555768765618421591093993-.3672925896399872304734923*1j,
-.8555768765618421591093993+.3672925896399872304734923*1j,
-.8131725551578197705476160-.4915365035562459055630005*1j,
-.8131725551578197705476160+.4915365035562459055630005*1j,
-.7561260971541629355231897-.6176483917970178919174173*1j,
-.7561260971541629355231897+.6176483917970178919174173*1j,
-.6818424412912442033411634-.7466272357947761283262338*1j,
-.6818424412912442033411634+.7466272357947761283262338*1j,
-.5858613321217832644813602-.8801817131014566284786759*1j,
-.5858613321217832644813602+.8801817131014566284786759*1j,
-.4595043449730988600785456-1.021768776912671221830298*1j,
-.4595043449730988600785456+1.021768776912671221830298*1j,
-.2804866851439370027628724-1.180931628453291873626003*1j,
-.2804866851439370027628724+1.180931628453291873626003*1j]
elif N == 20:
p = [-.9062570115576771146523497-57961780277849516990208850.0e-27*1j,
-.9062570115576771146523497+57961780277849516990208850.0e-27*1j,
-.8959150941925768608568248-.1740317175918705058595844*1j,
-.8959150941925768608568248+.1740317175918705058595844*1j,
-.8749560316673332850673214-.2905559296567908031706902*1j,
-.8749560316673332850673214+.2905559296567908031706902*1j,
-.8427907479956670633544106-.4078917326291934082132821*1j,
-.8427907479956670633544106+.4078917326291934082132821*1j,
-.7984251191290606875799876-.5264942388817132427317659*1j,
-.7984251191290606875799876+.5264942388817132427317659*1j,
-.7402780309646768991232610-.6469975237605228320268752*1j,
-.7402780309646768991232610+.6469975237605228320268752*1j,
-.6658120544829934193890626-.7703721701100763015154510*1j,
-.6658120544829934193890626+.7703721701100763015154510*1j,
-.5707026806915714094398061-.8982829066468255593407161*1j,
-.5707026806915714094398061+.8982829066468255593407161*1j,
-.4465700698205149555701841-1.034097702560842962315411*1j,
-.4465700698205149555701841+1.034097702560842962315411*1j,
-.2719299580251652601727704-1.187099379810885886139638*1j,
-.2719299580251652601727704+1.187099379810885886139638*1j]
elif N == 21:
p = [-.9072262653142957028884077,
-.9025428073192696303995083-.1105252572789856480992275*1j,
-.9025428073192696303995083+.1105252572789856480992275*1j,
-.8883808106664449854431605-.2213069215084350419975358*1j,
-.8883808106664449854431605+.2213069215084350419975358*1j,
-.8643915813643204553970169-.3326258512522187083009453*1j,
-.8643915813643204553970169+.3326258512522187083009453*1j,
-.8299435470674444100273463-.4448177739407956609694059*1j,
-.8299435470674444100273463+.4448177739407956609694059*1j,
-.7840287980408341576100581-.5583186348022854707564856*1j,
-.7840287980408341576100581+.5583186348022854707564856*1j,
-.7250839687106612822281339-.6737426063024382240549898*1j,
-.7250839687106612822281339+.6737426063024382240549898*1j,
-.6506315378609463397807996-.7920349342629491368548074*1j,
-.6506315378609463397807996+.7920349342629491368548074*1j,
-.5564766488918562465935297-.9148198405846724121600860*1j,
-.5564766488918562465935297+.9148198405846724121600860*1j,
-.4345168906815271799687308-1.045382255856986531461592*1j,
-.4345168906815271799687308+1.045382255856986531461592*1j,
-.2640041595834031147954813-1.192762031948052470183960*1j,
-.2640041595834031147954813+1.192762031948052470183960*1j]
elif N == 22:
p = [-.9058702269930872551848625-52774908289999045189007100.0e-27*1j,
-.9058702269930872551848625+52774908289999045189007100.0e-27*1j,
-.8972983138153530955952835-.1584351912289865608659759*1j,
-.8972983138153530955952835+.1584351912289865608659759*1j,
-.8799661455640176154025352-.2644363039201535049656450*1j,
-.8799661455640176154025352+.2644363039201535049656450*1j,
-.8534754036851687233084587-.3710389319482319823405321*1j,
-.8534754036851687233084587+.3710389319482319823405321*1j,
-.8171682088462720394344996-.4785619492202780899653575*1j,
-.8171682088462720394344996+.4785619492202780899653575*1j,
-.7700332930556816872932937-.5874255426351153211965601*1j,
-.7700332930556816872932937+.5874255426351153211965601*1j,
-.7105305456418785989070935-.6982266265924524000098548*1j,
-.7105305456418785989070935+.6982266265924524000098548*1j,
-.6362427683267827226840153-.8118875040246347267248508*1j,
-.6362427683267827226840153+.8118875040246347267248508*1j,
-.5430983056306302779658129-.9299947824439872998916657*1j,
-.5430983056306302779658129+.9299947824439872998916657*1j,
-.4232528745642628461715044-1.055755605227545931204656*1j,
-.4232528745642628461715044+1.055755605227545931204656*1j,
-.2566376987939318038016012-1.197982433555213008346532*1j,
-.2566376987939318038016012+1.197982433555213008346532*1j]
elif N == 23:
p = [-.9066732476324988168207439,
-.9027564979912504609412993-.1010534335314045013252480*1j,
-.9027564979912504609412993+.1010534335314045013252480*1j,
-.8909283242471251458653994-.2023024699381223418195228*1j,
-.8909283242471251458653994+.2023024699381223418195228*1j,
-.8709469395587416239596874-.3039581993950041588888925*1j,
-.8709469395587416239596874+.3039581993950041588888925*1j,
-.8423805948021127057054288-.4062657948237602726779246*1j,
-.8423805948021127057054288+.4062657948237602726779246*1j,
-.8045561642053176205623187-.5095305912227258268309528*1j,
-.8045561642053176205623187+.5095305912227258268309528*1j,
-.7564660146829880581478138-.6141594859476032127216463*1j,
-.7564660146829880581478138+.6141594859476032127216463*1j,
-.6965966033912705387505040-.7207341374753046970247055*1j,
-.6965966033912705387505040+.7207341374753046970247055*1j,
-.6225903228771341778273152-.8301558302812980678845563*1j,
-.6225903228771341778273152+.8301558302812980678845563*1j,
-.5304922463810191698502226-.9439760364018300083750242*1j,
-.5304922463810191698502226+.9439760364018300083750242*1j,
-.4126986617510148836149955-1.065328794475513585531053*1j,
-.4126986617510148836149955+1.065328794475513585531053*1j,
-.2497697202208956030229911-1.202813187870697831365338*1j,
-.2497697202208956030229911+1.202813187870697831365338*1j]
elif N == 24:
p = [-.9055312363372773709269407-48440066540478700874836350.0e-27*1j,
-.9055312363372773709269407+48440066540478700874836350.0e-27*1j,
-.8983105104397872954053307-.1454056133873610120105857*1j,
-.8983105104397872954053307+.1454056133873610120105857*1j,
-.8837358034555706623131950-.2426335234401383076544239*1j,
-.8837358034555706623131950+.2426335234401383076544239*1j,
-.8615278304016353651120610-.3403202112618624773397257*1j,
-.8615278304016353651120610+.3403202112618624773397257*1j,
-.8312326466813240652679563-.4386985933597305434577492*1j,
-.8312326466813240652679563+.4386985933597305434577492*1j,
-.7921695462343492518845446-.5380628490968016700338001*1j,
-.7921695462343492518845446+.5380628490968016700338001*1j,
-.7433392285088529449175873-.6388084216222567930378296*1j,
-.7433392285088529449175873+.6388084216222567930378296*1j,
-.6832565803536521302816011-.7415032695091650806797753*1j,
-.6832565803536521302816011+.7415032695091650806797753*1j,
-.6096221567378335562589532-.8470292433077202380020454*1j,
-.6096221567378335562589532+.8470292433077202380020454*1j,
-.5185914574820317343536707-.9569048385259054576937721*1j,
-.5185914574820317343536707+.9569048385259054576937721*1j,
-.4027853855197518014786978-1.074195196518674765143729*1j,
-.4027853855197518014786978+1.074195196518674765143729*1j,
-.2433481337524869675825448-1.207298683731972524975429*1j,
-.2433481337524869675825448+1.207298683731972524975429*1j]
elif N == 25:
p = [-.9062073871811708652496104,
-.9028833390228020537142561-93077131185102967450643820.0e-27*1j,
-.9028833390228020537142561+93077131185102967450643820.0e-27*1j,
-.8928551459883548836774529-.1863068969804300712287138*1j,
-.8928551459883548836774529+.1863068969804300712287138*1j,
-.8759497989677857803656239-.2798521321771408719327250*1j,
-.8759497989677857803656239+.2798521321771408719327250*1j,
-.8518616886554019782346493-.3738977875907595009446142*1j,
-.8518616886554019782346493+.3738977875907595009446142*1j,
-.8201226043936880253962552-.4686668574656966589020580*1j,
-.8201226043936880253962552+.4686668574656966589020580*1j,
-.7800496278186497225905443-.5644441210349710332887354*1j,
-.7800496278186497225905443+.5644441210349710332887354*1j,
-.7306549271849967721596735-.6616149647357748681460822*1j,
-.7306549271849967721596735+.6616149647357748681460822*1j,
-.6704827128029559528610523-.7607348858167839877987008*1j,
-.6704827128029559528610523+.7607348858167839877987008*1j,
-.5972898661335557242320528-.8626676330388028512598538*1j,
-.5972898661335557242320528+.8626676330388028512598538*1j,
-.5073362861078468845461362-.9689006305344868494672405*1j,
-.5073362861078468845461362+.9689006305344868494672405*1j,
-.3934529878191079606023847-1.082433927173831581956863*1j,
-.3934529878191079606023847+1.082433927173831581956863*1j,
-.2373280669322028974199184-1.211476658382565356579418*1j,
-.2373280669322028974199184+1.211476658382565356579418*1j]
else:
raise ValueError("Bessel Filter not supported for order %d" % N)
return z, p, k
filter_dict = {'butter': [buttap,buttord],
'butterworth' : [buttap,buttord],
'cauer' : [ellipap,ellipord],
'elliptic' : [ellipap,ellipord],
'ellip' : [ellipap,ellipord],
'bessel' : [besselap],
'cheby1' : [cheb1ap, cheb1ord],
'chebyshev1' : [cheb1ap, cheb1ord],
'chebyshevi' : [cheb1ap, cheb1ord],
'cheby2' : [cheb2ap, cheb2ord],
'chebyshev2' : [cheb2ap, cheb2ord],
'chebyshevii' : [cheb2ap, cheb2ord]
}
band_dict = {'band':'bandpass',
'bandpass':'bandpass',
'pass' : 'bandpass',
'bp':'bandpass',
'bs':'bandstop',
'bandstop':'bandstop',
'bands' : 'bandstop',
'stop' : 'bandstop',
'l' : 'lowpass',
'low': 'lowpass',
'lowpass' : 'lowpass',
'high' : 'highpass',
'highpass' : 'highpass',
'h' : 'highpass'
}
warnings.simplefilter("always", BadCoefficients)
| gpl-3.0 |
ishank08/scikit-learn | sklearn/cluster/dbscan_.py | 20 | 12730 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski', metric_params=None,
algorithm='auto', leaf_size=30, p=2, sample_weight=None, n_jobs=1):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
metric_params : dict, optional
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i; while true, it's useless information)
if metric == 'precomputed' and sparse.issparse(X):
neighborhoods = np.empty(X.shape[0], dtype=object)
X.sum_duplicates() # XXX: modifies X's internals in-place
X_mask = X.data <= eps
masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
masked_indptr = np.concatenate(([0], np.cumsum(X_mask)))[X.indptr[1:]]
# insert the diagonal: a point is its own neighbor, but 0 distance
# means absence from sparse matrix data
masked_indices = np.insert(masked_indices, masked_indptr,
np.arange(X.shape[0]))
masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
# split into rows
neighborhoods[:] = np.split(masked_indices, masked_indptr)
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
metric_params=metric_params, p=p,
n_jobs=n_jobs)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
.. versionadded:: 0.17
metric *precomputed* to accept precomputed sparse matrix.
metric_params : dict, optional
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
metric_params=None, algorithm='auto', leaf_size=30, p=None,
n_jobs=1):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.metric_params = metric_params
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.n_jobs = n_jobs
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight,
**self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
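# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of scikit-learn).
# It shows one way the functional ``dbscan`` and the ``DBSCAN`` estimator
# defined above can be called; the toy coordinates and the ``eps`` /
# ``min_samples`` settings are arbitrary values chosen for this demonstration.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    X_demo = np.array([[1.0, 1.1], [1.2, 0.9], [0.9, 1.0],
                       [8.0, 8.1], [8.2, 7.9], [25.0, 80.0]])
    core_idx, labels = dbscan(X_demo, eps=0.6, min_samples=2)
    est = DBSCAN(eps=0.6, min_samples=2).fit(X_demo)
    # Both interfaces should agree: two dense clusters (labels 0 and 1) and
    # one noise point labelled -1 for the isolated last sample.
    print(labels)
    print(est.labels_)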
| bsd-3-clause |
zuku1985/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
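# Hedged addition (illustrative, not part of the original example): for a
# hard-margin linear SVM the distance between the two dashed margin lines
# equals 2 / ||w||, so the geometric margin can be checked numerically
margin = 2.0 / np.linalg.norm(w)
print("geometric margin width: %.3f" % margin)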
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/io/api.py | 3 | 1112 | """
Data IO api
"""
# flake8: noqa
from pandas.io.parsers import read_csv, read_table, read_fwf
from pandas.io.clipboard.clipboard import read_clipboard
from pandas.io.excel import ExcelFile, ExcelWriter, read_excel
from pandas.io.pytables import HDFStore, get_store, read_hdf
from pandas.io.json import read_json
from pandas.io.html import read_html
from pandas.io.sql import read_sql, read_sql_table, read_sql_query
from pandas.io.sas import read_sas
from pandas.io.feather_format import read_feather
from pandas.io.stata import read_stata
from pandas.io.pickle import read_pickle, to_pickle
from pandas.io.packers import read_msgpack, to_msgpack
from pandas.io.gbq import read_gbq
# deprecation, xref #13790
def Term(*args, **kwargs):
import warnings
warnings.warn("pd.Term is deprecated as it is not "
"applicable to user code. Instead use in-line "
"string expressions in the where clause when "
"searching in HDFStore",
FutureWarning, stacklevel=2)
from pandas.io.pytables import Term
return Term(*args, **kwargs)
| agpl-3.0 |
crate/crash | crate/crash/tabulate.py | 1 | 42918 | # -*- coding: utf-8 -*-
# Copyright (c) 2011-2014 Sergey Astanin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Pretty-print tabular data."""
import io
import re
from collections import namedtuple
from functools import partial, reduce
from itertools import zip_longest as izip_longest
_none_type = type(None)
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
def float_format(val):
return str(val)
def _is_file(f):
return isinstance(f, io.IOBase)
try:
import wcwidth # optional wide-character (CJK) support
except ImportError:
wcwidth = None
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.5"
MIN_PADDING = 0
# if True, enable wide-character (CJK) support
WIDE_CHARS_MODE = wcwidth is not None
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebetweenrows ---
# ... (more datarows) ...
# --- linebetweenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| '}
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator * 2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"'}
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
return "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = {"left": "l", "right": "r", "center": "c", "decimal": "r"}
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
    return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
                      "\\toprule" if booktabs else "\\hline"])
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns):
def escape_char(c):
return LATEX_ESCAPE_RULES.get(c, c)
escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
rowfmt = DataRow("", "&", "\\\\")
return _build_simple_row(escaped_values, rowfmt)
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
linebelowheader=Line("╞", "═", "╪", "╡"),
linebetweenrows=Line("├", "─", "┼", "┤"),
linebelow=Line("╘", "═", "╧", "╛"),
headerrow=DataRow("│", "│", "│"),
datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"html":
TableFormat(lineabove=Line("<table>", "", "", ""),
linebelowheader=None,
linebetweenrows=None,
linebelow=Line("</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=None),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
_multiline_codes = re.compile(r"\r|\n|\r\n")
_multiline_codes_bytes = re.compile(b"\r|\n|\r\n")
_invisible_codes = re.compile(r"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
"""
return TableFormat(None, None, None, None,
headerrow=DataRow('', separator, ''),
datarow=DataRow('', separator, ''),
padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string, inttype=int):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is inttype or \
(isinstance(string, _binary_type) or isinstance(string, _text_type)) \
and \
_isconvertible(inttype, string)
def _type(string, has_invisible=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isint(string):
return int
elif _isint(string, _long_type):
return _long_type
elif _isnumber(string):
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:>%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:<%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:^%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
def _padnone(ignore_width, s):
return s
def _strip_invisible(s):
"Remove invisible ANSI color codes."
if isinstance(s, _text_type):
return re.sub(_invisible_codes, "", s)
else: # a bytestring
return re.sub(_invisible_codes_bytes, "", s)
def _max_line_width(s):
"""
    Visible width of potentially multiline content.
>>> _max_line_width('this\\nis\\na\\nmultiline\\ntext')
9
"""
if not s:
return 0
return max(map(len, s.splitlines()))
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return _max_line_width(_strip_invisible(s))
else:
return _max_line_width(_text_type(s))
def _is_multiline(s):
if isinstance(s, _text_type):
return bool(re.search(_multiline_codes, s))
else: # a bytestring
return bool(re.search(_multiline_codes_bytes, s))
def _multiline_width(multiline_s, line_width_fn=len):
return max(map(line_width_fn, re.split("[\r\n]", multiline_s)))
def _choose_width_fn(has_invisible, enable_widechars, is_multiline):
"""Return a function to calculate visible cell width."""
if has_invisible:
line_width_fn = _visible_width
elif enable_widechars: # optional wide-character support if available
line_width_fn = wcwidth.wcswidth
else:
line_width_fn = len
if is_multiline:
width_fn = lambda s: _multiline_width(s, line_width_fn)
else:
width_fn = line_width_fn
return width_fn
def _align_column_choose_padfn(strings, alignment, has_invisible):
if alignment == "right":
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
if has_invisible:
decimals = [_afterpoint(_strip_invisible(s)) for s in strings]
else:
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
padfn = _padnone
else:
strings = [s.strip() for s in strings]
padfn = _padright
return strings, padfn
def _align_column(strings, alignment, minwidth=0,
has_invisible=True, enable_widechars=False, is_multiline=False):
"""[string] -> [padded_string]
>>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
[' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
>>> list(map(str,_align_column(['123.4', '56.7890'], None)))
['123.4', '56.7890']
"""
strings, padfn = _align_column_choose_padfn(strings, alignment, has_invisible)
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
s_widths = list(map(width_fn, strings))
maxwidth = max(max(s_widths), minwidth)
# TODO: refactor column alignment in single-line and multiline modes
if is_multiline:
if not enable_widechars and not has_invisible:
padded_strings = [
"\n".join([padfn(maxwidth, s) for s in ms.splitlines()])
for ms in strings]
else:
# enable wide-character width corrections
s_lens = [max((len(s) for s in re.split("[\r\n]", ms))) for ms in strings]
visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
# wcswidth and _visible_width don't count invisible characters;
# padfn doesn't need to apply another correction
if strings[0] == '':
strings[0] = ' '
padded_strings = ["\n".join([padfn(w, s) for s in (ms.splitlines() or ms)])
for ms, w in zip(strings, visible_widths)]
else: # single-line cell values
if not enable_widechars and not has_invisible:
padded_strings = [padfn(maxwidth, s) for s in strings]
else:
# enable wide-character width corrections
s_lens = list(map(len, strings))
visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
# wcswidth and _visible_width don't count invisible characters;
# padfn doesn't need to apply another correction
padded_strings = [padfn(w, s) for s, w in zip(strings, visible_widths)]
return padded_strings
def _more_generic(type1, type2):
types = {_none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4}
invtypes = {4: _text_type, 3: _binary_type, 2: float, 1: int, 0: _none_type}
moregeneric = max(types.get(type1, 4), types.get(type2, 4))
return invtypes[moregeneric]
def _column_type(values, has_invisible=True):
"""The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
return reduce(_more_generic, [type(v) for v in values], int)
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
    """Format a value according to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _long_type, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
try:
return _text_type(val, "ascii")
except TypeError:
return _text_type(val)
elif valtype is float:
is_a_colored_number = has_invisible and isinstance(val, (_text_type, _binary_type))
if is_a_colored_number:
raw_val = _strip_invisible(val)
formatted_val = format(float(raw_val), floatfmt)
return val.replace(raw_val, formatted_val)
elif not floatfmt:
return float_format(val)
else:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width, visible_width, enable_widechars=False, is_multiline=False):
if is_multiline:
header_lines = re.split(_multiline_codes, header)
padded_lines = [_align_header(h, alignment, width, visible_width) for h in header_lines]
return "\n".join(padded_lines)
# else: not multiline
ninvisible = max(0, len(header) - visible_width)
width += ninvisible
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* list of dicts (usually used with headers="keys")
* list of OrderedDicts (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
"""
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = tabular_data.keys()
vals = tabular_data.values # values matrix doesn't need to be transposed
names = tabular_data.index
rows = [[v] + list(row) for v, row in zip(names, vals)]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type, keys)) # headers should be strings
    else:  # it's a usual iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")):
# namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif (len(rows) > 0
and isinstance(rows[0], dict)):
# dict or OrderedDict
uniq_keys = set() # implements hashed lookup
keys = [] # storage for set
if headers == "firstrow":
firstdict = rows[0] if len(rows) > 0 else {}
keys.extend(firstdict.keys())
uniq_keys.update(keys)
rows = rows[1:]
for row in rows:
for k in row.keys():
# Save unique items in input order
if k not in uniq_keys:
keys.append(k)
uniq_keys.add(k)
if headers == 'keys':
headers = keys
elif isinstance(headers, dict):
# a dict of headers for a list of dicts
headers = [headers.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
elif headers == "firstrow":
if len(rows) > 0:
headers = [firstdict.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
else:
headers = []
elif headers:
raise ValueError('headers for a list of dicts is not a dict or a keyword')
rows = [[row.get(k) for k in keys] for row in rows]
elif headers == "keys" and len(rows) > 0:
# keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
headers = list(map(_text_type, rows[0])) # headers should be strings
rows = rows[1:]
headers = list(map(_text_type, headers))
rows = list(map(list, rows))
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""] * (ncols - nhs) + headers
return rows, headers
def tabulate(tabular_data, headers=(), tablefmt="simple",
floatfmt="g", numalign="decimal", stralign="left",
missingval=""):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, an iterable of dictionaries,
a two-dimensional NumPy array, NumPy record array, or a Pandas'
dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point.
`None` values are replaced with a `missingval` string:
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["this\\nis\\na multiline\\ntext", "41.9999", "foo\\nbar"], ["NULL", "451.0", ""]],
... ["text", "numbers", "other"], "grid"))
+-------------+----------+-------+
| text | numbers | other |
+=============+==========+=======+
| this | 41.9999 | foo |
| is | | bar |
| a multiline | | |
| text | | |
+-------------+----------+-------+
| NULL | 451 | |
+-------------+----------+-------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"fancy_grid" draws a grid using box-drawing characters:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "fancy_grid"))
╒═══════════╤═══════════╕
│ strings │ numbers │
╞═══════════╪═══════════╡
│ spam │ 41.9999 │
├───────────┼───────────┤
│ eggs │ 451 │
╘═══════════╧═══════════╛
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"html" produces HTML markup:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="html"))
<table>
<tr><th>strings </th><th style="text-align: right;"> numbers</th></tr>
<tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr>
<tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr>
</table>
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"latex_booktabs" produces a tabular environment of LaTeX document markup
using the booktabs.sty package:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
\\begin{tabular}{lr}
\\toprule
spam & 41.9999 \\\\
eggs & 451 \\\\
\\bottomrule
    \\end{tabular}
"""
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
is_multiline = _is_multiline(plain_text)
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
# format rows and columns, convert numeric values to strings
cols = list(zip(*list_of_lists))
coltypes = list(map(_column_type, cols))
cols = [[_format(v, ct, floatfmt, missingval, has_invisible) for v in c]
for c, ct in zip(cols, coltypes)]
# align columns
aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0] * len(cols)
cols = [_align_column(c, a, minw, has_invisible, enable_widechars, is_multiline)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw, width_fn(h), enable_widechars, is_multiline)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [width_fn(c[0]) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns, is_multiline)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill * w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " " * padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt):
lines.append(_build_row(padded_cells, colwidths, colaligns, rowfmt))
return lines
def _append_multiline_row(lines, padded_multiline_cells, padded_widths, colaligns, rowfmt, pad):
colwidths = [w - 2 * pad for w in padded_widths]
cells_lines = [c.splitlines() for c in padded_multiline_cells]
nlines = max(map(len, cells_lines)) # number of lines in the row
# vertically pad cells where some lines are missing
cells_lines = [(cl + [' ' * w] * (nlines - len(cl))) for cl, w in zip(cells_lines, colwidths)]
lines_cells = [[cl[i] for cl in cells_lines] for i in range(nlines)]
for ln in lines_cells:
padded_ln = _pad_row(ln, 1)
_append_basic_row(lines, padded_ln, colwidths, colaligns, rowfmt)
return lines
def _append_line(lines, colwidths, colaligns, linefmt):
lines.append(_build_line(colwidths, colaligns, linefmt))
return lines
def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2 * pad) for w in colwidths]
if is_multiline:
pad_row = lambda row, _: row # do it later, in _append_multiline_row
append_row = partial(_append_multiline_row, pad=pad)
else:
pad_row = _pad_row
append_row = _append_basic_row
padded_headers = pad_row(headers, pad)
padded_rows = [pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.lineabove)
if padded_headers:
append_row(lines, padded_headers, padded_widths, colaligns, headerrow)
if fmt.linebelowheader and "linebelowheader" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelowheader)
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
_append_line(lines, padded_widths, colaligns, fmt.linebetweenrows)
# the last row without a line below
append_row(lines, padded_rows[-1], padded_widths, colaligns, fmt.datarow)
else:
for row in padded_rows:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
if fmt.linebelow and "linebelow" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelow)
return "\n".join(lines)
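# --------------------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original tabulate module). It shows
# that a custom TableFormat instance -- built from the Line/DataRow elements documented near the top
# of this file -- can be passed directly to tabulate() as ``tablefmt``, alongside the built-in
# format names; the separator characters and the sample rows below are arbitrary choices.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    _demo_fmt = TableFormat(lineabove=None,
                            linebelowheader=Line("", "=", "  ", ""),
                            linebetweenrows=None,
                            linebelow=None,
                            headerrow=DataRow("", "  ", ""),
                            datarow=DataRow("", "  ", ""),
                            padding=0, with_header_hide=None)
    _demo_rows = [["spam", 41.9999], ["eggs", 451.0]]
    print(tabulate(_demo_rows, headers=["item", "qty"], tablefmt=_demo_fmt))
    # dict-of-iterables input with headers="keys", as described in _normalize_tabular_data:
    print(tabulate({"item": ["spam", "eggs"], "qty": [42, 451]}, headers="keys"))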
| apache-2.0 |
klion93/Exciting-Scripts-Tools | OPTIMIZE-analyze_boron10.py | 1 | 17925 | #!/usr/bin/env python
#%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%#
#%!% ---------------------------------- OPTIMIZE-analyze.py ---------------------------------- %!%#
#%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%#
#
# AUTHOR:
# Rostam Golesorkhtabar
# r.golesorkhtabar@gmail.com
#
# DATE:
# Wed Jan 01 00:00:00 2014
#
# SYNTAX:
# python OPTIMIZE-analyze.py
# OPTIMIZE-analyze.py
#
# EXPLANATION:
#
#__________________________________________________________________________________________________
from pylab import *
import os
import sys
import glob
import copy
import math
import os.path
import numpy as np
from lxml import etree as ET
import matplotlib.pyplot as plt
import pylab as pyl
from scipy.optimize import fmin_powell
#%!%!%--- CONSTANTS ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!
_e = 1.602176565e-19 # elementary charge
Bohr = 5.291772086e-11 # a.u. to meter
Ha2eV= 27.211396132 # Ha to eV
ToGPa= (_e*Ha2eV)/(1e9*Bohr**3) # Ha/[a.u.]^3 to GPa
#__________________________________________________________________________________________________
#%!%!%--- SUBROUTINES AND FUNCTIONS ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%
def E_eos(p0, V):
if (eos=='M'):
""" Murnaghan Energy"""
E0, V0, B0, Bp = p0
E = E0 + (B0*V/Bp*(1/(Bp-1)*(V0/V)**Bp +1)-B0*V0/(Bp-1))
else:
""" Birch-Murnaghan Energy"""
E0, V0, B0, Bp = p0
E = E0 + (9.*B0*V0/16)*(((((V0/V)**(2./3))-1.)**3.)*Bp \
+ ((((V0/V)**(2/3.))-1.)**2.)*(6.-4.*((V0/V)**(2./3.))))
return E
#--------------------------------------------------------------------------------------------------
def P_eos(p0, V):
if (eos=='M'):
""" Murnaghan Pressure"""
E0, V0, B0, Bp = p0
P = B0/Bp*((V0/V)**Bp - 1.)
else:
""" Birch-Murnaghan Pressure"""
E0, V0, B0, Bp = p0
P = 3./2*B0*((V0/V)**(7./3) - (V0/V)**(5./3))*(1. + 3./4*(Bp-4.)*((V0/V)**(2./3) - 1.))
return P
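#--------------------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original script). It shows how the
# E_eos/P_eos functions above can be evaluated for a hand-picked parameter set p0 = [E0, V0, B0, Bp],
# with E0 in [Ha], V0 in [Bohr^3] and B0 in [Ha/Bohr^3] (converted to GPa via ToGPa). The numbers
# are arbitrary, this helper is never called by the script, and it assumes the module-level flag
# 'eos' has already been set to 'M' or 'BM' as done later in the workflow.
def _demo_eos_usage():
    p_demo = [-1.234, 100.0, 0.005, 4.0]   # E0, V0, B0, B'
    V_demo = np.linspace(90., 110., 5)     # volumes around V0 [Bohr^3]
    E_demo = E_eos(p_demo, V_demo)         # energies on the EOS curve [Ha]
    P_demo = P_eos(p_demo, V_demo)*ToGPa   # pressures converted to [GPa]
    return E_demo, P_demo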
#--------------------------------------------------------------------------------------------------
def snr(p0, v, e):
""" Squared norm of residue vector calculation """
return np.sum((e - E_eos(p0, v))**2.)
#--------------------------------------------------------------------------------------------------
def sortlist(lst1, lst2):
temp = copy.copy(lst1)
lst3 = []
lst4 = []
temp.sort()
for i in range(len(lst1)):
lst3.append(lst1[lst1.index(temp[i])])
lst4.append(lst2[lst1.index(temp[i])])
return lst3, lst4
#--------------------------------------------------------------------------------------------------
def readenergy():
os.system("grep \"Total energy \" INFO.OUT > tempfile")
tmpfile = open('tempfile', 'r')
e = float(tmpfile.readlines()[-1].strip().split()[3])
tmpfile.close()
os.system("rm -f tempfile")
return e
#__________________________________________________________________________________________________
#%!%!%--- Reading the INFO file ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!
INFO_file = str(glob.glob('INFO_*')[0])
INFO=open(INFO_file,'r')
mod =INFO_file[5:]
l1 = INFO.readline()
l2 = INFO.readline()
l3 = INFO.readline()
mdr = float(l3.split()[-1])
l4 = INFO.readline()
NoP = int(l4.split()[-1])
INFO.close()
#--------------------------------------------------------------------------------------------------
#%!%--- Reading the energies ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%
if (mod == 'VOL'):
volume = []
energy = []
vollist= glob.glob('vol_??')
for vol_num in vollist:
os.chdir(vol_num)
if (os.path.exists('INFO.OUT') == False):
            print '\n ... Oops NOTICE: There is NO "INFO.OUT" file in "'+ vol_num + \
'" directory !?!?!? \n'
for line in open('INFO.OUT','r'):
if (line.find('Unit cell volume')>=0):
vol = float(line.split()[-1])
break
volume.append(vol)
energy.append(readenergy())
os.chdir('../')
volume, energy = sortlist(volume, energy)
fvol = open('energy-vs-volume', 'w')
for i in range(len(energy)):
print >>fvol, volume[i],' ', energy[i]
fvol.close()
data = np.loadtxt('energy-vs-volume')
vi, ei = data.T
if (len(ei) < 3): sys.exit('\n ... Oops ERROR: EOS fit needs at least 3 points. \n')
#%!%!%!%!--- Reading the EOS type ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!
eos = raw_input('\n>>>> Murnaghan or Birch-Murnaghan EOS: [M/B] ').upper()
if (eos != 'B' and eos != 'M'): sys.exit("\n ... Oops ERROR: Choose 'B' or 'M' \n")
if (eos == 'B'): eos = 'BM'
#----------------------------------------------------------------------------------------------
#%!%!%!%!--- FIT CALCULATIONS ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!
a2, a1, a0 = np.polyfit(vi, ei, 2)
V0 = -a1/(2.*a2)
E0 = a2*V0**2. + a1*V0 + a0
B0 = a2*V0
Bp = 2.
p0 = [E0, V0, B0, Bp]
viei = sorted([zip(vi, ei)])
v, e = np.array(viei).T
p1, fopt, direc, n_iter, n_funcalls, warnflag = \
fmin_powell(snr, p0, args=(v, e), full_output=True, disp=0)
E0, V0, B0, Bp = p1
print\
'\n =====================================================================',\
'\n Fit accuracy:',\
'\n Log(Final residue in [Ha]): '+str(round(log10(sqrt(fopt)),2)),'\n'\
'\n Final parameters:' \
'\n E_min = ' + str(round(E0,7)) +' [Ha]' \
'\n V_min = ' + str(round(V0,4)) +' [Bohr^3]'\
'\n B_0 = ' + str(round(B0*ToGPa,3))+' [GPa]' \
"\n B' = " + str(round(Bp,3)) +'\n'
str_V = []
str_de= []
str_P = []
for i in range(len(ei)):
Pi = P_eos(p1, vi[i])*ToGPa
ei_eos = E_eos(p1, vi[i])
str_vi = str(round(vi[i],4))
if (Pi > 0):
str_Pi = '+'+str(round(Pi,3))
else:
str_Pi = str(round(Pi,3))
dei = ei[i] - ei_eos
if (dei > 0):
str_dei = '+'+str('%8.8f'%(dei))
else:
str_dei = str('%8.8f'%(dei))
str_V.append(str_vi)
str_de.append(str_dei)
str_P.append(str_Pi)
sum_Vi = 0
sum_dei= 0
sum_Pi = 0
for i in range(len(ei)):
sum_Vi = sum_Vi + len(str_V[i])
sum_dei= sum_dei + len(str_de[i])
sum_Pi = sum_Pi + len(str_P[i])
len_Vi = int(sum_Vi /len(ei)) + 1
len_dei= int(sum_dei/len(ei)) + 1
len_Pi = int(sum_Pi /len(ei)) + 1
#%!%!%--- WRITING THE OUTPUT FILE ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!
outf = open(eos + '_eos.out', 'w')
if (eos=='M'):
print >>outf,' === Murnaghan eos ==============================='
else:
print >>outf,' === Birch-Murnaghan eos ========================='
print >>outf, \
' Fit accuracy:', \
'\n Log(Final residue in [Ha]): '+str(round(log10(sqrt(fopt)),2)), '\n'\
'\n Final parameters:' \
'\n E_min = ' + str(round(E0,7)) +' [Ha]' \
'\n V_min = ' + str(round(V0,4)) +' [Bohr^3]' \
'\n B_0 = ' + str(round(B0*ToGPa,3))+' [GPa]' \
"\n B' = " + str(round(Bp,3)) + \
'\n =================================================\n'\
'\n Volume' + ((len_Vi-3)*' ') + 'E_dft-E_eos Pressure [GPa]'
for i in range(len(ei)):
print >>outf, str_V[i] + ((len_Vi -len(str_V[i] ))*' ') + ' ' \
+ str_de[i] + ((len_dei-len(str_de[i]))*' ') + ' ' + str_P[i]
outf.close()
#----------------------------------------------------------------------------------------------
#%!%!%--- Writing the 'eos-optimized.xml' file ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%
INOBJ= open(mod.lower()+'-xml/input.xml', 'r')
doc = ET.parse(INOBJ)
root = doc.getroot()
scale = map(float,doc.xpath('/input/structure/crystal/@scale'))
if (scale==[]):
ascale=1.
else:
ascale=scale[0]
stretchstr = doc.xpath('/input/structure/crystal/@stretch')
if (stretchstr==[]):
stretch=[1.,1.,1.]
else:
stretch=np.array(map(float,stretchstr[0].split()))
basevectsn = doc.xpath('//basevect/text()')
bv = []
for basevect in basevectsn:
bv.append(map(float,basevect.split()))
M_old= np.array(bv)
D = np.linalg.det(M_old)
V0_in= abs(stretch[0]*stretch[1]*stretch[2]*ascale**3*D)
s_min= (V0/V0_in)**(1./3.)-1.
def_matrix={\
'VOL' :[[ 1.+s_min, 0. , 0. ],
[ 0. , 1.+s_min, 0. ],
[ 0. , 0. , 1.+s_min]]}
M_min = np.array(def_matrix[mod])
M_new = np.dot(M_old, M_min)
bsvct = doc.xpath('//crystal/basevect')
for j in range(3):
bdummy = '%22.16f'%(M_new[j,0]) + '%22.16f'%(M_new[j,1]) + '%22.16f'%(M_new[j,2])+' '
bsvct[j].text = bdummy
#---Writing the structure file-----------------------------------------------------------------
OUTOBJ = open(eos+'-optimized.xml', 'w')
OUTOBJ.write(ET.tostring(root, method ='xml',
pretty_print =True ,
xml_declaration=True ,
encoding ='UTF-8'))
OUTOBJ.close()
print ' Optimized lattice parameter saved into the file: "' + eos + '-optimized.xml".'\
'\n =====================================================================\n'
#--------------------------------------------------------------------------------------------------
if (mod != 'VOL'):
fee = open('energy-vs-strain', 'w')
for i in range(1, NoP+1):
if (0 < i and i < 10): dir_num = mod.lower() + '_0'+ str(i)
if (9 < i and i < 100): dir_num = mod.lower() + '_' + str(i)
if (os.path.exists(dir_num) == False):
print '\n ... Oops NOTICE: There is NO '+ dir_num +' directory !?!?!? \n'
break
os.chdir(dir_num)
if (os.path.exists('INFO.OUT') == False):
print '\n ... Oops NOTICE: There is NO "INFO.OUT" file in "'+ dir_num + \
'" directory !?!?!? \n'
s = i-(NoP+1)/2
r = 2*mdr*s/(NoP-1)
if (s==0): r=0.00001
if (r>0):
strain ='+'+str(round(r,10))
else:
strain = str(round(r,10))
print >>fee, strain,' ', readenergy()
os.chdir('../')
fee.close()
data = np.loadtxt('energy-vs-strain')
si, ei = data.T
vs = sorted(zip(si, ei))
s, e = np.array(vs).T
if (len(e) < 5):
sys.exit('\n ... Oops ERROR: 4th order polynomial fit needs at least 5 points.\n')
#%!%!%!%!--- FIT CALCULATIONS ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!
coefficients = np.polyfit(si, ei, 4)
f4 = np.poly1d(coefficients)
s_fit = np.linspace(mdr*-1.2, mdr*1.2, 1000)
e_fit = f4(s_fit)
s_min = s_fit[e_fit.argmin()]
e_min = e_fit[e_fit.argmin()]
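# The optimal strain and energy are taken at the minimum of the 4th-order polynomial, evaluated on a
# dense grid that extends 20% beyond the sampled strain range [-mdr, +mdr].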
#%!%--- Writing the 'mod-optimized.xml' file ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%
INOBJ= open(mod.lower()+'-xml/input.xml', 'r')
doc = ET.parse(INOBJ)
root = doc.getroot()
stretchstr = doc.xpath('/input/structure/crystal/@stretch')
if (stretchstr==[]):
stretch=[1.,1.,1.]
else:
stretch=np.array(map(float,stretchstr[0].split()))
basevectsn = doc.xpath('//basevect/text()')
bv = []
for basevect in basevectsn:
bv.append(map(float,basevect.split()))
M_old= np.array(bv)
def_matrix={\
'BOA' :[[(1+s_min)**-.5, 0. , 0. ],
[ 0. , 1.+s_min , 0. ],
[ 0. , 0. ,(1+s_min)**-.5]],\
'COA' :[[(1+s_min)**-.5, 0. , 0. ],
[ 0. , (1+s_min)**-.5, 0. ],
[ 0. , 0. , 1.+s_min ]],\
'ALPHA':[[1./(1-s_min**2), 0. , 0. ],
[ 0. , 1. ,s_min ],
[ 0. ,s_min , 1. ]],\
'BETA' :[[ 1. , 0. ,s_min ],
[ 0. , 1./(1-s_min**2), 0. ],
[s_min , 0. , 1. ]],\
'GAMMA':[[ 1. ,s_min , 0. ],
[s_min , 1. , 0. ],
[ 0. , 0. , 1./(1-s_min**2)]]}
M_min = np.array(def_matrix[mod])
M_new = np.dot(M_old, M_min)
bsvct = doc.xpath('//crystal/basevect')
for j in range(3):
bdummy = '%22.16f'%(M_new[j,0]) + '%22.16f'%(M_new[j,1]) + '%22.16f'%(M_new[j,2])+' '
bsvct[j].text = bdummy
#---Writing the structure file-----------------------------------------------------------------
OUTOBJ = open(mod.lower()+'-optimized.xml', 'w')
OUTOBJ.write(ET.tostring(root, method ='xml',
pretty_print =True ,
xml_declaration=True ,
encoding ='UTF-8'))
OUTOBJ.close()
print '\n ====================================================================='\
'\n Optimized lattice parameter saved into the file: "'+ mod.lower() +'-optimized.xml".'\
'\n =====================================================================\n'
#--------------------------------------------------------------------------------------------------
#%!%!%--- PLOT DEFINITIONS ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%
params = {'axes.linewidth' : 2.,
'figure.subplot.bottom' : 0.14,
'figure.subplot.right' : 0.93,
'figure.subplot.left' : 0.20,
'xtick.major.pad' : 8,
}
plt.rcParams.update(params)
#%!%!%--- PLOT SECTION ---%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%!%
fig = plt.figure()
ax = fig.add_subplot(111)
fig.subplots_adjust(left=0.20)
if (mod == 'VOL'):
if (eos=='M'):
fit_label = 'Murnaghan fit'
else:
fit_label = 'Birch-Murnaghan fit'
xlabel = u'Volume [a$_0$\u00B3]'
ylabel = 'Energy - E$_{min}$ [mHa]'
plt.text(0.3,0.76, 'E$_{min}$ = '+str(round(E0,7))+' [Ha]' , transform = ax.transAxes , fontsize=17)
plt.text(0.3,0.69, 'V$_{min}$ = '+str(round(V0,4))+u' [a$_0$\u00B3]', transform = ax.transAxes , fontsize=17)
plt.text(0.3,0.62, 'B$_0$ = '+str(round(B0*ToGPa,3))+' [GPa]' , transform = ax.transAxes , fontsize=17)
plt.text(0.3,0.55, 'B$^\prime$ = '+str(round(Bp,3)) , transform = ax.transAxes , fontsize=17)
vmn = min(min(vi), V0)
vmx = max(max(vi), V0)
dv = vmx - vmn
v_eos= np.linspace(vmn-(0.1*dv), vmx+(0.1*dv), 1000)
e_eos= E_eos(p1, v_eos)
xx = [] ; xx = v_eos
yy = [] ; yy = e_eos
x0 = [] ; x0 = vi
y0 = [] ; y0 = ei
yymin = min(yy)
y0min = min(y0)
for i in range(len(yy)): yy[i] = (yy[i] - yymin)*1000
for i in range(len(y0)): y0[i] = (y0[i] - y0min)*1000
if (mod != 'VOL'):
xlabel = 'Physical strain $\epsilon$'
ylabel = 'Energy - E$_{min}$ [mHa]'
fit_label = '4th order polynomial fit'
plt.text(0.3,0.76, 'E$_{min}$ = '+str(round(e_min, 7))+' [Ha]', transform = ax.transAxes , fontsize=17)
plt.text(0.3,0.69, '$\epsilon_{min}$ = '+str(round(s_min,5)) , transform = ax.transAxes , fontsize=17)
xx = [] ; xx = s_fit
yy = [] ; yy = e_fit
x0 = [] ; x0 = si
y0 = [] ; y0 = ei
yymin = min(yy)
y0min = min(y0)
for i in range(len(yy)): yy[i] = (yy[i] - yymin)*1000
for i in range(len(y0)): y0[i] = (y0[i] - y0min)*1000
ax.set_xlabel(xlabel, fontsize = 18)
ax.set_ylabel(ylabel, fontsize = 18)
ax.plot(xx, yy, 'k' ,
color = 'red' ,
linewidth = 2 ,
label = fit_label)
ax.plot(x0, y0, 'o' ,
color = 'green',
markersize = 8 ,
markeredgecolor= 'black',
markeredgewidth= 1 ,
label = 'DFT Calculation')
ax.legend(numpoints=1,loc=9)
for label in ax.xaxis.get_ticklabels(): label.set_fontsize(15)
for label in ax.yaxis.get_ticklabels(): label.set_fontsize(15)
for line in ax.get_xticklines() + ax.get_yticklines():
line.set_markersize(6)
line.set_markeredgewidth(2)
#pyl.grid(True)
ax.xaxis.set_major_locator(MaxNLocator(7))
max_y = max(max(yy), max(y0))
min_y = min(min(yy), min(y0))
max_x = max(max(xx), max(x0))
min_x = min(min(xx), min(x0))
dyy = (max_y-min_y)/15
ax.set_ylim(min_y-dyy,max_y+dyy)
dxx = (max_x-min_x)/18
ax.set_xlim(min_x-dxx,max_x+dxx)
if (mod == 'VOL'):
plt.savefig(eos+'_eos.png', orientation='portrait',format='png',dpi=150)
plt.savefig(eos+'_eos.eps', orientation='portrait',format='eps')
else:
plt.savefig(mod.lower()+'.png', orientation='portrait',format='png',dpi=150)
plt.savefig(mod.lower()+'.eps', orientation='portrait',format='eps')
#plt.show()
#--------------------------------------------------------------------------------------------------
| apache-2.0 |
arkadoel/AprendiendoPython | pandas/pruebasCSV/prueba1.py | 1 | 1703 | __author__ = 'root'
import pandas
from pandas import ExcelWriter
import xlsxwriter
archivo = './train.csv'
if __name__ == '__main__':
data = pandas.read_csv(archivo)
#print(data) prints the loaded data
#data.Name shows the Name column of the .csv
'''
To filter rows:
print(data[data.Sex == 'male'])
'''
hombres = data[data.Sex == 'male']
mujeres = data[data.Sex == 'female']
num_hombres = len(hombres)
num_mujeres = len(mujeres)
print('Numero de hombres ', num_hombres)
print('Numero de mujeres ', num_mujeres)
print(data.columns)
print(data.xs(0))
#save the women's rows to another CSV
#mujeres.to_csv('mujeres.csv', index=False)
print('\r\nAgrupaciones por rangos de edad:')
'''
Group the women into age ranges
'''
menores = mujeres[mujeres.Age < 20]
los_veinte = mujeres[(mujeres.Age >= 20) & (mujeres.Age < 30)]
mayores_treinta = mujeres[mujeres.Age >= 30]
nulos = mujeres[mujeres.Age.isnull()]
print('\tMenores de 20: ', len(menores))
print('\tVeinteañeras', len(los_veinte))
print('\tMayores 30: ', len(mayores_treinta))
print('\tEdad desconocida: ', len(nulos))
print('\t\tTotal mujeres: ', len(mujeres))
'''
SAVE THE DATA TO AN EXCEL FILE
'''
#menores.to_excel('menores.xlsx', sheet_name='menores', engine='xlsxwriter')
writer = pandas.ExcelWriter('menores.xlsx')
menores.to_excel(writer,'Menores de 20')
los_veinte.to_excel(writer, 'Veintena')
mayores_treinta.to_excel(writer,'Mayores de 30')
nulos.to_excel(writer, 'Edad desconocida')
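# Assuming the xlsxwriter engine is used here (it is imported above), writer.book is the underlying
# xlsxwriter Workbook, so add_worksheet() simply appends an extra, empty 'Estadistico' sheet before saving.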
estadistico = writer.book.add_worksheet('Estadistico')
writer.save()
| gpl-3.0 |
othercriteria/StochasticBlockmodel | Network.py | 1 | 8884 | #!/usr/bin/env python
# Network representation and basic operations
# Daniel Klein, 5/10/2012
from os import system, unlink
import numpy as np
import scipy.sparse as sparse
import networkx as nx
import matplotlib.pyplot as plt
from Array import Array
from Covariate import NodeCovariate
class Network(Array):
def __init__(self, N = 0, names = None):
Array.__init__(self, N, N)
if names is None:
self.names = np.array(['%d' % n for n in range(self.N)])
else:
self.names = names
self.rnames = self.names
self.cnames = self.names
self.node_covariates = {}
def new_node_covariate(self, name, as_int = False):
if as_int:
node_cov = NodeCovariate(self.names, dtype = np.int)
else:
node_cov = NodeCovariate(self.names)
self.node_covariates[name] = node_cov
self.row_covariates[name] = node_cov
self.col_covariates[name] = node_cov
return node_cov
def new_node_covariate_int(self, name):
return self.new_node_covariate(name, as_int = True)
def subnetwork(self, inds):
sub_array = self.subarray(inds, inds)
sub = Network(len(inds), self.names[inds])
sub.array = sub_array.array
sub.row_covariates = sub_array.row_covariates
sub.col_covariates = sub_array.col_covariates
if sub_array.offset:
sub.offset = sub_array.offset
sub.edge_covariates = sub_array.edge_covariates
for node_covariate in self.node_covariates:
src = self.node_covariates[node_covariate]
sub.node_covariates[node_covariate] = src.subset(inds)
return sub
def nodes(self):
return self.names
def edges(self):
if self.is_sparse():
nz_i, nz_j = self.array.nonzero()
for n in range(self.array.nnz):
yield (self.names[nz_i[n]], self.names[nz_j[n]])
else:
for i in range(self.N):
for j in range(self.N):
if self.array[i,j]:
yield (self.names[i], self.names[j])
def show(self):
graph = nx.DiGraph()
for n in self.nodes():
graph.add_node(n)
for n_i, n_j in self.edges():
graph.add_edge(n_i, n_j)
pos = nx.nx_pydot.graphviz_layout(graph, prog = 'neato')
nx.draw(graph, pos)
plt.show()
def show_graphviz(self, file = 'out.pdf', splines = True, labels = True):
outfile = open('temp_graphviz.dot', 'w')
outfile.write('digraph G {\n')
outfile.write('size="12,16";\n')
outfile.write('orientation=landscape;\n')
outfile.write('overlap=none;\n')
outfile.write('repulsiveforce=12;\n')
if splines:
outfile.write('splines=true;\n')
for name in self.nodes():
outfile.write('%s [label=""];\n' % name)
for edge in self.edges():
outfile.write('%s -> %s;\n' % edge)
outfile.write('}\n')
outfile.close()
system('fdp -Tps2 temp_graphviz.dot -o temp_graphviz.ps')
unlink('temp_graphviz.dot')
system('ps2pdf temp_graphviz.ps %s' % file)
unlink('temp_graphviz.ps')
def show_heatmap(self, order_by = None,
order_by_row = None, order_by_col = None):
if order_by:
title = 'Network ordered by node covariate\n"%s"' % order_by
o = np.argsort(self.node_covariates[order_by][:])
elif order_by_row:
title = 'Network ordered by row covariate\n"%s"' % order_by_row
o = np.argsort(self.row_covariates[order_by_row][:])
elif order_by_col:
title = 'Network ordered by column covariate\n"%s"' % order_by_col
o = np.argsort(self.col_covariates[order_by_col][:])
else:
title, o = 'Unordered adjacency matrix', np.arange(self.N)
f, (ax_im, ax_ord) = plt.subplots(2, sharex = True)
f.set_figwidth(3)
f.set_figheight(6)
A = self.as_dense()
ax_im.imshow(A[o][:,o]).set_cmap('binary')
ax_im.set_ylim(0, self.N - 1)
ax_im.set_xticks([])
ax_im.set_yticks([])
ax_im.set_title(title)
#plt.setp([ax_im.get_xticklabels(), ax_im.get_yticklabels()],
# visible = False)
if order_by:
ax_ord.scatter(np.arange(self.N), self.node_covariates[order_by][o])
ax_ord.set_xlim(0, self.N - 1)
ax_ord.set_ylim(self.node_covariates[order_by][o[0]],
self.node_covariates[order_by][o[-1]])
plt.show()
def show_offset(self, order_by = None):
if order_by:
title = 'Offsets ordered by node covariate\n"%s"' % order_by
o = np.argsort(self.node_covariates[order_by][:])
else:
title, o = 'Unordered offsets', np.arange(self.N)
f = plt.figure()
ax = f.add_subplot(1, 1, 1)
O = self.initialize_offset().matrix()
ax.imshow(O[o][:,o])
ax.set_xlim(0, self.N - 1)
ax.set_ylim(0, self.N - 1)
ax.set_title(title)
plt.setp([ax.get_xticklabels(), ax.get_yticklabels()],
visible = False)
plt.show()
def show_degree_histograms(self):
# Messy since otherwise row/column sums can overflow...
r = np.array(self.array.asfptype().sum(1),dtype=np.int).flatten()
c = np.array(self.array.asfptype().sum(0),dtype=np.int).flatten()
plt.figure()
plt.subplot(2,1,1)
plt.title('out-degree')
plt.hist(r, bins = max(r))
plt.subplot(2,1,2)
plt.title('in-degree')
plt.hist(c, bins = max(c))
plt.show()
def network_from_networkx(g, cov_names = []):
N = g.number_of_nodes()
names = np.array(g.nodes())
network = Network(N, names)
name_to_index = {}
for i, n in enumerate(names):
name_to_index[n] = i
for s, t in g.edges():
network.array[name_to_index[s],name_to_index[t]] = True
for cov_name in cov_names:
nodes = g.nodes()
covs = [g.node[n][cov_name] for n in nodes]
network.new_node_covariate(cov_name).from_pairs(nodes, covs)
return network
def network_from_file_gexf(path, cov_names = []):
in_network = nx.read_gexf(path)
return network_from_networkx(in_network, cov_names)
def network_from_file_gml(path, cov_names = []):
in_network = nx.read_gml(path)
in_network = nx.DiGraph(in_network)
return network_from_networkx(in_network, cov_names)
def network_from_edges(edges):
# First pass over edges to determine names and number of nodes
names = set()
N = 0
for n_1, n_2 in edges:
if not n_1 in names:
names.add(n_1)
N += 1
if not n_2 in names:
names.add(n_2)
N += 1
# Process list of names and assign indices
network = Network(N, np.array(list(names)))
name_to_index = {}
for i, n in enumerate(names):
name_to_index[n] = i
# Second pass over edges to populate network
for n_1, n_2 in edges:
network[name_to_index[n_1],name_to_index[n_2]] = True
return network
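# Example (hypothetical edge list): network_from_edges([('a', 'b'), ('b', 'c')]) builds a
# three-node directed Network containing the edges a->b and b->c.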
# Some "tests"
if __name__ == '__main__':
net = network_from_file_gexf('data/test.gexf')
net.new_node_covariate('x_0')
net.node_covariates['x_0'].from_pairs([str(i) for i in range(10)],
[i**2 for i in range(10)])
net.new_node_covariate('x_1')
net.node_covariates['x_1'].data[:] = np.random.normal(2,1,net.N)
def f_self(n_1, n_2):
return n_1 == n_2
net.new_edge_covariate('self_edge').from_binary_function_name(f_self)
def f_first_half_dir(n_1, n_2):
return (n_1 < n_2) and (n_2 in ['0','1','2','3','4'])
net.new_edge_covariate('ec_2').from_binary_function_name(f_first_half_dir)
print net.node_covariates['x_0']
print net.node_covariates['x_1']
print net.edge_covariates['self_edge']
print net.edge_covariates['ec_2']
print net.as_dense()
print net.nodes()
net.show()
net_2 = net.subnetwork(np.array([5,0,1,6]))
print net_2.as_dense()
print net_2.node_covariates['x_0']
print net_2.node_covariates['x_1']
print net_2.edge_covariates['self_edge']
print net_2.edge_covariates['ec_2']
net_2.show()
net_3 = Network(10)
ord = np.arange(10)
np.random.shuffle(ord)
for i in range(10):
for j in range(i,10):
net_3.array[ord[i],ord[j]] = True
net_3.offset_extremes()
print net_3.offset.matrix()
print net_3.subnetwork(np.array([2,1,0])).offset.matrix()
net_4 = network_from_file_gml('data/polblogs/polblogs.gml', ['value'])
print net_4.node_covariates['value']
| mit |
mbartling/TAMU_senior_design | mavlink/pymavlink/tools/mavgraph.py | 3 | 6747 | #!/usr/bin/env python
'''
graph a MAVLink log file
Andrew Tridgell August 2011
'''
import sys, struct, time, os, datetime
import math, re
import pylab, matplotlib
from math import *
from pymavlink.mavextra import *
locator = None
formatter = None
def plotit(x, y, fields, colors=[]):
'''plot a set of graphs using date for x axis'''
global locator, formatter
pylab.ion()
fig = pylab.figure(num=1, figsize=(12,6))
ax1 = fig.gca()
ax2 = None
xrange = 0.0
for i in range(0, len(fields)):
if len(x[i]) == 0: continue
if x[i][-1] - x[i][0] > xrange:
xrange = x[i][-1] - x[i][0]
xrange *= 24 * 60 * 60
if formatter is None:
formatter = matplotlib.dates.DateFormatter('%H:%M:%S')
interval = 1
intervals = [ 1, 2, 5, 10, 15, 30, 60, 120, 240, 300, 600,
900, 1800, 3600, 7200, 5*3600, 10*3600, 24*3600 ]
for interval in intervals:
if xrange / interval < 15:
break
locator = matplotlib.dates.SecondLocator(interval=interval)
if not opts.xaxis:
ax1.xaxis.set_major_locator(locator)
ax1.xaxis.set_major_formatter(formatter)
empty = True
ax1_labels = []
ax2_labels = []
for i in range(0, len(fields)):
if len(x[i]) == 0:
print("Failed to find any values for field %s" % fields[i])
continue
if i < len(colors):
color = colors[i]
else:
color = 'red'
(tz, tzdst) = time.tzname
if axes[i] == 2:
if ax2 == None:
ax2 = ax1.twinx()
ax = ax2
if not opts.xaxis:
ax2.xaxis.set_major_locator(locator)
ax2.xaxis.set_major_formatter(formatter)
label = fields[i]
if label.endswith(":2"):
label = label[:-2]
ax2_labels.append(label)
else:
ax1_labels.append(fields[i])
ax = ax1
if opts.xaxis:
if opts.marker is not None:
marker = opts.marker
else:
marker = '+'
if opts.linestyle is not None:
linestyle = opts.linestyle
else:
linestyle = 'None'
ax.plot(x[i], y[i], color=color, label=fields[i],
linestyle=linestyle, marker=marker)
else:
if opts.marker is not None:
marker = opts.marker
else:
marker = 'None'
if opts.linestyle is not None:
linestyle = opts.linestyle
else:
linestyle = '-'
ax.plot_date(x[i], y[i], color=color, label=fields[i],
linestyle=linestyle, marker=marker, tz=None)
pylab.draw()
empty = False
if ax1_labels != []:
ax1.legend(ax1_labels,loc=opts.legend)
if ax2_labels != []:
ax2.legend(ax2_labels,loc=opts.legend2)
if empty:
print("No data to graph")
return
from optparse import OptionParser
parser = OptionParser("mavgraph.py [options] <filename> <fields>")
parser.add_option("--no-timestamps",dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_option("--planner",dest="planner", action='store_true', help="use planner file format")
parser.add_option("--condition",dest="condition", default=None, help="select packets by a condition")
parser.add_option("--labels",dest="labels", default=None, help="comma separated field labels")
parser.add_option("--legend", default='upper left', help="default legend position")
parser.add_option("--legend2", default='upper right', help="default legend2 position")
parser.add_option("--marker", default=None, help="point marker")
parser.add_option("--linestyle", default=None, help="line style")
parser.add_option("--xaxis", default=None, help="X axis expression")
(opts, args) = parser.parse_args()
from pymavlink import mavutil
if len(args) < 2:
print("Usage: mavlogdump.py [options] <LOGFILES...> <fields...>")
sys.exit(1)
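# Example invocation (hypothetical log file name):
# mavgraph.py flight.tlog 'ATTITUDE.roll' 'VFR_HUD.alt:2'
# A ':2' suffix plots a field against the second (right-hand) y axis; a ':1' suffix only takes
# data for that field from the first log file.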
filenames = []
fields = []
for f in args:
if os.path.exists(f):
filenames.append(f)
else:
fields.append(f)
msg_types = set()
multiplier = []
field_types = []
colors = [ 'red', 'green', 'blue', 'orange', 'olive', 'black', 'grey', 'yellow' ]
# work out msg types we are interested in
x = []
y = []
axes = []
first_only = []
re_caps = re.compile('[A-Z_][A-Z0-9_]+')
for f in fields:
caps = set(re.findall(re_caps, f))
msg_types = msg_types.union(caps)
field_types.append(caps)
y.append([])
x.append([])
axes.append(1)
first_only.append(False)
def add_data(t, msg, vars):
'''add some data'''
mtype = msg.get_type()
if mtype not in msg_types:
return
for i in range(0, len(fields)):
if mtype not in field_types[i]:
continue
f = fields[i]
if f.endswith(":2"):
axes[i] = 2
f = f[:-2]
if f.endswith(":1"):
first_only[i] = True
f = f[:-2]
v = mavutil.evaluate_expression(f, vars)
if v is None:
continue
if opts.xaxis is None:
xv = t
else:
xv = mavutil.evaluate_expression(opts.xaxis, vars)
if xv is None:
continue
y[i].append(v)
x[i].append(xv)
def process_file(filename):
'''process one file'''
print("Processing %s" % filename)
mlog = mavutil.mavlink_connection(filename, notimestamps=opts.notimestamps)
vars = {}
while True:
msg = mlog.recv_match(opts.condition)
if msg is None: break
tdays = (msg._timestamp - time.timezone) / (24 * 60 * 60)
tdays += 719163 # pylab wants it since 0001-01-01
add_data(tdays, msg, mlog.messages)
if len(filenames) == 0:
print("No files to process")
sys.exit(1)
if opts.labels is not None:
labels = opts.labels.split(',')
if len(labels) != len(fields)*len(filenames):
print("Number of labels (%u) must match number of fields (%u)" % (
len(labels), len(fields)*len(filenames)))
sys.exit(1)
else:
labels = None
for fi in range(0, len(filenames)):
f = filenames[fi]
process_file(f)
for i in range(0, len(x)):
if first_only[i] and fi != 0:
x[i] = []
y[i] = []
if labels:
lab = labels[fi*len(fields):(fi+1)*len(fields)]
else:
lab = fields[:]
plotit(x, y, lab, colors=colors[fi*len(fields):])
for i in range(0, len(x)):
x[i] = []
y[i] = []
pylab.show()
raw_input('press enter to exit....')
| mit |
Kortemme-Lab/kddg | kddg/api/data.py | 1 | 101829 | #!/usr/bin/python2.4
# encoding: utf-8
"""
data.py
High-level functions for importing data into the DDG database.
Example usage:
# Create an import API instance
importer = DataImportInterface.get_interface_with_config_file(echo_sql = False)
# Access the SQLAlchemy session directly
session = importer.session
# Access the MySQLdb interface layer directly
DDG_db = importer.DDG_db # or importerDDG_db_utf
# Access an RCSB PDB file to the database
importer.add_pdb_from_rcsb('1A2K')
# Access ligand details to the database (note: this will be called by add_pdb_from_rcsb)
importer.add_ligand_by_pdb_code('GTP')
# Update certain properties of RCSB files in the database
importer.update_pdbs(update_sections = set(['Residues', 'Publication']), start_at = None, restrict_to_file_source = 'RCSB')
@todo list:
- ticket 1489: add_pdb_from_rcsb: FileContent (see ticket 1489)
- get_pdb_details (see below)
- PDB Chains (_add_pdb_chains)
ticket 1463: SCOP/SCOPe classifications
add self.pfam_api.? call to get mapping from RCSB PDB files to the UniProt sequences. Add this to a separate function, _add_pdb_uniprot_mapping.
add self.pfam_api.get_pfam_accession_numbers_from_pdb_chain(database_pdb_id, c)) calls. Use this in _add_pdb_uniprot_mapping.
add self.scope_api.get_chain_details(database_pdb_id, c))) calls
ticket 1472: add coordinates
- ticket 1488, 1473: _add_pdb_uniprot_mapping. implement UniProt mapping. The old approach was flawed. Use the new approach (similar to how I used PPComplex as an abstraction layer)
- ticket 1493: add b-factors and DSSP for modified residues
- ticket 1491: electron densities
Created by Shane O'Connor 2015.
Copyright (c) 2015 Shane O'Connor. All rights reserved.
"""
import sys
import pprint
from io import BytesIO
import os
import copy
import json
import zipfile
import traceback
import gzip
import shutil
import sqlite3
import cPickle as pickle
from types import NoneType
import time
import numpy
import pandas
from MySQLdb import OperationalError as MySQLOperationalError
from sqlalchemy import Table, Column, Integer, ForeignKey
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy import create_engine, and_
from sqlalchemy import inspect as sqlalchemy_inspect
from sqlalchemy.exc import TimeoutError as SQLAlchemyTimeoutError
from klab import colortext
from klab.bio.pdb import PDB
from klab.bio.basics import ChainMutation
from klab.fs.fsio import read_file, write_temp_file, open_temp_file, write_file
from klab.bio.pfam import Pfam
from klab.bio.dssp import MonomerDSSP, ComplexDSSP, MissingAtomException
from klab.bio.ligand import Ligand, PDBLigand, LigandMap
from klab.bio.pdbtm import PDBTM
from klab.bio.clustalo import PDBChainSequenceAligner
from klab.db.sqlalchemy_interface import get_single_record_from_query, get_or_create_in_transaction, row_to_dict
from klab.bio import rcsb
from klab.general.strutil import remove_trailing_line_whitespace
from klab.hash.md5 import get_hexdigest
from kddg.api.schema import test_schema_against_database_instance
from kddg.api.schema import PDBFile, PDBChain, PDBMolecule, PDBMoleculeChain, PDBResidue, LigandDescriptor, LigandIdentifier, LigandSynonym, LigandPrice, LigandReference, PDBLigand, PDBLigandFile, PDBIon, FileContent, PDB2PDBChainMap
from kddg.api.schema import Ligand as DBLigand
from kddg.api.schema import Ion as DBIon
from kddg.api.schema import User as DBUser
from kddg.api.schema import Publication, PublicationAuthor, PublicationIdentifier, DeclarativeBase
from kddg.api.layers import *
from kddg.api import dbi
from kddg.api import settings
sys_settings = settings.load()
try:
import magic
except ImportError:
colortext.error('Failed to import magic package. This failure will prevent you from being able to import new file content into the database.')
pass
#################################
# #
# Utility functions #
# #
#################################
def json_dumps(j):
'''All json.dumps calls should use this for consistency.'''
return json.dumps(j, sort_keys=True, indent = 4)
class DataImportInterface(object):
'''This is the data import API class which should be used when adding basic data (PDB files, complex definitions, etc.)
to the database.
from kddg.api.data import DataImportInterface
importer = DataImportInterface(read_file('ddgdb.pw'))
e.g.
importer.add_pdb_from_rcsb('1A2K')
Objects of this class and derived subclasses has three main members:
self.DDG_db - a database interface used to interact directly with the database via MySQL commands
self.DDG_db_utf - the same interface but with UTF support. This should be used when dealing with UTF fields e.g. publication data
self.prediction_data_path - this is the location on the file server where output form jobs of the derived class type (e.g. binding affinity jobs) should be stored.
'''
##################
# #
# Constructors #
# #
##################
def __init__(self, passwd, connect_string, connect_string_utf, username = sys_settings.database.username, hostname = sys_settings.database.hostname, rosetta_scripts_path = None, rosetta_database_path = None, cache_dir = sys_settings.cache.cache_dir, echo_sql = False, port = sys_settings.database.port, file_content_buffer_size = None):
'''
:param passwd:
:param connect_string:
:param username:
:param hostname:
:param rosetta_scripts_path:
:param rosetta_database_path:
:param cache_dir: Used to cache downloaded files e.g. PDB files from the RCSB servers. Particularly useful during testing to avoid spamming their servers with requests.
:param echo_sql: If echo_sql is set then all executed SQL commands are printed to stdout (by default) which may be useful for debugging.
:return:
'''
# Set up MySQLdb connections
passwd = passwd.strip()
self.DDG_db = dbi.ddGDatabase(passwd = passwd, username = username, hostname = hostname, port = port)
self.DDG_db_utf = dbi.ddGDatabase(passwd = passwd, username = username, hostname = hostname, use_utf = True, port = port)
self.cache_dir = cache_dir
self.echo_sql = echo_sql
test_schema_against_database_instance(self.DDG_db)
if self.cache_dir:
self.initialize_cache_directory()
else:
colortext.warning('Warning: No cache directory has been specified in your configuration file. Please look at settings.json.template for an example of how to configure this.\n Not using a cache directory may result in files being retrieved from the RCSB servers multiple times.')
# Set up SQLAlchemy connections
self.connect_string = connect_string
self.connect_string_utf = connect_string_utf
self.engine, self.session = None, None
self.engine_utf, self.session_utf = None, None
self.get_engine(utf = False)
self.get_engine(utf = True)
self.get_session(utf = False)
self.get_session(utf = True)
# File cache - circular buffer-ish with head promotion on access
self.file_content_buffer_size = file_content_buffer_size or 800 # based on some benchmarking using Tina's GSP run, 800 seems a reasonable size (and was more than enough for that run)
self.file_content_cache = {}
self.file_content_buffer = []
self.file_content_cache_hits = 0
self.file_content_cache_misses = 0
assert(isinstance(self.file_content_buffer_size, int))
self.rosetta_scripts_path = rosetta_scripts_path
self.rosetta_database_path = rosetta_database_path
# Parse PDB chain -> Pfam mapping
self.pfam_api = Pfam()
@classmethod
def get_interface_with_config_file(cls, database = sys_settings.database.database, host_config_name = sys_settings.database.host_config_name, rosetta_scripts_path = None, rosetta_database_path = None, my_cnf_path = None, cache_dir = sys_settings.cache.cache_dir, echo_sql = False, port = sys_settings.database.port):
# Uses ~/.my.cnf to get authentication information
### Example .my.cnf (host_config_name will equal myserver):
### [clientmyserver]
### user=username
### password=notmyrealpass
### host=server.domain.com
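### The two sqlalchemy.* keys below are also required by this method (example values only; replace
### <database> with the database name passed in via the 'database' argument):
### sqlalchemy.<database>.url=mysql+mysqldb://username:notmyrealpass@server.domain.com:3306/<database>
### sqlalchemy.<database>.url.utf=mysql+mysqldb://username:notmyrealpass@server.domain.com:3306/<database>?charset=utf8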
if not my_cnf_path:
my_cnf_path = os.path.expanduser(os.path.join('~', '.my.cnf'))
if not os.path.isfile(os.path.expanduser(my_cnf_path)):
raise Exception("A .my.cnf file must exist at: " + my_cnf_path)
# These four variables must be set in a section of .my.cnf named host_config_name
user = None
password = None
host = None
connection_string = None
connection_string_utf = None
connection_string_key = 'sqlalchemy.{0}.url'.format(database)
connection_string_key_utf = 'sqlalchemy.{0}.url.utf'.format(database)
file_content_buffer_size = None
with open(my_cnf_path, 'r') as f:
parsing_config_section = False
for line in f:
if line.strip() == '[client%s]' % host_config_name:
parsing_config_section = True
elif line.strip() == '':
parsing_config_section = False
elif parsing_config_section:
if '=' in line:
tokens = line.strip().split('=')
key, val = tokens[0], '='.join(tokens[1:]) # values may contain '=' signs
key, val = key.strip(), val.strip()
if key == 'user':
user = val
elif key == 'password':
password = val
elif key == 'cache_dir':
cache_dir = val
elif key == 'file_content_buffer_size':
file_content_buffer_size = int(val)
elif key == 'host':
host = val
elif key == 'port':
port = int(val)
elif key == connection_string_key:
connection_string = val
elif key == connection_string_key_utf:
connection_string_utf = val
else:
parsing_config_section = False
if not user or not password or not host or not connection_string or not connection_string_utf:
raise Exception("Couldn't find host(%s), username(%s), password, or connection string in section %s in %s" % (host, user, host_config_name, my_cnf_path) )
return cls(password, connection_string, connection_string_utf, username = user, hostname = host, rosetta_scripts_path = rosetta_scripts_path, rosetta_database_path = rosetta_database_path, cache_dir = cache_dir, echo_sql = echo_sql, port = port, file_content_buffer_size = file_content_buffer_size)
def __del__(self):
pass #self.DDG_db.close() #self.ddGDataDB.close()
#############################
# #
# SQLAlchemy Engine setup #
# #
#############################
def get_engine(self, utf = False):
if utf:
if not self.engine_utf:
self.engine_utf = create_engine(self.connect_string_utf, echo = self.echo_sql)
return self.engine_utf
else:
if not self.engine:
self.engine = create_engine(self.connect_string, echo = self.echo_sql)
return self.engine
def get_connection(self, utf = False):
# e.g. connection = importer.get_connection(); connection.execute("SELECT * FROM User")
engine = self.get_engine(utf = utf)
return engine.connect()
def get_session(self, new_session = False, autoflush = True, autocommit = False, utf = False):
engine = self.get_engine(utf = utf)
if new_session or ((not(utf) and not(self.session)) or (utf and not(self.session_utf))):
maker_ddgdatabase = sessionmaker(autoflush = autoflush, autocommit = autocommit)
s = scoped_session(maker_ddgdatabase)
DeclarativeBaseDDG = declarative_base()
metadata_ddg = DeclarativeBaseDDG.metadata
s.configure(bind=engine)
metadata_ddg.bind = engine
if new_session:
return s
else:
if utf:
self.session_utf = s
else:
self.session = s
if utf:
return self.session_utf
else:
return self.session
def renew(self, utf = False):
self.session = self.get_session(new_session = True, utf = utf)
#################################
# #
# Data removal API #
# #
#################################
def remove_ligands(self):
'''This function should not generally be called. It was added while testing the ligand addition code.
Removals are protected by sessions to prevent partial deletions and ligands will only be removed with this
function if there are no corresponding records in other tables e.g. PDBLigand.
I initially added ligands based on IDs extracted from PDB files; however, that list included ions like FE2, so I wanted
to scrub all existing records and only add ligands with more than one atom to the Ligand table. Ions are now added to the
Ion table.
'''
tsession = self.get_session()
ligand_ids = [l.ID for l in tsession.query(DBLigand)]
for ligand_id in ligand_ids:
tsession = self.get_session(new_session = True) # do not allow partial deletions
try:
colortext.message('Removing ligand {0}.'.format(ligand_id))
for ltbl in [LigandDescriptor, LigandIdentifier, LigandPrice, LigandReference, LigandSynonym]:
tsession.query(ltbl).filter(ltbl.LigandID == ligand_id).delete()
tsession.query(DBLigand).filter(DBLigand.ID == ligand_id).delete()
tsession.commit()
tsession.close()
print('Success.\n')
except Exception, e:
colortext.error('Failure.')
print(str(e))
print(traceback.format_exc() + '\n')
tsession.rollback()
tsession.close()
print('')
#################################
# #
# Data update API #
# #
#################################
def update_pdbs(self, pdb_ids = [], update_sections = set(), start_at = None, restrict_to_file_source = None, pdb_ligand_params_files = {}):
'''Updates all or selected data for all or selected PDB files in the database, enumerating in alphabetical order.
If start_at is specified, the update begins at the specified PDB identifier.
pdb_ligand_params_files should be a mapping from pdb_id -> ligand_code -> params_file_path
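e.g. pdb_ligand_params_files = {'1A2K': {'GTP': '/path/to/GTP.params'}} (hypothetical params file path).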
'''
if not pdb_ids:
pdb_ids = [r for r in self.DDG_db.execute_select('SELECT ID, FileSource FROM PDBFile ORDER BY ID')]
counter = 0
num_pdb_ids = len(pdb_ids)
hit_starting_pdb = False
for r in pdb_ids:
counter += 1
pdb_id = r['ID']
if (not restrict_to_file_source) or (r['FileSource'] == restrict_to_file_source):
if (not start_at) or (r['ID'].upper() == start_at):
hit_starting_pdb = True
if hit_starting_pdb:
colortext.message('Updating data for {0} ({1}/{2}).'.format(pdb_id, counter, num_pdb_ids))
tsession = self.get_session(new_session = True)
ligand_params_file_paths = pdb_ligand_params_files.get(pdb_id, {})
self.add_pdb_data(tsession, pdb_id, update_sections = update_sections, ligand_params_file_paths = ligand_params_file_paths)
tsession.commit()
tsession.close()
if not hit_starting_pdb:
raise Exception('We never hit the starting PDB "{0}".'.format(start_at))
#################################
# #
# Ligand entry - public API #
# #
# Missing tables: #
# LigandPrice #
# LigandReference #
# #
#################################
def add_ligand_by_pdb_code(self, pdb_code):
'''This function adds a ligand to the database using the ligand's PDB code. The insertion is handled by a transaction.
The value of the ID field of the Ligand record is returned.
Touched tables:
Ligand
LigandDescriptor
LigandIdentifier
LigandSynonym
'''
colortext.message('Adding ligand {0}'.format(pdb_code))
l = Ligand.retrieve_data_from_rcsb(pdb_code, cached_dir = '/tmp')
colortext.ppurple(l)
tsession = self.get_session(new_session = True) # As this may be called by another function, we want to keep the ligand entry separate from other transactions.
try:
# Create the main ligand record
if l.InChI == None:
# Error handling for the unknown ligands
assert(l.PDBCode == 'UNL' or l.PDBCode == 'UNK' or l.PDBCode == 'UNX')
l.InChI = l.PDBCode
l.InChIKey = l.PDBCode
db_ligand = get_or_create_in_transaction(tsession, DBLigand, l.__dict__, missing_columns = ['ID'])
# Create the ligand descriptor records
descriptor_fieldnames = [c.name for c in list(sqlalchemy_inspect(LigandDescriptor).columns)]
for descriptor in l.descriptors:
descriptor = copy.deepcopy(descriptor)
descriptor['LigandID'] = db_ligand.ID
db_ligand_descriptor = get_or_create_in_transaction(tsession, LigandDescriptor, descriptor, missing_columns = ['ID'])
for identifier in l.identifiers:
identifier = copy.deepcopy(identifier)
identifier['LigandID'] = db_ligand.ID
db_ligand_identifier = get_or_create_in_transaction(tsession, LigandIdentifier, identifier, missing_columns = ['ID'])
for synonym in l.synonyms:
db_ligand_synonym = get_or_create_in_transaction(tsession, LigandSynonym, dict(LigandID = db_ligand.ID, Synonym = synonym.strip()))
db_ligand_id = db_ligand.ID
tsession.commit()
tsession.close()
print('Success.\n')
return db_ligand_id
except:
colortext.error('Failure.')
tsession.rollback()
tsession.close()
raise
###########################################################################################
## File management layer
##
## This part of the API is responsible for file content abstraction
###########################################################################################
@informational_file
def get_file_id(self, content, tsession = None, hexdigest = None):
'''Searches the database to see whether the FileContent already exists. The hex digest and filesize are used as
heuristics to narrow down the candidate records; if a candidate has the same hex digest and file size then we do a
straight comparison of the contents.
If the FileContent exists, the value of the ID field is returned else None is returned.
'''
tsession = tsession or self.get_session()
existing_filecontent_id = None
hexdigest = hexdigest or get_hexdigest(content)
filesize = len(content)
for r in tsession.execute('SELECT ID FROM FileContent WHERE MD5HexDigest=:hd AND Filesize=:fsz', dict(hd = hexdigest, fsz = filesize)):
if self.get_file_content_from_cache(r['ID']) == content:
assert(existing_filecontent_id == None) # content uniqueness check
existing_filecontent_id = r['ID']
return existing_filecontent_id
def _add_file_content(self, file_content, tsession = None, rm_trailing_line_whitespace = False, forced_mime_type = None):
'''Takes file file_content (and an option to remove trailing whitespace from lines e.g. to normalize PDB files), adds
a new record if necessary, and returns the associated FileContent.ID value.'''
tsession = tsession or self.get_session()
if rm_trailing_line_whitespace:
file_content = remove_trailing_line_whitespace(file_content)
# Check to see whether the file has been uploaded before
hexdigest = get_hexdigest(file_content)
existing_filecontent_id = self.get_file_id(file_content, tsession = tsession, hexdigest = hexdigest)
# Create the FileContent record if the file is a new file
if existing_filecontent_id == None:
# Determining the MIME type
mime_type = forced_mime_type
if not mime_type:
# Note: in case the wrong mime-types are being returned, try saving to file first and then calling magic.from_file.
# See commit c62883b58649bd813bf022f7d1193abb06f1676d for the code. This used to be necessary for some odd reason.
mime_type = magic.from_buffer(file_content, mime = True)
# Create the database record
# Note: We have already searched the file cache and database for uniqueness so we do NOT call get_or_create_in_transaction here.
file_content_record = FileContent(**dict(
Content = file_content,
MIMEType = mime_type,
Filesize = len(file_content),
MD5HexDigest = hexdigest
))
tsession.add(file_content_record)
tsession.flush()
existing_filecontent_id = file_content_record.ID
assert(existing_filecontent_id != None)
return existing_filecontent_id
def get_file_content_cache_stats(self):
'''Returns basic statistics on the file content cache access.'''
return dict(
size = self.file_content_buffer_size,
hits = self.file_content_cache_hits,
misses = self.file_content_cache_misses
)
def get_file_content_from_cache(self, file_content_id):
# Sanity check
assert(len(self.file_content_cache) == len(self.file_content_buffer))
assert(sorted(self.file_content_cache.keys()) == sorted(self.file_content_buffer))
if file_content_id not in self.file_content_cache:
self.file_content_cache_misses += 1
file_content = self.get_session().query(FileContent).filter(FileContent.ID == file_content_id).one()
record = row_to_dict(file_content)
# Add the file content to the API cache
self.file_content_buffer.append(file_content_id)
self.file_content_cache[file_content_id] = record['Content']
num_records_to_remove = max(len(self.file_content_buffer) - self.file_content_buffer_size, 0)
if num_records_to_remove > 0:
for stored_file_content_id in self.file_content_buffer[:num_records_to_remove]:
del self.file_content_cache[stored_file_content_id]
self.file_content_buffer = self.file_content_buffer[num_records_to_remove:]
assert(len(self.file_content_buffer) == self.file_content_buffer_size)
assert(len(self.file_content_cache) == len(self.file_content_buffer))
assert(sorted(self.file_content_cache.keys()) == sorted(self.file_content_buffer))
else:
self.file_content_cache_hits += 1
# Promote the most recently active files to the start of the buffer
self.file_content_buffer.remove(file_content_id)
self.file_content_buffer.append(file_content_id)
return self.file_content_cache[file_content_id]
#################################
# #
# PDB data retrieval API #
# #
#################################
def get_pdb_details(self, pdb_ids, cached_pdb_details = None):
'''Returns the details stored in the database about the PDB files associated with pdb_ids e.g. chains, resolution,
technique used to determine the structure etc.'''
pdbs = {}
cached_pdb_ids = []
if cached_pdb_details:
cached_pdb_ids = set(cached_pdb_details.keys())
for pdb_id in pdb_ids:
if pdb_id in cached_pdb_ids:
pdbs[pdb_id] = cached_pdb_details[pdb_id]
else:
record = self.DDG_db.execute_select('SELECT * FROM PDBFile WHERE ID=%s', parameters=(pdb_id,))[0]
p = PDB(record['Content'], parse_ligands = True)
pdb_chain_lengths = {}
for chain_id, s in p.atom_sequences.iteritems():
pdb_chain_lengths[chain_id] = len(s)
# todo: get the list of protein chains and PDB residues from the database and assert that they are the same
# as what were extracted from the PDB file.
# maybe change 'chains' below to 'protein_chains'
pdbs[pdb_id] = dict(
chains = pdb_chain_lengths,
TM = record['Transmembrane'],
Technique = record['Techniques'],
XRay = record['Techniques'].find('X-RAY') != -1,
Resolution = record['Resolution'],
)
return pdbs
def get_rcsb_record(self, pdbfile_db_record, tsession = None):
'''pdbfile_db_record should be a kddg.api.schema.py::PDBFile object.
Winds up the 'derived from' tree to find the RCSB file that this file originated from.
Throws an exception if there are no such files.
This is useful for a number of reasons:
- Looking up the resolution of the original structure, the determination technique, and its b-factors
We do not copy this information into derived structures as it is generally meaningless (e.g. for PDB_REDO
structures or structures minimized or repacked with some force-field).
- Determining the name of the molecules in the derived PDB file.
etc.
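Example (sketch): for a PDBFile record describing a structure derived, possibly indirectly, from 1A2K,
this walks the DerivedFrom links until it reaches the RCSB record for 1A2K and returns that record.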
'''
if not tsession:
tsession = self.get_session()
try:
c = 0
while (pdbfile_db_record.DerivedFrom) and (pdbfile_db_record.FileSource != 'RCSB') and (c < 40): # the last expression should be unnecessary but just in case...
pdbfile_db_record = tsession.query(PDBFile).filter(PDBFile.ID == pdbfile_db_record.DerivedFrom).one()
c += 1
assert(pdbfile_db_record.FileSource == 'RCSB')
return pdbfile_db_record
except Exception, e:
raise Exception('Failed to retrieve an RCSB record corresponding to "{0}".'.format(pdbfile_db_record.ID))
####################################
# #
# Publication entry - public API #
# #
####################################
# @todo: code debt: write the add_publication function (using the RIS parsing module in klab and the PubMed/DOI downloading modules).
a = """
def add_publication(self, ...):
@todo: write the add_publication function (using the RIS parsing module in klab and the PubMed/DOI downloading modules).
publication = get_or_create_in_transaction(tsession, dbmodel.Publication, dict(
ID = 'PMID:23041932',
DGUnit = 'fitness',
DDGConvention = 'ProTherm',
DDGProThermSignNotes = 'Note: The DeltaE values are fitness values so negative values indicate worse binding. This is therefore comparable to the ProTherm DDG convention.',
DDGValuesNeedToBeChecked = False,
RIS = '''TY - JOUR
AU - McLaughlin Jr, Richard N.
AU - Poelwijk, Frank J.
AU - Raman, Arjun
AU - Gosal, Walraj S.
AU - Ranganathan, Rama
TI - The spatial architecture of protein function and adaptation
JA - Nature
PY - 2012/11/01/print
VL - 491
IS - 7422
SP - 138
EP - 142
PB - Nature Publishing Group, a division of Macmillan Publishers Limited. All Rights Reserved.
SN - 0028-0836
UR - http://dx.doi.org/10.1038/nature11500
M3 - 10.1038/nature11500
N1 - 10.1038/nature11500
L3 - http://www.nature.com/nature/journal/v491/n7422/abs/nature11500.html#supplementary-information
ER - ''',
DOI = '10.1038/nature11500',
Title = 'The spatial architecture of protein function and adaptation',
Issue = '7422',
Volume = '491',
StartPage = '138',
EndPage = '142',
PublicationDate = datetime.date(2012, 11, 1),
PublicationYear = '2012',
Publication = 'Nature',
URL = 'http://www.nature.com/nature/journal/v491/n7422/full/nature11500.html',
Notes = None,
DGNotes = None,
DGUnitUsedInProTherm = None,
))
dataset_publication_ID = publication.ID
"""
#################################
# #
# PDB file entry - public API #
# #
# Missing tables: #
# FileContent #
# #
# Protein #
# ProteinDatabaseIdentifier#
# ProteinName #
# ProteinOrganism #
# ProteinResidue #
# ProteinSegment #
# #
#################################
def initialize_cache_directory(self):
pdb_dir = os.path.join(self.cache_dir, 'pdbs')
try:
os.makedirs(pdb_dir)
except: pass
if not os.path.exists(pdb_dir):
raise colortext.Exception('The cache directory "{0}" could not be created.'.format(self.cache_dir))
def _retrieve_pdb_contents(self, pdb_id, fresh = False):
if (not fresh) and (self.cache_dir):
cached_filepath = os.path.join(self.cache_dir, 'pdbs', '{0}.pdb'.format(pdb_id))
if os.path.exists(cached_filepath):
print('Retrieving locally cached file for {0}.'.format(pdb_id))
return read_file(cached_filepath)
elif os.path.exists(cached_filepath + '.gz'):
print('Retrieving locally cached file for {0}.'.format(pdb_id))
return read_file(cached_filepath + '.gz')
print('Retrieving {0} from RCSB.'.format(pdb_id))
contents = rcsb.retrieve_pdb(pdb_id)
if self.cache_dir:
write_file(os.path.join(self.cache_dir, 'pdbs', '{0}.pdb'.format(pdb_id)), contents)
return contents
def add_pdb_from_rcsb(self, pdb_id, previously_added = set(), update_sections = set(), trust_database_content = False, ligand_params_file_paths = {}, debug = False):
'''NOTE: This API is used to create and analyze predictions or retrieve information from the database.
This function adds new raw data to the database and does not seem to belong here. It should be moved into
an admin API instead.
This function imports a PDB into the database, creating the associated molecule, chain, residue, etc. records.
If previously_added contains pdb_id then we return immediately. Otherwise, we step through the full PDB file import. This
is done purely for efficiency e.g. if we were to add 100 designed files based off the same RCSB PDB,
we would not want to run the full import code for the RCSB PDB file more than once.
If trust_database_content is True then we return if we find a PDBFile record without delving into the related tables.
This is useful in certain circumstances e.g. when adding derived PDB files using add_designed_pdb.
Touched tables:
PDBFile
todo: FileContent (see ticket 1489)
'''
assert(not ligand_params_file_paths) # todo: handle this case when we need to. See the implementation for add_designed_pdb
if pdb_id in previously_added:
return pdb_id
assert(len(pdb_id) == 4)
pdb_id = pdb_id.upper()
# RCSB files should be a straightforward case so we will use a new session and commit all changes
tsession = self.get_session(new_session = True)
try:
pdb_object = None
db_record_object = get_single_record_from_query(tsession.query(PDBFile).filter(PDBFile.ID == pdb_id))
is_new_record = db_record_object == None
if not is_new_record:
print('Retrieving {0} from database.'.format(pdb_id))
pdb_object = PDB(db_record_object.Content, parse_ligands = True)
assert(db_record_object.FileSource == 'RCSB')
if trust_database_content:
print('Trusting the existing data and early-outing.')
return pdb_id
else:
db_record_object = PDBFile()
contents = self._retrieve_pdb_contents(pdb_id)
pdb_object = PDB(contents, parse_ligands = True)
db_record_object.ID = pdb_id
db_record_object.FileSource = 'RCSB'
db_record_object.Content = contents
update_sections = set() # add all related data
# Fill in the FASTA, Resolution, Techniques, Transmembrane, and b-factor fields of a PDBFile record
self._add_pdb_file_information(db_record_object, pdb_object, pdb_id, is_new_record, is_rcsb_pdb = True)
# Add a new PDBFile record
if is_new_record:
db_record_object.UserID = None
db_record_object.Notes = None
db_record_object.DerivedFrom = None
tsession.add(db_record_object)
tsession.flush()
# Publication
if is_new_record or (not db_record_object.Publication):
self._add_pdb_publication(tsession, db_record_object.ID, pdb_object = pdb_object)
# add all other data
self.add_pdb_data(tsession, pdb_id, update_sections = update_sections, ligand_params_file_paths = ligand_params_file_paths)
previously_added.add(pdb_id)
print('Success.\n')
if debug:
print('Debug call - rolling back the transaction.\n')
tsession.rollback()
else:
tsession.commit()
tsession.close()
except:
colortext.error('Failure.')
tsession.rollback()
tsession.close()
raise
def _add_pdb_file_information(self, db_record_object, pdb_object, pdb_id, is_new_record, is_rcsb_pdb = False):
'''Fills in the FASTA, Resolution, Techniques, Transmembrane, and b-factor fields of a PDBFile record.'''
# Checks
pdb_atom_chains = pdb_object.atom_chain_order
if not pdb_atom_chains:
raise Exception('No ATOM chains were found in the PDB file.')
foundRes = pdb_object.CheckForPresenceOf(["CSE", "MSE"])
if foundRes:
colortext.error("The PDB %s contains residues which could affect computation (%s)." % (pdb_id, ', '.join(foundRes, )))
if "CSE" in foundRes:
colortext.error("The PDB %s contains CSE. Check." % pdb_id)
if "MSE" in foundRes:
colortext.error("The PDB %s contains MSE. Check." % pdb_id)
# FASTA
if is_new_record or (not db_record_object.FASTA):
db_record_object.FASTA = pdb_object.create_fasta()
# Resolution
if is_new_record or (not db_record_object.Resolution):
resolution = pdb_object.get_resolution()
if not resolution:
colortext.error("Could not determine resolution for {0}.".format(pdb_id))
if resolution == "N/A":
resolution = None
db_record_object.Resolution = resolution
# Techniques
if is_rcsb_pdb:
if is_new_record or (not db_record_object.Techniques):
db_record_object.Techniques = pdb_object.get_techniques()
# Transmembrane
if is_rcsb_pdb:
if is_new_record or (db_record_object.Transmembrane == None):
pdbtmo = PDBTM(read_file(sys_settings.PDBTM.xml)) # this must be set up as a parameter in settings.json
pdb_id_map = pdbtmo.get_pdb_id_map()
uc_pdb_id_map = {}
for k, v in pdb_id_map.iteritems():
uc_pdb_id_map[k.upper()] = v
if pdb_id in uc_pdb_id_map:
colortext.warning('{0} is a transmembrane protein.'.format(pdb_id))
db_record_object.Transmembrane = pdb_id in pdb_id_map
# B-factors
overall_bfactors = pdb_object.get_B_factors().get('Overall')
db_record_object.BFactorMean = overall_bfactors['mean']
db_record_object.BFactorDeviation = overall_bfactors['stddev']
def is_session_utf(self, tsession):
return str(tsession.bind.url).lower().find('utf') != -1
def get_pdb_object(self, database_pdb_id, tsession = None):
'''Create a PDB object from content in the database.'''
if tsession:
assert(not(self.is_session_utf(tsession)))
tsession = tsession or self.get_session()
assert(not(self.is_session_utf(tsession)))
db_record = get_single_record_from_query(tsession.query(PDBFile).filter(PDBFile.ID == database_pdb_id))
assert(db_record)
return PDB(db_record.Content, parse_ligands = True)
def add_pdb_data(self, tsession, database_pdb_id, update_sections = set(), ligand_mapping = {}, chain_mapping = {}, ligand_params_file_paths = {}):
'''
The point of separating the data entry into these sub-functions and calling them from this function is so we
have an API to update the information for specific PDB files.
database_pdb_id is the RCSB ID for RCSB files and a custom ID for other (designed) structures.
If tsession is None (e.g. if this was called directly outside of a transaction), create a transaction
session for the remaining inner calls. If update_sections is non-empty, just call those specific inner functions.
Note: If the caller creates a transaction then it is responsible for committing/rolling back the transaction.
ligand_mapping = {}, chain_mapping = {}
:param tsession:
:param database_pdb_id:
:param update_sections:
:param ligand_mapping: A mapping from ligand IDs (e.g. "FPP") to RCSB IDs. This is only necessary for non-RCSB files as these files may have modified IDs.
:param chain_mapping: A mapping from chain IDs (e.g. "A") to the chain in the original RCSB file. This is only necessary for non-RCSB files as these files may have modified chain IDs (e.g. changing the ligand chain to "X").
:return:
'''
if not tsession:
tsession = self.get_session(new_session = True)
# Retrieve the PDB object
pdb_object = self.get_pdb_object(database_pdb_id, tsession = tsession)
if not(update_sections) or ('Chains' in update_sections):
colortext.warning('*** Chains ***')
self._add_pdb_chains(tsession, database_pdb_id, pdb_object, chain_mapping = chain_mapping)
if not(update_sections) or ('Molecules' in update_sections):
colortext.warning('*** Molecules ***')
self._add_pdb_molecules(tsession, database_pdb_id, pdb_object, chain_mapping = chain_mapping)
if not(update_sections) or ('Residues' in update_sections):
colortext.warning('*** Residues ***')
self._add_pdb_residues(tsession, database_pdb_id, pdb_object)
if not(update_sections) or ('Ligands' in update_sections):
colortext.warning('*** Ligands ***')
self._add_pdb_rcsb_ligands(tsession, database_pdb_id, pdb_object, ligand_mapping, ligand_params_file_paths = ligand_params_file_paths)
if not(update_sections) or ('Ions' in update_sections):
colortext.warning('*** Ions ***')
self._add_pdb_rcsb_ions(tsession, database_pdb_id, pdb_object)
#if not(update_sections) or ('UniProt' in update_sections):
# colortext.warning('*** UniProt ***')
# self._add_pdb_uniprot_mapping(tsession, database_pdb_id, pdb_object)
def _add_pdb_chains(self, tsession, database_pdb_id, pdb_object = None, chain_mapping = {}):
'''
Touched tables:
PDBChain
'''
pdb_object = pdb_object or self.get_pdb_object(database_pdb_id, tsession = tsession)
db_chains = {}
for r in tsession.query(PDBChain).filter(PDBChain.PDBFileID == database_pdb_id).order_by(PDBChain.Chain):
db_chains[r.Chain] = r
db_chains_ids = sorted(db_chains.keys())
chain_ids = sorted(set(pdb_object.seqres_sequences.keys() + pdb_object.atom_sequences.keys() + pdb_object.chain_types.keys()))
# Sanity checks for derived structures
self._check_derived_record_against_rcsb_record(tsession, database_pdb_id, pdb_object, chain_mapping)
if chain_ids != db_chains_ids:
#colortext.warning('PDB chains: {0}\t DB chains: {1}'.format(','.join(chain_ids), ','.join(db_chains_ids)))
#colortext.error('Missing chains.')
new_chain_ids = sorted(set(chain_ids).difference(db_chains_ids))
for c in new_chain_ids:
db_chain = get_or_create_in_transaction(tsession, PDBChain, dict(
PDBFileID = database_pdb_id,
Chain = c,
MoleculeType = pdb_object.chain_types[c]
), missing_columns = ['WildtypeProteinID', 'FullProteinID', 'SegmentProteinID', 'WildtypeAlignedProteinID', 'AcquiredProteinID', 'Coordinates'])
db_chains = {}
for r in tsession.query(PDBChain).filter(PDBChain.PDBFileID == database_pdb_id):
db_chains[r.Chain] = r
db_chains_ids = sorted(db_chains.keys())
assert(chain_ids == db_chains_ids)
for chain_id in pdb_object.chain_types.keys():
if pdb_object.chain_types[chain_id] != db_chains[chain_id].MoleculeType:
db_chain = tsession.query(PDBChain).filter(and_(PDBChain.PDBFileID == database_pdb_id, PDBChain.Chain == chain_id)).one() # we expect exactly one record
db_chain.MoleculeType = pdb_object.chain_types[chain_id]
tsession.flush()
return
# todo: Extract and store the coordinates. Note: the block below is currently unreachable (it follows the
# unconditional return above) and references ppi_api / pdb_id which are not defined in this method; it is
# kept only as a template for implementing coordinate storage here.
for chain_id in pdb_object.atom_sequences.keys():
chain_dataframe = pdb_object.extract_xyz_matrix_from_chain(chain_id)
if isinstance(chain_dataframe, NoneType):
raise Exception('The coordinates dataframe could not be created for {0}, chain {1}'.format(pdb_id, chain_id))
ufname, cfname = None, None
if isinstance(ppi_api.get_pdb_chain_coordinates(pdb_id, chain_id), NoneType):
try:
f, ufname = open_temp_file('/tmp', suffix = '.hdf5')
f.close()
f, cfname = open_temp_file('/tmp', suffix = '.hdf5.gz')
f.close()
store = pandas.HDFStore(ufname)
store['dataframe'] = chain_dataframe
store.close()
content = read_file(ufname, binary = True)
with gzip.open(cfname, 'wb') as f:
f.write(content)
f = open(cfname)
zipped_contents = f.read()
f.close()
ppi_api.DDG_db.execute('UPDATE PDBChain SET Coordinates=%s WHERE PDBFileID=%s AND Chain=%s', parameters=(zipped_contents, pdb_id, chain_id))
os.remove(ufname)
os.remove(cfname)
except Exception, e:
print('Failed to add coordinates for {0}, chain {1}'.format(pdb_id, chain_id))
if ufname: os.remove(ufname)
if cfname: os.remove(cfname)
print(str(e))
print(traceback.format_exc())
else:
print(pdb_id + chain_id + ' has coordinates')
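# A cleaner sketch of the round-trip the prototype above intends (assumptions:
# the same pandas/gzip approach; hdf5_path and df are placeholder names):
#
#   store = pandas.HDFStore(hdf5_path)
#   store['dataframe'] = df
#   store.close()
#   with open(hdf5_path, 'rb') as f_in, gzip.open(hdf5_path + '.gz', 'wb') as f_out:
#       f_out.write(f_in.read())
#   # The gzipped bytes go into PDBChain.Coordinates. To recover the dataframe,
#   # decompress back to an .hdf5 file and call:
#   #   df = pandas.read_hdf(restored_hdf5_path, 'dataframe')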
def _add_pdb_molecules(self, tsession, database_pdb_id, pdb_object = None, allow_missing_molecules = False, chain_mapping = {}):
'''
Add PDBMolecule and PDBMoleculeChain records
Touched tables:
PDBMolecule
PDBMoleculeChain
'''
assert(allow_missing_molecules == False) # todo: do we ever use allow_missing_molecules? We can inspect that case when it presents itself
pdb_object = pdb_object or self.get_pdb_object(database_pdb_id, tsession = tsession)
# Sanity checks for derived structures
self._check_derived_record_against_rcsb_record(tsession, database_pdb_id, pdb_object, chain_mapping)
if not(chain_mapping):
try:
molecules = pdb_object.get_molecules_and_source()
except MissingRecordsException:
molecules = []
for molecule in molecules:
chains = molecule['Chains']
molecule['PDBFileID'] = database_pdb_id
molecule['Organism'] = molecule['OrganismScientificName'] or molecule['OrganismCommonName']
md = {}
for k in ['PDBFileID', 'MoleculeID', 'Name', 'Organism', 'Fragment', 'Synonym', 'Engineered', 'EC', 'Mutation', 'OtherDetails']:
md[k] = molecule[k]
# Add the PDBMolecule record
db_molecule = get_or_create_in_transaction(tsession, PDBMolecule, md)
# Add the PDBMoleculeChain records
for c in chains:
try:
db_molecule_chain = get_or_create_in_transaction(tsession, PDBMoleculeChain, dict(
PDBFileID = database_pdb_id,
MoleculeID = md['MoleculeID'],
Chain = c
))
except:
if allow_missing_molecules: pass
else: raise
else:
# Copy the molecule information from the original RCSB structure
# First, get the DB records for the derived structure and the original RCSB structure
db_record = tsession.query(PDBFile).filter(PDBFile.ID == database_pdb_id).one()
assert(db_record.FileSource != 'RCSB')
rcsb_record = self.get_rcsb_record(db_record, tsession = tsession)
# Get the list of RCSB chains
rcsb_chains = chain_mapping.values()
# Get the list of PDB molecules associated with chains that are in the derived PDB (accounting for chain renaming)
rcsb_molecule_chains = {} # a dict mapping RCSB chain IDs to the associated PDBMoleculeChain record
rcsb_molecule_ids = set()
for r in tsession.query(PDBMoleculeChain).filter(PDBMoleculeChain.PDBFileID == rcsb_record.ID):
if r.Chain in rcsb_chains:
rcsb_molecule_chains[r.Chain] = r
rcsb_molecule_ids.add(r.MoleculeID)
# Add the PDBMolecule records
for r in tsession.query(PDBMolecule).filter(PDBMolecule.PDBFileID == rcsb_record.ID):
if r.MoleculeID in rcsb_molecule_ids:
db_molecule = get_or_create_in_transaction(tsession, PDBMolecule, dict(
PDBFileID = database_pdb_id,
MoleculeID = r.MoleculeID,
Name = r.Name,
Organism = r.Organism,
Fragment = r.Fragment,
Synonym = r.Synonym,
Engineered = r.Engineered,
EC = r.EC,
Mutation = r.Mutation,
OtherDetails = r.OtherDetails,
))
# Add the PDBMoleculeChain records
for derived_chain_id, rcsb_chain_id in sorted(chain_mapping.iteritems()):
associated_molecule_id = rcsb_molecule_chains[rcsb_chain_id].MoleculeID
try:
db_molecule_chain = get_or_create_in_transaction(tsession, PDBMoleculeChain, dict(
PDBFileID = database_pdb_id,
MoleculeID = associated_molecule_id,
Chain = derived_chain_id
))
except:
if allow_missing_molecules: pass
else: raise
def _add_pdb_residues(self, tsession, database_pdb_id, pdb_object = None):
'''
The code here is the same for both RCSB and non-RCSB structures.
Touched tables:
PDBResidue
'''
pdb_object = pdb_object or self.get_pdb_object(database_pdb_id, tsession = tsession)
residue_bfactors = pdb_object.get_B_factors().get('PerResidue')
# Run DSSP over the entire structure
dssp_complex_d, dssp_monomer_d = None, None
try:
# This fails for some PDB e.g. if they only have CA atoms
dssp_complex_d = ComplexDSSP(pdb_object, read_only = True)
except MissingAtomException, e:
print('DSSP (complex) failed for this case: {0}.'.format(database_pdb_id))
for db_chain in tsession.query(PDBChain).filter(PDBChain.PDBFileID == database_pdb_id):
print(db_chain.Chain, db_chain.MoleculeType)
assert(db_chain.MoleculeType != 'Protein') # we should always pass on protein chains
# Run DSSP over the individual chains
try:
# This fails for some PDB e.g. if they only have CA atoms
dssp_monomer_d = MonomerDSSP(pdb_object, read_only = True)
except MissingAtomException, e:
print('DSSP (monomer) failed for this case: {0}.'.format(database_pdb_id))
for db_chain in tsession.query(PDBChain).filter(PDBChain.PDBFileID == database_pdb_id):
print(db_chain.Chain, db_chain.MoleculeType)
assert(db_chain.MoleculeType != 'Protein') # we should always pass on protein chains
# Generate a list of residues with coordinates in the PDB file
parsed_residues = set()
for c, seq in pdb_object.atom_sequences.iteritems():
for s in seq:
res_id, r = s
parsed_residues.add(c + r.ResidueID)
# Sanity checks: make sure that the residue records exist for all results of DSSP
monomeric_records, complex_records = {}, {}
if dssp_monomer_d:
for chain_id, mapping in dssp_monomer_d:
if pdb_object.chain_types[chain_id] == 'Protein' or pdb_object.chain_types[chain_id] == 'Protein skeleton':
for residue_id, residue_details in sorted(mapping.iteritems()):
if residue_details['3LC'] != 'UNK': # todo: should we handle these residues?
chain_residue_id = chain_id + residue_id
assert(chain_residue_id in parsed_residues)
monomeric_records[chain_residue_id] = residue_details
if dssp_complex_d:
for chain_id, mapping in dssp_complex_d:
if pdb_object.chain_types[chain_id] == 'Protein' or pdb_object.chain_types[chain_id] == 'Protein skeleton':
for residue_id, residue_details in sorted(mapping.iteritems()):
if residue_details['3LC'] != 'UNK': # todo: should we handle these residues?
chain_residue_id = chain_id + residue_id
assert(chain_residue_id in parsed_residues)
complex_records[chain_residue_id] = residue_details
# Read existing data from the database
existing_residues = {}
for r in tsession.query(PDBResidue).filter(PDBResidue.PDBFileID == database_pdb_id):
chain_residue_id = r.Chain + r.ResidueID
assert(chain_residue_id not in existing_residues)
existing_residues[chain_residue_id] = r
# Add PDBResidue records
# dssp_monomer_d and dssp_complex_d are maps: chain -> residue_id -> DSSP record
for c, seq in sorted(pdb_object.atom_sequences.iteritems()):
count = 1
for s in seq:
res_id, r = s
assert(len(r.ResidueID) == 5)
assert(c == r.Chain)
chain_residue_id = c + r.ResidueID
dssp_res_complex_ss, dssp_res_complex_exposure, dssp_res_monomer_ss, dssp_res_monomer_exposure = ' ', None, ' ', None
monomeric_record = monomeric_records.get(chain_residue_id)
if monomeric_record:
dssp_res_monomer_ss = monomeric_record['ss']
dssp_res_monomer_exposure = monomeric_record['exposure']
complex_record = complex_records.get(chain_residue_id)
if complex_record:
dssp_res_complex_ss = complex_record['ss']
dssp_res_complex_exposure = complex_record['exposure']
average_bfactors = residue_bfactors.get(chain_residue_id, {})
existing_residue_record = existing_residues.get(chain_residue_id)
if not existing_residue_record:
db_residue = get_or_create_in_transaction(tsession, PDBResidue, dict(
PDBFileID = database_pdb_id,
Chain = c,
ResidueID = r.ResidueID,
ResidueAA = r.ResidueAA,
ResidueType = r.residue_type,
IndexWithinChain = count,
CoordinatesExist = True,
RecognizedByRosetta = None,
BFactorMean = average_bfactors.get('mean'),
BFactorDeviation = average_bfactors.get('stddev'),
MonomericExposure = dssp_res_monomer_exposure,
MonomericDSSP = dssp_res_monomer_ss,
ComplexExposure = dssp_res_complex_exposure,
ComplexDSSP = dssp_res_complex_ss,
), missing_columns = ['ID'])
else:
# Sanity check: make sure that the current data matches the database
#print('EXISTING RESIDUE')
#pprint.pprint(existing_residue_record.__dict__)
if existing_residue_record.BFactorMean != None:
assert(abs(float(existing_residue_record.BFactorMean) - average_bfactors.get('mean')) < 0.001)
if existing_residue_record.BFactorDeviation != None:
assert(abs(float(existing_residue_record.BFactorDeviation) - average_bfactors.get('stddev')) < 0.001)
if existing_residue_record.MonomericExposure != None:
assert(abs(float(existing_residue_record.MonomericExposure) - dssp_res_monomer_exposure) < 0.001)
if existing_residue_record.MonomericDSSP != None:
assert(r.ResidueAA == 'X' or (existing_residue_record.MonomericDSSP == dssp_res_monomer_ss))
if existing_residue_record.ComplexExposure != None:
assert(abs(float(existing_residue_record.ComplexExposure) - dssp_res_complex_exposure) < 0.001)
if existing_residue_record.ComplexDSSP != None:
assert(r.ResidueAA == 'X' or (existing_residue_record.ComplexDSSP == dssp_res_complex_ss))
# Update data (add new data if it was previously missing)
existing_residue_record.BFactorMean = average_bfactors.get('mean')
existing_residue_record.BFactorDeviation = average_bfactors.get('stddev')
existing_residue_record.MonomericExposure = dssp_res_monomer_exposure
existing_residue_record.MonomericDSSP = dssp_res_monomer_ss
existing_residue_record.ComplexExposure = dssp_res_complex_exposure
existing_residue_record.ComplexDSSP = dssp_res_complex_ss
tsession.flush()
#self.ddGdb.insertDictIfNew('PDBResidue', db_res, ['PDBFileID', 'Chain', 'ResidueID'])
count += 1
#print(count)
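# Shape of the DSSP data consumed above (inferred from this method and stated as
# a reading aid, not a guaranteed contract of the DSSP classes):
#
#   for chain_id, mapping in dssp_monomer_d:         # iteration yields per-chain maps
#       for residue_id, details in mapping.iteritems():
#           details['3LC']       # three-letter residue code, e.g. 'ALA'
#           details['ss']        # DSSP secondary structure character
#           details['exposure']  # relative solvent exposure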
def _add_pdb_rcsb_ligands(self, tsession, database_pdb_id, pdb_object = None, ligand_mapping = {}, ligand_params_file_paths = {}):
'''This function associates the ligands of a PDB file (which may be arbitrarily named) with ligands entered in
the database using the ligand's PDB code. The insertion is handled by a transaction which should be set up
by the caller.
Touched tables:
PDBLigand
Other Ligand tables by proxy (via add_ligand_by_pdb_code)
'''
pdb_object = pdb_object or self.get_pdb_object(database_pdb_id, tsession = tsession)
db_record = get_single_record_from_query(tsession.query(PDBFile).filter(PDBFile.ID == database_pdb_id))
db_ligand_ids = {}
if db_record.FileSource == 'RCSB':
# This structure came straight from the RCSB. We trust the ligand codes to be correct
assert(not(ligand_mapping))
# Add the ligand description using data from the RCSB if they do not exist.
for ligand_code in pdb_object.get_ligand_codes():
db_ligand_ids[ligand_code] = self.add_ligand_by_pdb_code(ligand_code)
else:
# This structure is not from the RCSB and may use non-standard ligand codes.
# We therefore require a mapping from all ligand codes in the PDB to RCSB ligand codes.
ligand_codes = pdb_object.get_ligand_codes()
if ligand_codes:
assert(ligand_mapping)
# Check all codes have a mapping and that the codomain values already exist in the database (the underlying
# RCSB file and its ligands should already have been added)
for ligand_code in ligand_codes:
assert(ligand_mapping.map_code(ligand_code))
db_ligand_record = tsession.query(DBLigand).filter(DBLigand.PDBCode == ligand_mapping.map_code(ligand_code)).one()
db_ligand_ids[ligand_code] = db_ligand_record.ID
# Check whether any codes exist in the mapping which have corresponding Ion records in the database.
# Since non-RCSB files may be missing headers, and we cannot assume a heterogen is an ion purely because
# it has only a single HETATM record (coordinates may be missing), we need to make this check.
# Note: this check assumes that the sets of ligand and ion PDB codes are mutually exclusive.
for ligand_code in ligand_codes:
existing_db_record = tsession.query(DBIon).filter(DBIon.PDBCode == ligand_code)
if existing_db_record.count() > 0:
raise Exception('Handle this case and add a PDBIon record below instead.')
# Record all instances of ligands in the PDB file (add PDBLigand records).
# PDBLigandCode is the code used by the PDB file regardless of whether the structure came from the RCSB i.e. it
# may not be the standard code. The standard code can be found by looking up the associated Ligand record.
for chain_id, chain_ligands in sorted(pdb_object.ligands.iteritems()):
for het_seq_id, lig in sorted(chain_ligands.iteritems()):
try:
assert(lig.PDBCode in db_ligand_ids)
pdb_ligand = get_or_create_in_transaction(tsession, PDBLigand, dict(
PDBFileID = database_pdb_id,
Chain = chain_id,
SeqID = het_seq_id,
PDBLigandCode = lig.PDBCode,
LigandID = db_ligand_ids[lig.PDBCode],
ParamsFileContentID = None,
))
except Exception, e:
colortext.error(str(e))
colortext.error(traceback.format_exc())
raise Exception('An exception occurred committing ligand "{0}" from {1} to the database.'.format(lig.PDBCode, database_pdb_id))
# Params files
if ligand_params_file_paths:
if not(0 < max(map(len, ligand_params_file_paths.keys())) <= 3):
bad_keys = sorted([k for k in ligand_params_file_paths.keys() if len(k) > 3])
raise colortext.Exception('The ligand codes "{0}" are invalid - all codes must be between 1 and 3 characters e.g. "CIT".'.format('", "'.join(bad_keys)))
bad_keys = sorted(set(ligand_params_file_paths.keys()).difference(pdb_object.get_ligand_codes()))
if bad_keys:
raise colortext.Exception('The ligand codes "{0}" were specified but were not found in the PDB file.'.format('", "'.join(bad_keys)))
ligand_params_file_content = {}
if ligand_params_file_paths:
# Read all params files
for ligand_code, params_filepath in ligand_params_file_paths.iteritems():
ligand_params_file_content[ligand_code] = read_file(params_filepath)
for ligand_code, params_file_content in ligand_params_file_content.iteritems():
# First, add a new file using FileContent.
file_content_id = self._add_file_content(params_file_content, tsession = tsession, rm_trailing_line_whitespace = True, forced_mime_type = 'text/plain')
# Next, associate this file with the PDBLigand record.
pdb_ligand_file = get_or_create_in_transaction(tsession, PDBLigandFile, dict(
PDBFileID = database_pdb_id,
PDBLigandCode = ligand_code,
ParamsFileContentID = file_content_id,
))
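# Illustrative shape of the optional params-file argument (the path is hypothetical):
#
#   ligand_params_file_paths = {'GNP' : '/path/to/GNP.params'}
#
# Each params file is stored once via _add_file_content and then linked to the
# ligand through a PDBLigandFile record keyed on the PDB file's own ligand code.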
def _add_pdb_rcsb_ions(self, tsession, database_pdb_id, pdb_object = None):
'''This function associates the ions of a PDB file with ions entered in the database using PDB codes from RCSB
PDB files.
For simplicity, we make the assumption that ion codes in all PDB files are not modified from the original PDB file.
We support this assumption with a couple of checks:
- we check that the elemental code is the same for the ion's atom and for the database record;
- we only allow the addition of Ion records from RCSB PDB files. Since we require that the RCSB PDB file be
added prior to adding derived/designed structures, any ions should have a corresponding record in the database
unless: i) the ion was manually or otherwise added to the structure; or ii) the ion code was indeed changed.
The insertion is handled by a transaction which should be set up
by the caller.
Touched tables:
PDBIon
Ion
'''
pdb_object = pdb_object or self.get_pdb_object(database_pdb_id, tsession = tsession)
db_record = get_single_record_from_query(tsession.query(PDBFile).filter(PDBFile.ID == database_pdb_id))
# Create a set of Ion records from the PDB file. ions maps PDB codes to dicts containing Ion table fields
ions = {}
for c, cions in pdb_object.ions.iteritems():
for seq_id, pdb_ion_object in cions.iteritems():
if not ions.get(pdb_ion_object.PDBCode):
ions[pdb_ion_object.PDBCode] = copy.deepcopy(pdb_ion_object.get_db_records(database_pdb_id)['Ion'])
else:
# Make sure that all ions in the PDB file have the same formula, description, etc.
subsequent_instance = copy.deepcopy(pdb_ion_object.get_db_records(database_pdb_id)['Ion'])
for k, v in ions[pdb_ion_object.PDBCode].iteritems():
assert(v == subsequent_instance[k])
# Make sure that the ions in the PDB file have the same formula, description, etc. as currently in the database
existing_ion_codes = set()
for pdb_code, d in ions.iteritems():
existing_db_record = tsession.query(DBIon).filter(DBIon.PDBCode == pdb_code)
if existing_db_record.count() > 0:
assert(existing_db_record.count() == 1)
existing_db_record = existing_db_record.one()
if d['PDBCode'] == existing_db_record.PDBCode: # d is the Ion field dict for pdb_code
if db_record.FileSource == 'RCSB':
assert(d['Description'] == existing_db_record.Description) # This can differ e.g. CL in 127L is 3(CL 1-) since there are 3 ions but in PDB files with 2 ions, this can be 2(CL 1-). We can assert this if we do extra parsing.
assert(d['Formula'] == existing_db_record.Formula) # This can differ e.g. CL in 127L is 3(CL 1-) since there are 3 ions but in PDB files with 2 ions, this can be 2(CL 1-). We can assert this if we do extra parsing.
else:
if d['Description'] != existing_db_record.Description:
colortext.warning('The description for {0} ("{1}") does not match the database record. This may occur if the PDB headers are missing. However, it also may indicate that the code "{0}" for the ion was manually altered which is not handled by our pipeline and could cause errors.'.format(pdb_code, d['Description']))
if d['Formula'] != existing_db_record.Formula:
colortext.warning('The formula for {0} ("{1}") does not match the database record. This may occur if the PDB headers are missing. However, it also may indicate that the code "{0}" for the ion was manually altered which is not handled by our pipeline and could cause errors.'.format(pdb_code, d['Formula']))
existing_ion_codes.add(pdb_code)
# Create the main Ion records, only creating records for ions in RCSB files.
if db_record.FileSource == 'RCSB':
for pdb_code, ion_record in ions.iteritems():
if pdb_code not in existing_ion_codes:
# Do not add existing records
colortext.message('Adding ion {0}'.format(pdb_code))
db_ion = get_or_create_in_transaction(tsession, DBIon, ion_record, missing_columns = ['ID'])
# Get the mapping from PDB code to Ion objects
db_ions = {}
for pdb_code in ions.keys():
existing_db_record = tsession.query(DBIon).filter(DBIon.PDBCode == pdb_code)
assert(existing_db_record.count() == 1)
db_ions[pdb_code] = existing_db_record.one()
# Record all instances of ions in the PDB file (add PDBIon records).
for c, cions in pdb_object.ions.iteritems():
for seq_id, pdb_ion_object in cions.iteritems():
assert(pdb_ion_object.get_db_records(None)['Ion']['PDBCode'] == db_ions[pdb_ion_object.PDBCode].PDBCode)
if db_record.FileSource == 'RCSB':
#assert(pdb_ion_object.get_db_records(None)['Ion']['Formula'] == db_ions[pdb_ion_object.PDBCode].Formula) # not always true e.g. see CL comment above. We can assert this if we do extra parsing.
assert(pdb_ion_object.get_db_records(None)['Ion']['Description'] == db_ions[pdb_ion_object.PDBCode].Description)
pdb_ion_record = pdb_ion_object.get_db_records(database_pdb_id, ion_id = db_ions[pdb_ion_object.PDBCode].ID)['PDBIon']
try:
db_ion = get_or_create_in_transaction(tsession, PDBIon, pdb_ion_record)
except Exception, e:
colortext.error(str(e))
colortext.error(traceback.format_exc())
raise Exception('An exception occurred committing ion "{0}" from {1} to the database.'.format(pdb_ion_object.get_db_records(None)['Ion']['PDBCode'], database_pdb_id))
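# Reading aid (derived from the calls above, not an independent specification):
# pdb_ion_object.get_db_records(pdb_id) returns a dict with an 'Ion' entry
# (PDBCode, Formula, Description, ...) and, when ion_id is supplied, a 'PDBIon'
# entry suitable for get_or_create_in_transaction, e.g.:
#
#   records = pdb_ion_object.get_db_records(database_pdb_id, ion_id = db_ion.ID)
#   ion_fields, pdb_ion_fields = records['Ion'], records['PDBIon']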
def _add_pdb_uniprot_mapping(self, tsession, database_pdb_id, pdb_object = None):
'''UniProtACs have forms like 'P62937' whereas UniProtIDs have forms like 'PPIA_HUMAN'.'''
pdb_object = pdb_object or self.get_pdb_object(database_pdb_id, tsession = tsession)
return
#protein = None
#UniProtAC = None
#UniProtID = None
# todo: add UniProt mapping here. The old approach was flawed.
#ref_pdb_id = self.dict['DerivedFrom'] or self.dict['ID']
#if ref_pdb_id not in self.NoUniProtIDs:
# read_UniProt_map(self.ddGdb)
# if not PDBToUniProt.get(ref_pdb_id):
# if not (self.UniProtAC and self.UniProtID):
# ACtoID_mapping, PDBtoAC_mapping = None, None
# try:
# getUniProtMapping(ref_pdb_id, storeInDatabase = False, ddGdb = self.ddGdb)
# if not (PDBtoAC_mapping and ACtoID_mapping):
# raise Exception("Could not find a UniProt mapping for %s in %s." % (ref_pdb_id, uniprotmapping))
# except:
# colortext.error("Could not find a UniProt mapping for %s in %s." % (ref_pdb_id, uniprotmapping))
# self.ACtoID_mapping = ACtoID_mapping
# self.PDBtoAC_mapping = PDBtoAC_mapping
#
# Add UniProt mapping
#if self.UniProtAC and self.UniProtID:
# results = self.ddGdb.locked_execute("SELECT * FROM UniProtKBMapping WHERE UniProtKB_AC=%s", parameters=(self.UniProtAC,))
# if results:
# if results[0]['PDBFileID'] != d['ID']:
# raise Exception("Existing UniProt mapping (%s->%s) does not agree with the passed-in parameters (%s->%s)." % (results[0]['UniProtKB_AC'],results[0]['PDBFileID'],self.UniProtAC,d['ID']))
# else:
# UniProtPDBMapping = dict(
# UniProtKB_AC = self.UniProtAC,
# PDBFileID = d[FieldNames_.PDB_ID],
# )
# Store the UniProt mapping in the database
#ref_pdb_id = self.dict['DerivedFrom'] or self.dict['ID']
#if ref_pdb_id not in self.NoUniProtIDs:
# if not (self.ACtoID_mapping and self.PDBtoAC_mapping):
# try:
# self.ACtoID_mapping, self.PDBtoAC_mapping = getUniProtMapping(ref_pdb_id, storeInDatabase = testonly)
# except:
# if False and self.dict['Techniques'] != 'Rosetta model':
# raise
# if False and self.dict['Techniques'] != 'Rosetta model':
# assert(self.ACtoID_mapping and self.PDBtoAC_mapping)
# if not testonly:
# if self.ACtoID_mapping and self.PDBtoAC_mapping:
# commitUniProtMapping(self.ddGdb, self.ACtoID_mapping, self.PDBtoAC_mapping)
def _add_pdb_publication(self, tsession, database_pdb_id, pdb_object = None):
'''Extracts the PDB source information.
Touched tables:
Publication
PublicationIdentifier
'''
pdb_object = pdb_object or self.get_pdb_object(database_pdb_id, tsession = tsession)
pdb_record = tsession.query(PDBFile).filter(PDBFile.ID == database_pdb_id).one()
PUBTYPES = ['ISSN', 'ESSN']
j = pdb_object.get_journal()
if not j:
return
database_pdb_id = database_pdb_id.strip()
# We identify the sources for a PDB identifier with that identifier
PublicationID = "PDB:%s" % database_pdb_id
publication_record = get_or_create_in_transaction(tsession, Publication, dict(ID = PublicationID), only_use_supplied_columns = True)
pdb_record.Publication = publication_record.ID
tsession.flush()
locations = tsession.query(PublicationIdentifier).filter(PublicationIdentifier.SourceID == publication_record.ID)
pub_locations = [location for location in locations if location.Type in PUBTYPES]
doi_locations = [location for location in locations if location.Type == 'DOI']
assert(len(pub_locations) <= 1)
assert(len(doi_locations) <= 1)
if j["published"]:
if pub_locations:
location = pub_locations[0]
if j["REFN"]["type"] == location.Type:
if j["REFN"]["ID"] != location.ID:
colortext.warning("REFN: Check that the PublicationIdentifier data ('{0}, {1}') matches the PDB REFN data ({2}).".format(location.ID, location.Type, j["REFN"]))
elif j.get('REFN'):
assert(j["REFN"]["type"] in PUBTYPES)
db_pub_id = get_or_create_in_transaction(tsession, PublicationIdentifier, dict(
SourceID = PublicationID,
ID = j["REFN"]["ID"],
Type = j["REFN"]["type"],
))
if j["DOI"]:
if doi_locations:
location = doi_locations[0]
if j["DOI"] != location.ID:
colortext.warning("DOI: Check that the PublicationIdentifier data ('{0}, {1}') matches the PDB DOI data ({2}).".format(location.ID, location.Type, j["DOI"]))
else:
db_pub_id = get_or_create_in_transaction(tsession, PublicationIdentifier, dict(
SourceID = PublicationID,
ID = j["DOI"],
Type = "DOI",
))
def add_designed_pdb(self, structural_details,
allow_missing_params_files = False,
minimum_sequence_identity = 95.0,
previously_added = set(),
trust_database_content = False,
update_sections = set(),
tsession = None,
debug = True):
'''
:param structural_details: A dict fitting the defined structure (see below).
:param allow_missing_params_files: If the PDB file contains ligands with no associated params files in structural_details
then the function will return with a message. If this option is set to True
then the file will be added but the ligand is not guaranteed to be kept
by the protocol (this depends on the version of Rosetta - versions from February
2016 onwards should have better ligand support).
:param minimum_sequence_identity: For non-RCSB files, we require a chain mapping from chains to the corresponding
RCSB chains. Clustal is run to ensure that the sequence identity is at least this
value.
:param previously_added:
:param trust_database_content:
:param update_sections:
:param tsession:
:param debug: If debug is set to True then the transaction used to insert the structure into the database will be
rolled back and a message stating that the insertion would have been successful is returned in the
return dict.
:return:
One example of the dict structure is as follows:
dict(
# Fields required for all structures. We require non-RCSB structures to be based on RCSB structures at least in this import interface.
rcsb_id = '1K5D',
# Exactly one of these fields must be specified for non-RCSB structures.
pdb_object = p,
file_path = 'pdbs/1K5D2.pdb',
# Fields required for non-RCSB structures.
db_id = '1K5D_TP0', # must be between 5-10 characters
techniques = "PDB_REDO",
file_source = "Roger Wilco",
user_id = "sierra",
# Required for non-RCSB structures and optional for RCSB structures
description = 'GSP1 complex (RanGAP1) from Tina Perica. This file was taken from PDB_REDO. Removed residues 180-213 from chain A (RAN/GSP1) (the ones wrapping around YRB1).',
# Optional fields for both non-RCSB structures and optional for RCSB structures
transmembrane = None, # None or True or False
publication = None, # this should have a corresponding entry (Publication.ID) in the database
resolution = None, # should be either None or a floating-point number
# Exactly one of these fields must be specified for non-RCSB structures.
# If identical_chains is passed then it must be set to True and all protein, DNA, and RNA chains in
# the input file must have the same chain ID as in the RCSB file.
# If chain_mapping is instead passed then the keys must cover all protein, DNA, and RNA chains in the input file.
# Since ligands are sometimes reassigned to a new chain e.g. 'X', we do not require a mapping for them in
# chain_mapping and we do not consider them in the sanity check for the identical_chains case.
# Note: This logic does not currently handle cases where multiple chains in the RCSB file were merged into one
# chain in the input file i.e. chain A in the input file corresponds to chains L and H in the RCSB
# structure.
# Clustal is run over the chain mapping to ensure a sequence identity of minimum_sequence_identity% (95% by default).
identical_chains = True
# or
chain_mapping = dict(
A = 'C', # To help other users read the input file, it is useful to mention any other choices available i.e. "choice of A, D, G, J." followed by the chain name e.g. "RAN"
C = 'A', # e.g. "choice of C, F, I, L. Ran GTPase activating protein 1"
),
# Fields required for non-RCSB complexes if there are ligands/ions in the input structure.
# First, all ligands and ions will be found in the input structure. Each (non-water) ligand and ion code must be
# covered in either the ligand_instance_mapping, the ligand_code_mapping, or the unchanged_ligand_codes mapping
# In practice, you will probably one want to use ligand_code_mapping, unchanged_ligand_codes, and unchanged_ion_codes
# as it is rare for ion codes to be changed.
# LigandMap is defined in klab.bio.ligand.
# The three types of mapping are:
# 1. detailed mappings - one mapping per instance. You will probably not want to bother using this format as it can become unwieldy with large numbers of ligands
ligand_instance_mapping = LigandMap.from_tuples_dict({ # Input PDB's HET code, residue ID -> RCSB HET code, RCSB residue ID
('G13', 'X 1 ') : ('GNP', 'A1250 '),
}),
ion_instance_mapping = LigandMap.from_tuples_dict({ # Input PDB's HET code, residue ID -> RCSB HET code, RCSB residue ID
('UNX', 'A1249 ') : ('MG', 'A1250 '),
}),
# 2. ligand type mappings - one mapping per ligand code. This is the easiest to specify when there are changes to ligand codes
ligand_code_mapping = {
'G13' : 'GNP',
},
ion_code_mapping = {
'UNX' : 'MG',
},
# 3. white lists - lists of ligand and ion codes which have not changed. Most of your cases will probably fall into this category
unchanged_ligand_codes = ['MSE'],
unchanged_ion_codes = ['FE2'],
# Fields currently advised when there are ligands in the structure in order for the protocols to handle
# ligands properly. This may be a non-issue in the future; at the time of writing, February 5th 2016,
# there was a large push by the Rosetta XRW to address the problem of handling arbitrary ligands so this
# problem may be mainly solved.
ligand_params_file_paths = {
'G13' : 'temp/pdbs/1K5D2.params'
},
)
'''
################################
# Checks and balances
################################
rcsb_id = structural_details['rcsb_id']
ligand_params_file_paths = structural_details.get('ligand_params_file_paths', {})
assert(isinstance(ligand_params_file_paths, dict))
for k, v in ligand_params_file_paths.iteritems():
ligand_params_file_paths[k] = os.path.abspath(v)
# Type checks
assert((isinstance(rcsb_id, str) or isinstance(rcsb_id, unicode)) and (4 == len(rcsb_id.strip())))
rcsb_id = str(rcsb_id.strip())
# Adding an RCSB structure - cascade into add_pdb_from_rcsb
if not('file_path' in structural_details or 'pdb_object' in structural_details):
# Read the RCSB PDB
rcsb_pdb_object = self.get_pdb_object(rcsb_id)
# Params files
assert(len(set(ligand_params_file_paths.keys()).difference(rcsb_pdb_object.get_ligand_codes())) == 0)
return self.add_pdb_from_rcsb(rcsb_id, previously_added = previously_added, trust_database_content = trust_database_content,
update_sections = update_sections, ligand_params_file_paths = ligand_params_file_paths, debug = debug)
# The remainder of this function adds a designed structure to the database
assert('project_name' in structural_details) # todo: add a ProjectPDBFile record. We should require that all new PDB files are associated with a project.
# Required fields
pdb2pdb_chain_maps = []
design_pdb_id = structural_details['db_id']
techniques = structural_details['techniques']
assert((isinstance(design_pdb_id, str) or isinstance(design_pdb_id, unicode)) and (5 <= len(design_pdb_id.strip()) <= 10))
design_pdb_id = str(design_pdb_id.strip())
techniques = (techniques or '').strip() or None
if (techniques == None) or not(isinstance(techniques, str) or isinstance(techniques, unicode)):
raise colortext.Exception('The technique for generating the PDB file must be specified e.g. "Rosetta model" or "PDB_REDO structure" or "Manual edit".')
if 'description' not in structural_details:
raise colortext.Exception('A description is required for non-RCSB files. This should include any details on the structure preparation.')
if 'file_source' not in structural_details:
raise colortext.Exception('''A file_source is required for non-RCSB files. This should describe where the file came from e.g. "PDB REDO", "Rosetta", or the creator's name e.g. "Jon Snow".''')
if 'user_id' not in structural_details:
raise colortext.Exception('A user_id is required for non-RCSB files. This should correspond to a record in the User table.')
file_source, description, user_id = structural_details['file_source'], structural_details['description'] , structural_details['user_id']
assert((isinstance(file_source, str) or isinstance(file_source, unicode)) and (file_source.strip()))
assert((isinstance(description, str) or isinstance(description, unicode)) and (description.strip()))
assert((isinstance(user_id, str) or isinstance(user_id, unicode)) and (user_id.strip()))
file_source = file_source.strip()
description = description.strip()
user_id = str(user_id.strip())
# User checks. Make sure the user has a record in the database
try:
user_record = self.get_session().query(DBUser).filter(DBUser.ID == user_id).one()
except:
colortext.error('Could not find user "{0}" in the database.'.format(user_id))
raise
colortext.warning('User: {1} ({0})'.format(user_record.ID, ' '.join([n for n in [user_record.FirstName, user_record.MiddleName, user_record.Surname] if n])))
# Optional fields
resolution = structural_details.get('resolution')
assert(resolution == None or isinstance(resolution, float))
transmembrane = structural_details.get('transmembrane')
assert(transmembrane == None or isinstance(transmembrane, bool))
# Publication checks
# todo: if publication, assert that the publication record exists
publication = structural_details.get('publication')
assert(publication == None) #todo: handle
# Read the input PDB
designed_pdb_object = None
if structural_details.get('pdb_object'):
if structural_details.get('file_path'):
raise colortext.Exception('Only one of pdb_object and file_path should be specified, not both.')
designed_pdb_object = structural_details.get('pdb_object')
else:
if not structural_details.get('file_path'):
raise colortext.Exception('Exactly one of pdb_object or file_path must be specified.')
pdb_file_path = os.path.abspath(structural_details['file_path'])
if not os.path.exists(pdb_file_path):
raise colortext.Exception('Could not locate the file "{0}".'.format(pdb_file_path))
designed_pdb_object = PDB(read_file(pdb_file_path))
# Read the chain IDs
all_chain_ids, main_chain_ids = [], []
for k, v in sorted(designed_pdb_object.chain_types.iteritems()):
all_chain_ids.append(k)
if v != 'Unknown' and v != 'Solution' and v != 'Ligand':
main_chain_ids.append(k)
# Check the chain mapping
chain_mapping, chain_mapping_keys = {}, set()
if 'identical_chains' in structural_details:
# Create a partial chain mapping (ignoring ligand chains which may have a corresponding match)
assert('chain_mapping' not in structural_details)
assert(structural_details['identical_chains'] == True)
for c in main_chain_ids:
chain_mapping[c] = c
chain_mapping_keys.add(c)
else:
# Check that the chain mapping domain is a complete mapping over the main chain IDs ( main_chain_ids <= chain_mapping.keys() <= all_chain_ids where "<=" is non-strict subset of)
assert('chain_mapping' in structural_details)
chain_mapping = structural_details['chain_mapping']
assert(isinstance(chain_mapping, dict) and chain_mapping)
chain_mapping_keys = set(chain_mapping.keys())
assert(len(chain_mapping_keys.intersection(set(main_chain_ids))) == len(main_chain_ids)) # main_chain_ids is a subset of chain_mapping_keys
assert(len(set(all_chain_ids).intersection(chain_mapping_keys)) == len(chain_mapping_keys)) # chain_mapping_keys is a subset of all_chain_ids
unmapped_chains = sorted(set(all_chain_ids).difference(chain_mapping_keys))
# Add the RCSB structure to the database and then use that object to run Clustal
colortext.pcyan('Adding the original PDB file using a separate transaction.')
self.add_pdb_from_rcsb(rcsb_id, previously_added = previously_added, trust_database_content = True, ligand_params_file_paths = {})
# Now that we have added the RCSB structure, create a new session which will be aware of that structure
tsession = tsession or self.get_session(new_session = True)
try:
attempts = 0
rcsb_object = None
while (not rcsb_object) and (attempts < 10):
# Hacky but there seems to be a race condition here between closing the previous transaction in add_pdb_from_rcsb and creating the new transaction (tsession) above
try:
rcsb_object = self.get_pdb_object(rcsb_id, tsession = tsession)
except Exception, e:
colortext.warning(str(e))
colortext.warning(traceback.format_exc())
attempts += 1
time.sleep(1)
if not rcsb_object:
raise colortext.Exception('Race condition detected. Try the import again.')
# Run Clustal to check whether the chain mapping is correct
rcsb_db_record_object = get_single_record_from_query(tsession.query(PDBFile).filter(PDBFile.ID == rcsb_id))
design_sequences = None
if designed_pdb_object.seqres_sequences:
design_sequences = designed_pdb_object.seqres_sequences
else:
design_sequences = designed_pdb_object.atom_sequences
pcsa = PDBChainSequenceAligner()
for chain_id, sequence in sorted(rcsb_object.seqres_sequences.iteritems()):
pcsa.add(rcsb_id, chain_id, str(sequence))
for chain_id, sequence in sorted(design_sequences.iteritems()):
pcsa.add(design_pdb_id, chain_id, str(sequence))
output, best_matches = pcsa.align(ignore_bad_chains = True)
for dc, rc in sorted(chain_mapping.iteritems()):
# todo: this mapping is untested on ligand chains. We could do: for all c in unmapped_chains, see if the 3-letter sequences match and, if so, add those
k1 = '{0}_{1}'.format(design_pdb_id, dc)
k2 = '{0}_{1}'.format(rcsb_id, rc)
assert(k1 in best_matches and k2 in best_matches[k1])
assert(k2 in best_matches and k1 in best_matches[k2])
sequence_identity = best_matches[k1][k2]
pdb2pdb_chain_maps.append(dict(
PDBFileID1 = design_pdb_id,
Chain1 = dc,
PDBFileID2 = rcsb_id,
Chain2 = rc,
SequenceIdentity = sequence_identity,
))
if sequence_identity < minimum_sequence_identity:
raise colortext.Exception('The chain mapping is defined as {0}, chain {1} -> {2}, chain {3} but the sequence identity for these chains is only {4}%.'.format(design_pdb_id, dc, rcsb_id, rc, sequence_identity))
sequence_identity = best_matches[k2][k1]
pdb2pdb_chain_maps.append(dict(
PDBFileID1 = rcsb_id,
Chain1 = rc,
PDBFileID2 = design_pdb_id,
Chain2 = dc,
SequenceIdentity = sequence_identity,
))
if sequence_identity < minimum_sequence_identity:
raise colortext.Exception('The chain mapping is defined as {0}, chain {1} -> {2}, chain {3} but the sequence identity for these chains is only {4}%.'.format(rcsb_id, rc, design_pdb_id, dc, sequence_identity))
# Make sure that no params files were specified that do not fit the PDB file
ligands_not_present = set(ligand_params_file_paths.keys()).difference(designed_pdb_object.get_ligand_codes())
if len(ligands_not_present) > 0:
raise colortext.Exception('Params files were specified for ligands which do not exist in the PDB file: "{0}".'.format('", "'.join(ligands_not_present)))
# Sanity-check the ligand mapping for consistency
ligand_code_map = {}
if 'ligand_instance_mapping' in structural_details:
for k, v in structural_details['ligand_instance_mapping'].code_map.iteritems():
assert(ligand_code_map.get(k) == None or ligand_code_map[k] == v)
ligand_code_map[k] = v
if 'ligand_code_mapping' in structural_details:
for k, v in structural_details['ligand_code_mapping'].iteritems():
assert(ligand_code_map.get(k) == None or ligand_code_map[k] == v)
ligand_code_map[k] = v
if 'unchanged_ligand_codes' in structural_details:
for k in structural_details['unchanged_ligand_codes']:
assert(ligand_code_map.get(k) == None or ligand_code_map[k] == k)
ligand_code_map[k] = k
# Sanity-check the ligand mapping for completeness
ligand_mapping = LigandMap.from_code_map(ligand_code_map)
assert(isinstance(ligand_mapping, LigandMap) and ligand_mapping)
if sorted(ligand_code_map.keys()) != sorted(designed_pdb_object.get_ligand_codes()):
raise colortext.Exception('Incomplete mapping or unexpected entries: The ligand mapping contains mappings for "{0}" but the PDB file contains ligand codes "{1}".'.format('", "'.join(sorted(ligand_code_map.keys())), '", "'.join(sorted(designed_pdb_object.get_ligand_codes()))))
# todo: currently unhandled
# todo: add ion mapping support - this seems less important as it is probably less likely that users will rename ion codes
assert('ion_instance_mapping' not in structural_details and 'ion_code_mapping' not in structural_details)
# Check to make sure that the set of ions is a subset of those in the original PDB
# todo: Ideally, we should pass an ion mapping like for the ligand mapping. However, this may be annoying for users
# to have to specify. Instead, we could check to make sure that the mapping ion_code -> atom_type matches in
# both structures which would be a more specific check than the assertion below. This would disallow renaming
# of ions but this seems a reasonable trade-off.
assert(len(set(designed_pdb_object.get_ion_codes()).difference(set(rcsb_object.get_ion_codes()))) == 0)
################################
# Data entry
################################
# Add the original PDB file to the database
colortext.message('Adding designed PDB file {0} based off {1}.'.format(design_pdb_id, rcsb_id))
db_record_object = get_single_record_from_query(tsession.query(PDBFile).filter(PDBFile.ID == design_pdb_id))
is_new_record = db_record_object == None
if not is_new_record:
print('Retrieving designed PDB {0} from database.'.format(design_pdb_id))
db_pdb_object = PDB(db_record_object.Content, parse_ligands = True)
assert(designed_pdb_object.lines == db_pdb_object.lines)
assert(db_record_object.FileSource != 'RCSB')
if trust_database_content:
print('Trusting the existing data and early-outing.')
return design_pdb_id
else:
db_record_object = PDBFile(**dict(
ID = design_pdb_id,
FileSource = file_source,
Content = str(designed_pdb_object),
Techniques = techniques,
UserID = user_record.ID,
Notes = description,
DerivedFrom = rcsb_id,
))
update_sections = set() # add all related data
# Fill in the FASTA, Resolution, and b-factor fields of a PDBFile record.
# We do not use the information from the RCSB object - derived PDB files may have useful new data e.g. b-factors
# from PDB_REDO and may have different sequences.
self._add_pdb_file_information(db_record_object, designed_pdb_object, design_pdb_id, is_new_record, is_rcsb_pdb = False)
# We copy the transmembrane classification (assuming that the designed protein keeps the same characteristic)
if transmembrane == None:
db_record_object.Transmembrane = rcsb_db_record_object.Transmembrane
else:
db_record_object.Transmembrane = transmembrane
# Add a new PDBFile record
if is_new_record:
tsession.add(db_record_object)
tsession.flush()
assert(not publication) # todo: add publication entry here
# add all other data
self.add_pdb_data(tsession, design_pdb_id, update_sections = set(), ligand_mapping = ligand_mapping, chain_mapping = chain_mapping, ligand_params_file_paths = ligand_params_file_paths)
# add the designed PDB -> RCSB PDB chain mapping
print('Creating the chain mapping')
for pdb2pdb_chain_map in pdb2pdb_chain_maps:
pdb2pdb_chain_map = get_or_create_in_transaction(tsession, PDB2PDBChainMap, pdb2pdb_chain_map, missing_columns = ['ID'])
previously_added.add(design_pdb_id)
print('Success.\n')
if debug:
print('Debug call - rolling back the transaction.\n')
tsession.rollback()
else:
tsession.commit()
tsession.close()
except:
colortext.error('Failure.')
tsession.rollback()
tsession.close()
raise
def _check_derived_record_against_rcsb_record(self, tsession, database_pdb_id, pdb_object, chain_mapping):
'''Sanity checks for derived structures compared to their RCSB ancestor.'''
rcsb_chains = None
rcsb_record = None
chain_ids = sorted(set(pdb_object.seqres_sequences.keys() + pdb_object.atom_sequences.keys() + pdb_object.chain_types.keys()))
if chain_mapping:
db_record = tsession.query(PDBFile).filter(PDBFile.ID == database_pdb_id).one()
assert(db_record.FileSource != 'RCSB')
rcsb_chains = {}
rcsb_record = self.get_rcsb_record(db_record, tsession = tsession)
for r in tsession.query(PDBChain).filter(PDBChain.PDBFileID == rcsb_record.ID):
rcsb_chains[r.Chain] = r
for chain_id in chain_ids:
if chain_id in chain_mapping:
assert(pdb_object.chain_types[chain_id] == rcsb_chains[chain_mapping[chain_id]].MoleculeType)
else:
# We cannot assert(chain_ids == sorted(chain_mapping.keys())) as this can fail e.g. if a user splits
# chain C (protein + ligand) into chain A (protein) and chain X (ligand). Instead, we use this weaker assertion.
assert(pdb_object.chain_types[chain_id] != 'Protein')
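# Illustrative call sketch for add_designed_pdb, reusing the hypothetical values
# from the docstring example above (nothing here refers to real database content):
#
#   importer.add_designed_pdb(dict(
#       rcsb_id = '1K5D',
#       file_path = 'pdbs/1K5D2.pdb',
#       db_id = '1K5D_TP0',
#       techniques = 'PDB_REDO',
#       file_source = 'Roger Wilco',
#       user_id = 'sierra',
#       description = 'GSP1 complex, trimmed and renumbered.',
#       project_name = 'gsp1',
#       chain_mapping = dict(A = 'C', C = 'A'),
#       ligand_code_mapping = {'G13' : 'GNP'},
#       unchanged_ion_codes = ['MG'],
#       ligand_params_file_paths = {'G13' : 'temp/pdbs/1K5D2.params'},
#   ), debug = True)   # debug = True rolls the transaction back after a dry run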
def _test():
# Create an import API instance
importer = DataImportInterface.get_interface_with_config_file(cache_dir = sys_settings.cache.cache_dir, echo_sql = False)
# Access the SQLAlchemy session directly
session = importer.session
# Access the MySQLdb interface layer directly
DDG_db = importer.DDG_db # or importer.DDG_db_utf
# Update certain properties of RCSB files in the database
importer.update_pdbs(update_sections = set(['Residues', 'Publication']), start_at = None, restrict_to_file_source = 'RCSB')
#importer.update_pdbs(update_sections = set(['Ligands', 'Ions']), start_at = None, restrict_to_file_source = 'RCSB')
| mit |
T-B-F/pyBioUtils | BioUtils/core/BUplot.py | 1 | 12279 | import numpy as np
import scipy.stats as scst
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import linkage, dendrogram
from mpl_toolkits import axes_grid1
def simple_and_outward(ax, out=10):
simpleaxis(ax)
outward_spines(ax, out)
def outward_spines(ax, out=10):
ax.spines["left"].set_position(("outward", out))
ax.spines["bottom"].set_position(("outward", out))
def simpleaxis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
def bottomaxisonly(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.set_yticks([])
def leftaxisonly(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.set_xticks([])
def check_dim(*args):
size = {len(args[i]) for i in range(len(args)) if args[i] != list()}
assert len(size) == 1, "Different size between parameters"
#for i in range(len(args)-1):
#if args[i] != list() and args[i+1] != list():
#assert len(args[i]) == len(args[i+1]), "Different size between parameters"
def plot_scatter_hist(data_x, data_y, alpha=0.8, labels=list(), xlabel="", ylabel="", savefig=None, show=True,
plot_corr=False):
""" combine a scatter plot with two histograms, one for each of the axis
"""
check_dim(data_x, data_y, labels)
#colors = mpl.rcParams['axes.color_cycle']
colors = [color['color'] for color in list(mpl.rcParams['axes.prop_cycle'])]
nullfmt = NullFormatter()
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
fig = plt.figure(1, figsize=(8, 8))
axScatter = plt.axes(rect_scatter)
axHistx = plt.axes(rect_histx)
axHisty = plt.axes(rect_histy)
simpleaxis(axScatter)
bottomaxisonly(axHistx)
leftaxisonly(axHisty)
# no labels
axHistx.xaxis.set_major_formatter(nullfmt)
axHisty.yaxis.set_major_formatter(nullfmt)
# the scatter plot:
ps = list()
xmax, xmin = -np.inf, np.inf
ymax, ymin = -np.inf, np.inf
k = 0
if labels != list():
for i in range(len(data_x)):
p = axScatter.scatter(data_x[i], data_y[i], label=labels[i], color=colors[k], alpha=alpha)
# now determine nice limits
xmax = max(np.max(data_x[i]), xmax)
xmin = min(np.min(data_x[i]), xmin)
ymax = max(np.max(data_y[i]), ymax)
ymin = min(np.min(data_y[i]), ymin)
ps.append(p)
k += 1
if k >= len(colors):
k = 0
else:
for i in range(len(data_x)):
p = axScatter.scatter(data_x[i], data_y[i], color = colors[k], alpha=alpha)
# now determine nice limits
xmax = max(np.max(data_x[i]), xmax)
xmin = min(np.min(data_x[i]), xmin)
ymax = max(np.max(data_y[i]), ymax)
ymin = min(np.min(data_y[i]), ymin)
ps.append(p)
k += 1
if k >= len(colors):
k = 0
if plot_corr:
k = 0
for i in range(len(data_x)):
x = np.linspace(min(data_x[i]), max(data_x[i]), 100)
cor, pval = scst.pearsonr(data_x[i], data_y[i])
slope, intercept, r_value, p_value, std_err = scst.linregress(data_x[i], data_y[i])
r2 = r_value ** 2
print(i, cor, r2)
f = lambda x , a, b : x * a + b
y = f(x, slope, intercept)
axScatter.plot(x, y, linestyle="-.", color="black")
k += 1
xsize = max(len(str(int(xmax))), len(str(int(xmin)))) - 1
ysize = max(len(str(int(ymax))), len(str(int(ymin)))) - 1
xoffset = 0.1 * (10 ** xsize)
yoffset = 0.1 * (10 ** ysize)
axScatter.set_xlim((xmin-xoffset, xmax+xoffset))
axScatter.set_ylim((ymin-yoffset, ymax+yoffset))
xbins = np.linspace(xmin, xmax, 21)
k = 0
for i in range(len(data_x)):
axHistx.hist(data_x[i], bins=xbins, alpha=alpha)
axHistx.axvline(x=np.median(data_x[i]), ymin=0, ymax=10, color=colors[k], linestyle="--")
k += 1
if k >= len(colors):
k = 0
ybins = np.linspace(ymin, ymax, 21)
k = 0
for i in range(len(data_y)):
axHisty.hist(data_y[i], bins=ybins, orientation='horizontal', alpha=alpha)
axHisty.axhline(y=np.median(data_y[i]), xmin=0, xmax=10, color=colors[k], linestyle="--")
k += 1
if k >= len(colors):
k = 0
axHistx.set_xlim(axScatter.get_xlim())
axHisty.set_ylim(axScatter.get_ylim())
if xlabel:
axScatter.set_xlabel(xlabel)
if ylabel:
axScatter.set_ylabel(ylabel)
if labels != list():
plt.legend(handles=ps)
if savefig:
plt.savefig(savefig, dpi=600)
if show:
plt.show()
return [axScatter, axHistx, axHisty]
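# --- Minimal usage sketch (added example, not part of the original module) ---
# It exercises plot_scatter_hist with random data; only numpy/matplotlib from the
# imports above are assumed. Call _demo_plot_scatter_hist(show=True) to display it.
def _demo_plot_scatter_hist(show=False):
    rng = np.random.RandomState(0)
    data_x = [rng.normal(0.0, 1.0, 200), rng.normal(2.0, 1.0, 200)]
    data_y = [rng.normal(0.0, 1.0, 200), rng.normal(1.0, 2.0, 200)]
    # Two labelled clouds with marginal histograms and median guide lines
    return plot_scatter_hist(data_x, data_y, labels=["set A", "set B"],
                             xlabel="x value", ylabel="y value", show=show)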
def jitter_plot(data, labels=list(), xlabel=list(), ylabel="", alpha=0.5, savefig=None, show=True):
""" create a jitter plot
"""
fig, axJitter = plt.subplots()
check_dim(data, labels, xlabel)
simpleaxis(axJitter)
indexes = np.arange(1, len(data)+1)
for i in range(len(data)):
x = np.random.rand(len(data[i])) * 0.5 + (i+1-0.25)
labeli=labels[i] if i < len(labels) else ""
axJitter.scatter(x, data[i], s=12, alpha=alpha, label=labeli)
ymean = sum(data[i])/len(data[i])
axJitter.plot([i+1-0.20, i+1.20], [ymean, ymean], linewidth=2, linestyle="--")
axJitter.set_xticks(indexes)
if xlabel != list():
axJitter.set_xticklabels(xlabel)
if ylabel:
axJitter.set_ylabel(ylabel)
plt.xlim(0.5, len(indexes)+0.5)
if savefig:
plt.savefig(savefig, dpi=600)
if show:
plt.show()
return axJitter
def fancy_box(data, labels=list(), xlabel=list(), ylabel="", alpha=0.5, savefig=None, show=True):
""" make a fancy box plot -> jitter + box
"""
fig, axBox = plt.subplots()
check_dim(data, labels, xlabel)
simpleaxis(axBox)
indexes = np.arange(1, len(data)+1)
axBox.boxplot(data, showfliers=False) # one box per dataset; tick labels are applied below
for i in range(len(data)):
x = np.random.rand(len(data[i])) * 0.5 + (i+1-0.25)
labeli=labels[i] if i < len(labels) else ""
axBox.scatter(x, data[i], s=12, alpha=alpha, label=labeli)
#ymean = sum(data[i])/len(data[i])
ymean = np.median(np.array(data[i]))
axBox.plot([i+1-0.20, i+1.20], [ymean, ymean], linewidth=2, linestyle="--")
axBox.set_xticks(indexes)
if xlabel != list():
axBox.set_xticklabels(xlabel)
if ylabel:
axBox.set_ylabel(ylabel)
plt.xlim(0.5, len(indexes)+0.5)
if savefig:
plt.savefig(savefig, dpi=600)
if show:
plt.show()
return axBox
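# Minimal usage sketch for fancy_box (added example; random data, assuming the
# boxplot call above draws the `data` argument).
def _demo_fancy_box(show=False):
    rng = np.random.RandomState(1)
    data = [rng.normal(0.0, 1.0, 50), rng.normal(1.5, 1.0, 50)]
    return fancy_box(data, labels=["neutral", "deleterious"],
                     xlabel=["neutral", "deleterious"], ylabel="score", show=show)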
def clustermap(x, draw_top=True, draw_left=True, colorbar_pad=0.5, cmap=plt.cm.viridis,
col_labels = None, row_labels = None, xlabel_rotation = -45,
ylabel_rotation = 0, label_fontsize = 8, figsize=(12, 8)):
''' adapted from https://github.com/WarrenWeckesser/heatmapcluster/blob/master/heatmapcluster.py
'''
assert draw_top or draw_left, "Warning, at least one dendrogram (draw_top or draw_left) must be enabled (use a standard heatmap otherwise)"
if col_labels is None:
col_labels = np.arange(x.shape[1])
if row_labels is None:
row_labels = np.arange(x.shape[0])
fig, ax_heatmap = plt.subplots(figsize=figsize)
ax_heatmap.yaxis.tick_right()
divider = axes_grid1.make_axes_locatable(ax_heatmap)
if draw_left:
ax_dendleft = divider.append_axes("left", 1.2, pad=0.0, sharey=ax_heatmap)
ax_dendleft.set_frame_on(False)
left_threshold = -1
side_orientation = 'left'
lnk0 = linkage(pdist(x))
dg0 = dendrogram(lnk0, ax=ax_dendleft, orientation=side_orientation, color_threshold=left_threshold, no_labels=True)
if draw_top:
ax_dendtop = divider.append_axes("top", 1.2, pad=0.0, sharex=ax_heatmap)
ax_dendtop.set_frame_on(False)
top_threshold = -1
lnk1 = linkage(pdist(x.T))
dg1 = dendrogram(lnk1, ax=ax_dendtop, color_threshold=top_threshold, no_labels=True)
colorbar_width = 0.45
ax_colorbar = divider.append_axes("right", colorbar_width, pad=colorbar_pad)
# Reorder the values in x to match the order of the leaves of
# the dendrograms.
if draw_left:
z = x[dg0['leaves'], :]
else:
z = x
if draw_top:
z = z[:, dg1['leaves']]
if draw_top:
ymax = ax_dendtop.get_xlim()[1]
else:
ymax = ax_dendleft.get_xlim()[1]
if draw_left:
im = ax_heatmap.imshow(z[::-1], aspect='auto', cmap=cmap, interpolation='nearest',
extent=(0, ymax, 0, ax_dendleft.get_ylim()[1]))
else:
im = ax_heatmap.imshow(z[::-1], aspect='auto', cmap=cmap, interpolation='nearest',
extent=(0, ymax, 0, ax_heatmap.get_ylim()[1]))
xlim = ax_heatmap.get_xlim()[1]
ncols = len(col_labels)
halfxw = 0.5*xlim/ncols
ax_heatmap.xaxis.set_ticks(np.linspace(halfxw, xlim - halfxw, ncols))
if draw_top:
ax_heatmap.xaxis.set_ticklabels(np.array(col_labels)[dg1['leaves']])
else:
ax_heatmap.xaxis.set_ticklabels(col_labels)
ylim = ax_heatmap.get_ylim()[1]
nrows = len(row_labels)
halfyw = 0.5*ylim/nrows
ax_heatmap.yaxis.set_ticks(np.linspace(halfyw, ylim - halfyw, nrows))
if draw_left:
ax_heatmap.yaxis.set_ticklabels(np.array(row_labels)[dg0['leaves']])
else:
ax_heatmap.yaxis.set_ticklabels(row_labels)
# Make the dendrogram labels invisible.
if draw_left:
plt.setp(ax_dendleft.get_yticklabels() + ax_dendleft.get_xticklabels(), visible=False)
if draw_top:
plt.setp(ax_dendtop.get_xticklabels() + ax_dendtop.get_yticklabels(), visible=False)
# Hide all tick lines.
lines = (ax_heatmap.xaxis.get_ticklines() +
ax_heatmap.yaxis.get_ticklines())
plt.setp(lines, visible=False)
if draw_left:
lines = (ax_dendleft.xaxis.get_ticklines() +
ax_dendleft.yaxis.get_ticklines())
plt.setp(lines, visible=False)
if draw_top:
lines = (ax_dendtop.xaxis.get_ticklines() +
ax_dendtop.yaxis.get_ticklines())
plt.setp(lines, visible=False)
xlbls = ax_heatmap.xaxis.get_ticklabels()
plt.setp(xlbls, rotation=xlabel_rotation)
plt.setp(xlbls, fontsize=label_fontsize)
ylbls = ax_heatmap.yaxis.get_ticklabels()
plt.setp(ylbls, rotation=ylabel_rotation)
plt.setp(ylbls, fontsize=label_fontsize)
cb = plt.colorbar(im, cax=ax_colorbar)
# This code to draw the histogram in the colorbar can
# probably be simplified.
# Also, there are several values that someone, sometime,
# will probably want to change, but for now, all the
# details are hardcoded.
nbins = min(80, max(int(x.size/10+0.5), 11))
counts, edges = np.histogram(x.ravel(), bins=nbins)
max_count = counts.max()
counts = counts / float(max_count) # float division so the curve is scaled to [0, 1]
edges = (edges - edges[0])/(edges[-1] - edges[0])
# cc and ee contain the values in counts and edges, repeated
# as needed to draw the histogram curve similar to the 'steps-mid'
# drawstyle of the plot function.
cc = np.repeat(counts, 2)
ee = np.r_[edges[0], np.repeat(edges[1:-1], 2), edges[-1]]
ax_colorbar.plot(cc, ee, 'k', alpha=0.5)
ax_colorbar.xaxis.set_ticks([0, 1])
pctstr = '%.2g%%' % (100*max_count/x.size)
ax_colorbar.xaxis.set_ticklabels(['0', pctstr])
ax_colorbar.xaxis.set_label_text('Histogram\n(% count)')
plt.show()
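# Minimal usage sketch for clustermap (added example; a random 20x10 matrix with
# made-up labels, relying only on the imports above). Note that clustermap itself
# calls plt.show(), so this opens a window when run interactively.
def _demo_clustermap():
    rng = np.random.RandomState(2)
    x = rng.rand(20, 10)
    row_labels = ["row-%d" % i for i in range(20)]
    col_labels = ["col-%d" % j for j in range(10)]
    clustermap(x, col_labels=col_labels, row_labels=row_labels, figsize=(10, 7))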
| mit |
ComNets-Bremen/ResourceMonitor | tools/analyze/analyze.py | 1 | 7867 | #!/usr/bin/env python
"""
A simple data analysis script for the ResourceMonitor 2.0
Jens Dede, ComNets University of Bremen
jd@comnets.uni-bremen.de
"""
import gzip
import json
from datetime import datetime
import argparse
import matplotlib
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
from dateutil import parser as dparser
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, NullLocator, FuncFormatter
from resourceMonitor import graphHelper
from resourceMonitor import resourceHelpers
LINESTYLE = ":"
MARKER = None
LINEWIDTH = 0.8
trueMin = -0.1
trueMax = 1.1
argparser = argparse.ArgumentParser(description="Analyze exported files from ResourceMonitor")
argparser.add_argument('files', metavar='filename', type=str, nargs="+", help="Filename to process")
argparser.add_argument('-v', "--verbose", action="store_true", help="Increase debug output")
argparser.add_argument('--show', action="store_true", help="Show results instead of plotting them into an output file.")
argparser.add_argument('--objects', action="store_true", help="Print json object structure.")
argparser.add_argument('--xmin', default=None, type=str, help="Minimum datetime for x axis and analysis")
argparser.add_argument('--xmax', default=None, type=str, help="Minimum datetime for x axis and analysis")
argparser.add_argument('-z', "--gzip", action="store_true", help="Input file is gzip compressed")
argparser.add_argument('-d', "--date", action="store_true", help="Show dates in x axis")
#argparser.add_argument('--label', metavar="Label to use", default=None, type=str, help="Add label to the output")
#argparser.add_argument('--timelimit', metavar='timelimit', type=float, default=None, help="Stop reading values after n seconds")
args = argparser.parse_args()
yAxisMajorLocator = MultipleLocator(1)
fig, (batteryAxis, chargingAxis, screenAxis, wifiAxis, mobiledataAxis) = \
plt.subplots(nrows=5, figsize=(6,5), sharex=True)
xMin = xMax = None
# How to open the file. Default: uncompressed json
opener = open
if args.gzip:
print "Using gzip compressed json file"
opener = gzip.open
resourceDataHandlers = {}
# Load data into resourceDataHandlers
for f in args.files:
with opener(f, "rb") as rf:
data = json.load(rf)
resourceDataHandlers[f] = resourceHelpers.ResourceDataHandler(data, args.verbose)
graphMin, graphMax = resourceDataHandlers[f].getMinMax()
if xMin == None:
xMin = graphMin
if xMax == None:
xMax = graphMax
xMin = max(xMin, graphMin)
xMax = min(xMax, graphMax)
# Check if mMin / xMax were set via CLI
if args.xmin != None:
xMin = dparser.parse(args.xmin).replace(tzinfo=None)
print "xMin set to", xMin, "via CLI"
if args.xmax != None:
xMax = dparser.parse(args.xmax).replace(tzinfo=None)
print "xMax set to", xMax, "via CLI"
print "Using xMin", xMin, "and xMax", xMax
# Print structure of json object?
if args.objects:
for f in resourceDataHandlers:
for af in resourceDataHandlers[f].getArrayFields():
print "Field:", af
for d in resourceDataHandlers[f].getFieldTypes(af):
print "*", d
# Create graph
for f in resourceDataHandlers:
## Battery level
x, y = resourceDataHandlers[f].getDatasets("BatteryStatus", "percentage")
batteryAxis.plot(x, y, linestyle=LINESTYLE, marker=MARKER, linewidth=LINEWIDTH)
# Battery charging
x, y = resourceDataHandlers[f].getSmoothedDatasets("BatteryStatus", "is_charging")
chargingAxis.plot(x, y, linestyle=LINESTYLE, marker=MARKER, linewidth=LINEWIDTH)
# Screen status
x, y = resourceDataHandlers[f].getSmoothedDatasets("ScreenStatus", "screen_status")
screenAxis.plot(x, y, linestyle=LINESTYLE, marker=MARKER, linewidth=LINEWIDTH)
# WiFi status
x, y = resourceDataHandlers[f].getSmoothedDatasets("WiFiStatus", "wifi_status")
wifiData = zip(x,y)
convertedData = []
for line in wifiData:
# authenticating, connected, connecting, obtaining ip, scanning
if line[1] == 1 or \
line[1] == 4 or \
line[1] == 5 or \
line[1] == 9 or \
line[1] == 10:
convertedData.append([line[0], 1])
else:
convertedData.append([line[0], 0])
x, y = zip(*convertedData)
wifiAxis.plot(x, y, linestyle=LINESTYLE, marker=MARKER, linewidth=LINEWIDTH)
# Cellular network status
x, y = resourceDataHandlers[f].getSmoothedDatasets("CellularStatus", "cellular_type")
cellData = zip(x, y)
convertedData = []
for line in cellData:
if line[1] == "LTE" or\
line[1] == "EDGE" or\
line[1] == "HSPA+" or\
line[1] == "GPRS" or \
line[1] == "HSUPA":
convertedData.append([line[0], 1])
#print line[1]
elif line[1] == "NONE" or line[1] == "UNKNOWN":
convertedData.append([line[0], 0])
else:
pass
if len(convertedData) > 0:
x, y = zip(*convertedData)
mobiledataAxis.plot(x, y, linestyle=LINESTYLE, marker=MARKER, linewidth=LINEWIDTH)
# Plotting
batteryAxis.yaxis.set_major_formatter(FuncFormatter(graphHelper.percentage_formatter))
batteryAxis.set_title("Battery Level")
chargingAxis.yaxis.set_major_locator(yAxisMajorLocator)
chargingAxis.yaxis.set_minor_locator(NullLocator())
chargingAxis.yaxis.set_major_formatter(FuncFormatter(graphHelper.trueFalse_formatter))
chargingAxis.set_title("Device Charging")
chargingAxis.set_ylim([trueMin, trueMax])
screenAxis.yaxis.set_major_locator(yAxisMajorLocator)
screenAxis.yaxis.set_minor_locator(NullLocator())
screenAxis.yaxis.set_major_formatter(FuncFormatter(graphHelper.onOff_formatter))
screenAxis.set_title("Screen Status")
screenAxis.set_ylim([trueMin, trueMax])
wifiAxis.yaxis.set_major_locator(yAxisMajorLocator)
wifiAxis.yaxis.set_minor_locator(NullLocator())
wifiAxis.yaxis.set_major_formatter(FuncFormatter(graphHelper.trueFalse_formatter))
wifiAxis.set_title("WiFi Connected")
wifiAxis.set_ylim([trueMin, trueMax])
mobiledataAxis.yaxis.set_major_locator(yAxisMajorLocator)
mobiledataAxis.yaxis.set_minor_locator(NullLocator())
mobiledataAxis.yaxis.set_major_formatter(FuncFormatter(graphHelper.trueFalse_formatter))
mobiledataAxis.set_title("Mobile Data Connected")
mobiledataAxis.set_ylim([trueMin, trueMax])
if args.date:
mobiledataAxis.xaxis.set_major_formatter(mdates.DateFormatter('%d.%m. %H:%M'))
else:
mobiledataAxis.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
mobiledataAxis.set_xlim([xMin, xMax])
fig.autofmt_xdate()
fig.tight_layout()
if args.show:
plt.show()
else:
plt.savefig("example-resources.pdf")
print "Statistics"
for key in resourceDataHandlers:
print "Processing file", key
print "BatteryStatus/is_charging", resourceDataHandlers[key].getStatePercentages("BatteryStatus", "is_charging", xMin, xMax)
print "ScreenStatus", "screen_status", resourceDataHandlers[key].getStatePercentages("ScreenStatus", "screen_status", xMin, xMax)
print "WiFiStatus/wifi_status", resourceDataHandlers[key].getStatePercentages("WiFiStatus", "wifi_status", xMin, xMax)
print "CellularStatus/cellular_type", resourceDataHandlers[key].getStatePercentages("CellularStatus", "cellular_type", xMin, xMax)
print "Max Battery gap", resourceDataHandlers[key].getMaxBatteryGap()
# WiFi Codes:
# AUTHENTICATING: 1
# BLOCKED: 2
# CAPTIVE_PORTAL_CHECK: 3
# CONNECTED: 4
# CONNECTING: 5
# DISCONNECTED: 6
# FAILED: 7
# IDLE: 8
# OBTAINING_IPADDR: 9
# SCANNING: 10
# SUSPENDED: 11
# VERIFYING_POOR_LINK: 12
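# Illustrative only (not used by the script above): the "connected-ish" WiFi states
# tested in the plotting loop could equally be collected in a named set, e.g.
#   WIFI_ACTIVE_STATES = {1, 4, 5, 9, 10}
#   ... if line[1] in WIFI_ACTIVE_STATES: ...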
| gpl-3.0 |
mira67/TakeoutDataAnalysis | python/userPattern.py | 1 | 3440 | #exploratory analysis for user spatial patterns
#Author: Qi Liu
#Email: qliu.hit@gmail.com
import ctypes
import os
import sys
import time
if getattr(sys, 'frozen', False):
# Override dll search path.
ctypes.windll.kernel32.SetDllDirectoryW('C:/Users/ngj/AppData/Local/Continuum/Anaconda3/Library/bin/')
# Init code to load external dll
ctypes.CDLL('mkl_avx2.dll')
ctypes.CDLL('mkl_def.dll')
ctypes.CDLL('mkl_vml_avx2.dll')
ctypes.CDLL('mkl_vml_def.dll')
# Restore dll search path.
ctypes.windll.kernel32.SetDllDirectoryW(sys._MEIPASS)
import psycopg2
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm
from mpl_toolkits.basemap import Basemap
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from matplotlib.colors import Normalize
#global timing
start = time.time()
path = 'E:/myprojects/takeout/code/'
#1. connect to database
try:
conn = psycopg2.connect("dbname='urbandata' user='postgres' host='localhost' password='1234'")
except:
print "I am unable to connect to the database"
cur = conn.cursor()
#2. read in list of potential users from csv, generated from matlab
users = pd.read_excel(path+'topusers.xlsx');
#print users.head()
#3. go through each user, query visited shops (freq, avgTime, lat, lon)
sql = """
SELECT rates.shop_id, count(*) as sfreq, avg(to_number(rates.cost_time,'999')), shops.wgs_lat, shops.wgs_lon
FROM postgres.baidu_takeout_rating as rates
LEFT JOIN baidu_takeout_shops as shops ON shops.shop_id = rates.shop_id
WHERE rates.pass_uid = %(user_id)s
GROUP BY rates.shop_id, shops.wgs_lat, shops.wgs_lon
ORDER BY sfreq;
"""
def plot_area(m,pos):
x, y = m(pos[4], pos[3])
size = 5
m.plot(x, y, 'o', markersize=size, color='#f45642', alpha=0.8)
plt.text(x,y,pos[1],fontsize=10,fontweight='medium',
ha='center',va='center',color='b')
#users = ['673426103'];
for index, row in users.iterrows():
if index >= 289:
print index
user = str(row['user'])
try:
cur.execute(sql, {'user_id': user})
except:
print "I am not able to query!"
rows = cur.fetchall()
#3.1 for each user , draw shops on map with freq and avgTime labels, on a beijing base map
#westlimit=116.0431; southlimit=39.6586; eastlimit=116.7599; northlimit=40.1852
fig, ax = plt.subplots(figsize=(10,20))
m = Basemap(resolution='c', # c, l, i, h, f or None
projection='merc',
lat_0=39.905960083, lon_0=116.391242981,
llcrnrlon=116.185913, llcrnrlat= 39.754713, urcrnrlon=116.552582, urcrnrlat=40.027614)
m.drawmapboundary(fill_color='#46bcec')
m.fillcontinents(color='#f2f2f2',lake_color='#46bcec')
m.drawcoastlines()
m.readshapefile(path+'roads', 'bjroads')
for row in rows:
plot_area(m,row)
#plt.show()
plt.savefig(path+'/figs/'+user+'.png',bbox_inches='tight')
plt.close()
end = time.time()
runtime = end - start
msg = "Took {time} seconds to complete"
print(msg.format(time=runtime))
print 'TEST DONE'
#3.2 save figure for each user to a directory
#3.3 done and report total time
"""
print "\nShow me the databases:\n"
for row in rows:
print " ", row[1]
"""
| gpl-3.0 |
xuleiboy1234/autoTitle | tensorflow/tensorflow/examples/learn/hdf5_classification.py | 75 | 2899 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, hdf5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
import h5py # pylint: disable=g-bad-import-order
X_FEATURE = 'x' # Name of the input feature.
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # Note that we are saving and loading the iris data in HDF5 format as a
  # simple demonstration here.
h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
x_train = np.array(h5f['X_train'])
x_test = np.array(h5f['X_test'])
y_train = np.array(h5f['y_train'])
y_test = np.array(h5f['y_test'])
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column(
X_FEATURE, shape=np.array(x_train).shape[1:])]
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=200)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class_ids'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| mit |
RTS2/rts2 | scripts/rts2saf/rts2saf/fitdisplay.py | 3 | 3586 | #!/usr/bin/python
# (C) 2013, Markus Wildi, markus.wildi@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
__author__ = 'markus.wildi@bluewin.ch'
import sys
if 'matplotlib' not in sys.modules:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
class FitDisplay(object):
"""Display a fit with matplotlib
:var date: date when focus run started
:var comment: optional comment
:var logger: :py:mod:`rts2saf.log`
"""
def __init__(self, date=None, comment=None, logger=None):
self.date=date
self.logger=logger
self.comment=comment
self.fig=None
self.ax1=None
self.fig = plt.figure()
self.ax1 = self.fig.add_subplot(111)
def fitDisplay(self, dataFit=None, resultFit=None, show=True, display=False, xdisplay = None):
"""Display fit using matplotlib
:param dataFit: :py:mod:`rts2saf.data.DataFit`
:param resultFit: :py:mod:`rts2saf.data.ResultFit`
:param display: if True display and save plot to file, False save only
:return: :py:mod:`rts2saf.data.DataFit`.plotFn
"""
try:
x_pos = np.linspace(dataFit.pos.min(), dataFit.pos.max())
except Exception, e:
self.logger.error('fitDisplay: numpy error:\n{0}'.format(e))
return e
self.ax1.plot(dataFit.pos, dataFit.val, 'ro', color=resultFit.color)
self.ax1.errorbar(dataFit.pos, dataFit.val, xerr=dataFit.errx, yerr=dataFit.erry, ecolor='black', fmt='none')
if resultFit.fitFlag:
line, = self.ax1.plot(x_pos, dataFit.fitFunc(x_pos, p=resultFit.fitPar), 'r-', color=resultFit.color)
if self.comment:
self.ax1.set_title('rts2saf, {0},{1},{2}C,{3},{4}'.format(self.date, dataFit.ftName, dataFit.ambientTemp, resultFit.titleResult, self.comment), fontsize=12)
else:
self.ax1.set_title('rts2saf, {0},{1},{2}C,{3}'.format(self.date, dataFit.ftName, dataFit.ambientTemp, resultFit.titleResult), fontsize=12)
self.ax1.set_xlabel('FOC_POS [tick]')
self.ax1.set_ylabel(resultFit.ylabel)
self.ax1.grid(True)
if show and display and xdisplay:
# NO: self.fig.show()
plt.show()
elif display and not xdisplay:
self.logger.warn('fitDisplay: NO $DISPLAY no plot')
# no return here, save plot
try:
self.fig.savefig(dataFit.plotFn)
self.logger.info('fitDisplay: storing plot file: {0}'.format(dataFit.plotFn))
return dataFit.plotFn
except Exception, e:
self.logger.error('fitDisplay: can not save plot to: {0}, matplotlib error:\n{1}'.format(dataFit.plotFn,e))
return e
| lgpl-3.0 |
Skarzee/python-keystroke-dynamics | main.py | 1 | 7536 | __author__ = 'Tristan Watson'
# Keystroke Dynamics software that covers the following key functionality:
# 1. User File management
# 2. Input gathering and management (including storage)
# 3. Plotting of keystrokes taking into consideration both up events and down events.
import pyHook
import pythoncom
import os
import matplotlib.pyplot as plt
import json
import numpy
import sys
# File is to be opened and closed numerous times. Should be re-written as a class.
global userFilePath
time_between_ups = []
time_between_downs = []
def banner():
print("------------------------------")
print("Keystroke Dynamics Software")
print("Author: Tristan Watson, 2015")
print("------------------------------")
print("Current Working Directory: ", os.getcwd())
def menuOptions():
#Menu
print("Please choose a following option:")
print("1: User Login or Create New")
print("2: Username and Password Input")
print("3: Plot Graph (Based on Username)")
print("4: Help")
print("5: Exit")
def menuHandler():
choice = input("Please enter option choice: ")
if choice == "1":
getUserFileWriteSession()
elif choice == "2":
usernamePasswordInput()
elif choice == "3":
plotMenu()
elif choice == "4":
documentation()
elif choice == "5":
print("Program Quitting")
sys.exit()
else:
print("Please select a valid option (1-5)")
menuHandler()
# For writing events
def getUserFileWriteSession():
print("File Location: ", os.getcwd())
username = input("Enter your username: ")
userFileName = (username + ".txt")
# If directory DNE.
if not os.path.isdir((os.path.join("./", "accounts"))):
# Create it.
os.makedirs("accounts")
if os.path.exists(os.path.join("accounts", userFileName)):
userFile = (os.path.join("accounts", userFileName))
else:
print("No File Exists! Creating New User")
if os.path.exists(os.path.join("accounts", userFileName)):
print("Username exists! Load it or choose different name")
else:
userFile = (os.path.join("accounts", userFileName))
writeFile = open(userFile, "w")
# Have to prime a file ready to be used with JSON
fileSetup = json.dumps([])
writeFile.write(fileSetup)
writeFile.close()
print("User Successfully Created", userFile)
print("Your account has been created: ", userFile)
global userFilePath
userFilePath = userFile
# Used for matplotlib only
def getUserFileReadSession():
userFileName = input("Username:") + ".txt"
if os.path.exists(os.path.join("accounts", userFileName)):
userFile = (os.path.join("accounts", userFileName))
open(userFile, "r")
return "File Loaded Successfully"
else:
print("Username does not exist")
def plotMenu():
print("What would you like to plot?")
print("1. Key Up")
print("2. Key Down")
print("3. Back")
print("4. Quit")
plotMenuHandler()
def plotMenuHandler():
plotChoice = input("Choice: ")
if plotChoice == "1":
timeBetweenUPS()
elif plotChoice == "2":
timeBetweenDOWNS()
elif plotChoice == "3":
menuHandler()
elif plotChoice == "4":
sys.exit()
else:
print("Please Choose Valid Option")
def plotGraph(y):
userInput = ("Enter if you want to plot KeyUp or KeyDowns")
data = y
x = list(range(len(data)))
# Average
average = numpy.mean(data)
# Words Per Minute = (Chr / 5) / Time
wpm = len(data) / 5
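    # Note (added comment): len(data) counts keystroke intervals, so this is only the
    # word-count estimate (keystrokes / 5); the division by elapsed time from the
    # formula above is not applied here.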
# MatPlotLib Handling
plt.title("Time Elapsed Between Down Events")
plt.ylabel("Key Number")
plt.ylabel("Milliseconds")
plt.plot(x, y)
# Format average display box
plt.text(5, 35, ("WPM: ", wpm, "Average", average) ,style='italic',
bbox={'facecolor':'red', 'alpha':0.5, 'pad':10})
plt.show()
def documentation():
    print("The menu accepts the number corresponding to each option.")
print ("For example, press 2 to enter information.")
print ("A file must be created or loaded first.")
    print("If no file is defined, the program will exit.")
    print("To end input in option '2', use the ESC character.")
print ("Option 3 gives an option to either print out a graph of 'up' or 'down' events")
def userRecordData(eventList):
userFile = userFilePath
#Read File to Grab Sessions
readUserFile = open(userFile, "r")
testFile = readUserFile.read()
#print(testFile)
userSessionList = json.loads(testFile)
readUserFile.close()
# Create New Session and Write To File
writeUserFile = open(userFile, "w")
newUserEventList = eventList
userSessionList.append(newUserEventList)
data = json.dumps(userSessionList)
writeUserFile.write(data)
writeUserFile.close()
def timeBetweenUPS():
# Define the list first
eventFile = open(userFilePath, "r")
eventList = json.loads(eventFile.read())
ups = ([(etype, etime) for etype, etime in eventList[0] if etype == "Up"])
while len(ups) > 1:
#Get the time from the tuple
startTime = ups.pop(0)[1]
betweenTime = ups[0][1] - startTime
time_between_ups.append(betweenTime)
#average = numpy.mean(time_between_downs)
plotGraph(time_between_ups)
def timeBetweenDOWNS():
# Define the list first
eventFile = open(userFilePath, "r")
eventList = json.loads(eventFile.read())
downs = ([(etype, etime) for etype, etime in eventList[0] if etype == "Down"])
while len(downs) > 1:
startTime = downs.pop(0)[1] #Get the time from the tuple
betweenTime = downs[0][1] - startTime
time_between_downs.append(betweenTime)
#average = numpy.mean(time_between_downs)
plotGraph(time_between_downs)
def usernamePasswordInput():
keyLogger = KeyLogger()
hookManager = pyHook.HookManager()
hookManager.KeyDown = keyLogger.keyDownEvent
hookManager.KeyUp = keyLogger.keyUpEvent
hookManager.HookKeyboard()
keyLogger.mainLoop()
# Unhooks the keyboard, no more data recorded, returns to menu
hookManager.UnhookKeyboard()
class KeyLogger(object):
def __init__(self):
self.enterPressed = False
self.eventList = []
def keyDownEvent(self, event):
self.storeEvent("Down", event)
return True
# Fixes Requires Integer Bug (Got Nonetype)
def keyUpEvent(self, event):
self.storeEvent("Up", event)
return True
# Fixes Requires Integer (Got Nonetype)
def mainLoop(self):
while not self.enterPressed:
pythoncom.PumpWaitingMessages()
def storeEvent(self, activity, event):
keystrokeTime = int(event.Time)
#keystrokeCharacter = chr(event.Ascii)
self.eventList.append ((activity, int(keystrokeTime)))
# Chosen to use Escape key (ESC) due to input using a similar method
# Enter Key - KeyCode: 13 Ascii: 13 ScanCode: 28 - ESC = 27 @ Ascii
if event.Ascii == 27:
self.enterPressed = True
userRecordData(self.eventList)
# Starts the program
banner()
#Main Program Loop
while True:
menuOptions()
menuHandler()
| mit |
theoryno3/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
I2Cvb/prostate | scratch/slic_segmentation.py | 1 | 1330 | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import maskslic as seg
from protoclass.data_management import T2WModality
from protoclass.data_management import GTModality
path_t2w = '/data/prostate/experiments/Patient 1036/T2W'
path_gt = ['/data/prostate/experiments/Patient 1036/GT_inv/prostate']
label_gt = ['prostate']
# Read the data
t2w_mod = T2WModality()
t2w_mod.read_data_from_path(path_t2w)
print t2w_mod.data_.shape
# Extract the information about the spacing
spacing_itk = t2w_mod.metadata_['spacing']
# Remember that this is in ITK format X, Y, Z and that we are in Y, X, Z
spacing = (spacing_itk[1], spacing_itk[0], spacing_itk[2])
print spacing
# Read the ground-truth
gt_mod = GTModality()
gt_mod.read_data_from_path(label_gt, path_gt)
print gt_mod.data_[0].shape
img = t2w_mod.data_[:, :, 32]
img = (img - np.min(img)) * ((1.) / (np.max(img) - np.min(img)))
roi = gt_mod.data_[0, :, :, 32].astype(bool)
# Make SLIC over-segmentation
segments = seg.slic(img, compactness=1,
seed_type='nplace',
multichannel=False,
convert2lab=False,
enforce_connectivity=True,
mask=roi, n_segments=50,
recompute_seeds=True,
plot_examples=True)
| mit |
pratapvardhan/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 12 | 34972 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
@deprecated("l1_cross_distances is deprecated and will be removed in 0.20.")
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
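# Small worked example (illustrative, not part of the original module):
# for X = [[0., 1.], [1., 3.]], l1_cross_distances(X) returns D = [[1., 2.]]
# and ij = [[0, 1]], i.e. the single pair (0, 1) with componentwise
# distances |0 - 1| = 1 and |1 - 3| = 2.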
@deprecated("GaussianProcess is deprecated and will be removed in 0.20. "
"Use the GaussianProcessRegressor instead.")
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The legacy Gaussian Process model class.
Note that this class is deprecated and will be removed in 0.20.
Use the GaussianProcessRegressor instead.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget) < 0.:
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| bsd-3-clause |
paloczy/ap_tools | fit.py | 1 | 6250 | # Description: Functions to fit statistical models to data series.
# Author: André Palóczy
# E-mail: paloczy@gmail.com
__all__ = ['sinfitn',
'fourfilt']
import numpy as np
import matplotlib.pyplot as plt
from numpy.fft import fft, ifft
def sinfitn(x, t, periods, constant=True, line=True, return_misfit=False, return_params=False):
"""
USAGE
-----
xm = sinfitn(d, t, periods, constant=True, line=True, return_misfit=False, return_params=False)
Returns a statistical model 'xm(t)' with M periodic components. The period of each sinusoid
is specified in the array 'period'. The model parameters are obtained by solving the
overdetermined least-squares problem that minimizes the model misfit as measured by
the Euclidean norm L2 = ||Gm - d||^2:
m = (G^{T}*G)^{-1} * G^{T}*d,
where 'd(t)'' is the data vector (N x 1) specified at the coordinates 't', m is the model
parameter vector and G is the data kernel matrix.
If 'constant'=True (default), a constant term is added to the model. If 'line'=True (default),
a linear term is added to the model.
If return_misfit=True (defaults to False), the model misfit L2 is also returned.
If 'return_params'=True (defaults to False), the model parameter vector 'm' is returned
instead of the statistical model 'xm'.
REFERENCES
----------
Strang, G.: Introduction to Linear Algebra. 4th edition (2009).
EXAMPLE
-------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from ap_tools.fit import sinfitn
    >>> t = np.linspace(0., 200., 300)
>>> periods = [25., 50., 100.]
>>> f1, f2, f3 = 2*np.pi/np.array(periods)
    >>> x = 50. + 0.8*t + 12.5*np.sin(f1*t) + 10*np.sin(f2*t) + 30*np.sin(f3*t) + 5.0*np.random.randn(t.size)
>>> xm = sinfitn(x, t, periods)
>>> fig, ax = plt.subplots()
>>> ax.plot(t, x, 'g', label='Data')
>>> ax.plot(t, xm, 'm', label='Sinusoidal model')
>>> ax.grid(True)
>>> ax.legend(loc='upper left')
>>> ax.set_xlabel('Time [s]', fontsize=20)
>>> ax.set_ylabel('Signal [arbitrary units]', fontsize=20)
>>> plt.show()
"""
d = np.matrix(x)
periods = list(periods)
N = d.size # Number of data points.
M = len(periods) # Number of model parameters.
## The data must be a row vector.
if d.shape==(1,N):
d = d.T
## Setting up the data kernel matrix G.
## Contains model functions evaluated at data points.
G = np.matrix(np.zeros((N, 0)))
if constant:
x0 = np.expand_dims(np.repeat(1., N), 1)
G = np.concatenate((G,x0), axis=1)
M = M + 1
if line:
x1 = np.expand_dims(t, 1)
G = np.concatenate((G,x1), axis=1)
M = M + 1
for period in periods:
fn = 2*np.pi/period
xn = np.matrix([np.sin(fn*t), np.cos(fn*t)]).T
G = np.concatenate((G,xn), axis=1)
## Solution to the overdetermined least-squares problem
## to obtain the model parameters that minimize the
## L2 norm of the model misfit vector, ||Gm - d||^2.
## e.g., Strang (2009), pg. 218.
m = (G.T*G).I*G.T*d
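    ## Note (added comment): forming (G^T G)^{-1} explicitly can be numerically
    ## fragile when G is ill-conditioned; np.linalg.lstsq(G, d) solves the same
    ## least-squares problem more robustly if that ever becomes an issue.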
## Assemble the statistical model using the parameters in the vector m.
xm = G*m
## Compute the L2 norm, ||Gm - d||^2.
err = xm - d
L2 = np.sqrt(err.T*err)
L2 = np.float(L2)
print("")
print("Model-data misfit: %.1f"%L2)
if return_params:
if return_misfit:
return m, L2
else:
return m
else:
xm = np.array(xm).squeeze()
if return_misfit:
return xm, L2
else:
return xm
def fourfilt(x, dts, Tmax, Tmin):
# See the docstring of the original MATLAB function.
# The usage is the same, see the docstring for
# low-pass, high-pass and band-pass filter examples.
#
# In the Python version, the variable names
# have been changed as follows:
#
# MATLAB Python
# delt -> dts
# tmax -> Tmax
# tmin -> Tmin
# filtdat -> xfilt
#
# Translated from the MATLAB function fourfilt.m
# by André Palóczy (github.com/apaloczy).
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# function [filtdat]=fourfilt(x,delt,tmax,tmin)
# FOURFILT Fourier low, high, or bandpass filter.
#
# [filtdat]=fourfilt(x,delt,tmax,tmin)
#
# where: x: data series to be filtered
# delt: sampling interval
# tmax: maximum period filter cutoff
# tmin: minimum period filter cutoff
#
# usage: lowpassdata=fourfilt(data,0.5,2000,20)
#
# gives lowpass filter with cutoff at 20.0 sec
# tmax set > (length(x)*delt) for no cutoff at low freq end
#
# usage: highpassdata=fourfilt(x,0.5,20,0.9)
#
# gives highpass filter with cutoff at 20.0 sec
# tmin set < (2*delt) for no cutoff at high freq end
#
# usage: bandpassdata=fourfilt(x,0.5,20,10)
#
# gives bandpass filter passing 10-20 sec. band
#
# Reference:
# Walters, R. A. and Heston, C., 1982. Removing the tidal-period variations from time-series
    # data using low-pass digital filters. Journal of Physical Oceanography, 12, 112-115.
#
#############################
# Version 1.0 (12/4/96) Jeff List (jlist@usgs.gov)
# Version 1.1 (1/8/97) Rich Signell (rsignell@usgs.gov)
# removed argument for number of points and add error trapping for matrices
# Version 1.1b (12/1/2005) Rich Signell (rsignell@usgs.gov)
# added reference
# (3/18/2019)
# Translated to Python by André Palóczy.
#############################
npts = x.size
# if npts%2==0: # N is even.
nby2 = npts//2
# else:
# nby2 = (npts-1)//2
tfund = npts*dts
ffund = 1.0/tfund
xmean = x.mean()
x -= xmean # Remove the mean from data.
coeffs = fft(x) # Fourier-transform data.
# Filter coefficients.
f = ffund
for i in range(1, nby2+2):
t = 1.0/f
if np.logical_or(t>Tmax, t<Tmin):
coeffs[i] = coeffs[i]*0
f += ffund
# Calculate the remaining coefficients.
for i in range(1, nby2+1):
coeffs[npts-i] = coeffs[i].conj()
# Back-transform data and take real part.
xfilt = ifft(coeffs).real
xfilt += xmean # Add back the mean.
return xfilt
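# Minimal usage sketch (illustrative only, not part of the original module;
# assumes a synthetic signal sampled every 1 s):
#
#   t = np.arange(0., 600., 1.0)
#   sig = np.sin(2*np.pi*t/100.) + np.sin(2*np.pi*t/5.)
#   lowpassed = fourfilt(sig.copy(), 1.0, 1e9, 20.)   # keep periods longer than 20 s
#   highpassed = fourfilt(sig.copy(), 1.0, 20., 0.9)  # keep periods shorter than 20 s
#
# (fourfilt removes the mean from its input in place, hence the .copy() calls)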
| mit |
mhdella/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
joshloyal/scikit-learn | sklearn/utils/random.py | 46 | 10523 | # Author: Hamzeh Alsalhi <ha258@cornell.edu>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
raise ValueError("a must be non-empty")
if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
            found = np.zeros(shape, dtype=int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
        # Tolerate floating point rounding when checking that probabilities sum to one.
        if not np.isclose(np.sum(class_prob_j), 1.0):
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
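# Illustrative usage sketch (not part of the upstream module): draw one sparse
# column of 10 samples over the example classes {0, 1, 2}; the class values and
# probabilities below are arbitrary, and 0 is the implicit (unstored) value.
if __name__ == '__main__':
    example_classes = [np.array([0, 1, 2])]
    example_probs = [np.array([0.5, 0.25, 0.25])]
    demo_matrix = random_choice_csc(10, example_classes, example_probs,
                                    random_state=0)
    print(demo_matrix.toarray().ravel())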
| bsd-3-clause |
aestrivex/PySurfer | examples/plot_label.py | 1 | 1525 | """
Display ROI Labels
==================
Using PySurfer you can plot Freesurfer cortical labels on the surface
with a large amount of control over the visual representation.
"""
print(__doc__)
import os
from surfer import Brain
subject_id = "fsaverage"
hemi = "lh"
surf = "smoothwm"
brain = Brain(subject_id, hemi, surf)
# If the label lives in the normal place in the subjects directory,
# you can plot it by just using the name
brain.add_label("BA1")
# Some labels have an associated scalar value at each ID in the label.
# For example, they may be probabilistically defined. You can threshold
# what vertices show up in the label using this scalar data
brain.add_label("BA1", color="blue", scalar_thresh=.5)
# Or you can give a path to a label in an arbitrary location
subj_dir = os.environ["SUBJECTS_DIR"]
label_file = os.path.join(subj_dir, subject_id,
"label", "%s.MT.label" % hemi)
brain.add_label(label_file)
# By default the label is 'filled-in', but you can
# plot just the label boundaries
brain.add_label("BA44", borders=True)
# You can also control the opacity of the label color
brain.add_label("BA6", alpha=.7)
# Finally, you can plot the label in any color you want.
brain.show_view(dict(azimuth=-42, elevation=105, distance=225,
focalpoint=[-30, -20, 15]))
# Use any valid matplotlib color.
brain.add_label("V1", color="steelblue", alpha=.6)
brain.add_label("V2", color="#FF6347", alpha=.6)
brain.add_label("entorhinal", color=(.2, 1, .5), alpha=.6)
| bsd-3-clause |
fspaolo/scikit-learn | examples/cluster/plot_lena_segmentation.py | 8 | 2421 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import pylab as pl
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
pl.figure(figsize=(5, 5))
pl.imshow(lena, cmap=pl.cm.gray)
for l in range(N_REGIONS):
pl.contour(labels == l, contours=1,
colors=[pl.cm.spectral(l / float(N_REGIONS)), ])
pl.xticks(())
pl.yticks(())
pl.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
pl.show()
| bsd-3-clause |
lemming52/white_knight | pendulum/normal_modes.py | 1 | 2535 | # External Packages
import numpy as np
import matplotlib.pyplot as plt
# Custom Packages
import tools
def displacements(solutions, label, config):
t = solutions[:, 0]
theta1 = solutions[:, 1]
theta2 = solutions[:, 2]
sim_time = config['sim_time']
plt.figure()
plt.plot(t, theta1, label=r'$\theta_1$')
plt.plot(t, theta2, label=r'$\theta_2$')
plt.xlim([0, sim_time/6]) # Arbitrary scaling
plt.legend()
plt.xlabel('t')
plt.ylabel(r'$\theta$ / rad')
plt.title('Angular Displacement against time for both Masses : %s' % label)
plt.savefig('angular_displacements_%s.png' % label)
def frequencies(solutions, label, config):
t = solutions[:, 0]
theta1 = solutions[:, 1]
theta2 = solutions[:, 2]
    # Generate the power spectrum of the displacements
freq_1 = np.power(abs(np.fft.fft(theta1)), 2)
freq_2 = np.power(abs(np.fft.fft(theta2)), 2)
timestep = t[1] - t[0]
freq = 2*np.pi*np.fft.fftfreq(freq_1.size, d=timestep) # Convert to rads
coeff = config['g']/config['L1']
# Calculated predicted frequencies
mode_freq1 = ((2 + config['initial'][1]/config['initial'][0])*coeff)**(1/2)
mode_freq2 = ((2 - config['initial'][1]/config['initial'][0])*coeff)**(1/2)
f, axarr = plt.subplots(2)
axarr[0].plot(freq, freq_1, label=r'$\theta_1$')
axarr[0].set_ylabel(r'w$^2$ / (rad/s)$^2$')
axarr[0].set_xlim([0, 8])
axarr[1].plot(freq, freq_2, label=r'$\theta_2$')
axarr[1].set_ylabel(r'w$^2$ / (rad/s)$^2$')
axarr[1].set_xlim([0, 8])
axarr[1].set_xlabel('w / rad/s')
axarr[0].set_title('Power Spectrum for pendulum motion - %s' % label)
# Add vertical lines at predicted peak values.
axarr[0].axvline(mode_freq1, 0, np.amax(freq_1), color='r', label='Prediction 1')
axarr[1].axvline(mode_freq1, 0, np.amax(freq_2), color='r', label='Prediction 1')
axarr[0].axvline(mode_freq2, 0, np.amax(freq_1), color='g', label='Prediction 2')
axarr[1].axvline(mode_freq2, 0, np.amax(freq_2), color='g', label='Prediction 2')
axarr[0].legend(loc='upper center')
axarr[1].legend(loc='upper center')
f.savefig("angular_displacements_freq_%s.png" % label)
def main():
label = input('Enter config file label (e.g. core): ')
config = tools.load_config(label)
solutions = np.loadtxt('data/double_solutions_%s.txt' % label)
displacements(solutions, label, config)
frequencies(solutions, label, config)
if __name__ == '__main__':
    main()
| mit |
hantek/pylearn2 | pylearn2/scripts/browse_conv_weights.py | 44 | 7605 | #! /usr/bin/env python
"""
Interactive viewer for the convolutional weights in a pickled model.
Unlike ./show_weights, this shows one unit's weights at a time. This
allows it to display weights from higher levels (which can have 100s
of input channels), not just the first.
"""
import os
import sys
import warnings
import argparse
import numpy
from pylearn2.models.mlp import MLP, ConvElemwise, CompositeLayer
from pylearn2.models.maxout import MaxoutConvC01B
from pylearn2.utils import safe_zip, serial
from pylearn2.space import Conv2DSpace
try:
from matplotlib import pyplot
except ImportError as import_error:
warnings.warn("Can't use this script without matplotlib.")
pyplot = None
def _parse_args():
parser = argparse.ArgumentParser(
description=("Interactive browser of convolutional weights. "
"Up/down keys switch layers. "
"Left/right keys switch units."))
parser.add_argument('-i',
'--input',
required=True,
help=".pkl file of model")
result = parser.parse_args()
if os.path.splitext(result.input)[1] != '.pkl':
print("Expected --input to end in .pkl, got %s." % result.input)
sys.exit(1)
return result
def _get_conv_layers(layer, result=None):
'''
Returns a list of the convolutional layers in a model.
Returns
-------
rval: list
Lists the convolutional layers (ConvElemwise, MaxoutConvC01B).
'''
if result is None:
result = []
if isinstance(layer, (MLP, CompositeLayer)):
for sub_layer in layer.layers:
_get_conv_layers(sub_layer, result)
elif isinstance(layer, (MaxoutConvC01B, ConvElemwise)):
result.append(layer)
return result
def _get_conv_weights_bc01(layer):
'''
Returns a conv. layer's weights in BC01 format.
Parameters
----------
layer: MaxoutConvC01B or ConvElemwise
Returns
-------
rval: numpy.ndarray
The kernel weights in BC01 axis order. (B: output channels, C: input
channels)
'''
assert isinstance(layer, (MaxoutConvC01B, ConvElemwise))
weights = layer.get_params()[0].get_value()
if isinstance(layer, MaxoutConvC01B):
c01b = Conv2DSpace(shape=weights.shape[1:3],
num_channels=weights.shape[0],
axes=('c', 0, 1, 'b'))
bc01 = Conv2DSpace(shape=c01b.shape,
num_channels=c01b.num_channels,
axes=('b', 'c', 0, 1))
weights = c01b.np_format_as(weights, bc01)
elif isinstance(layer, ConvElemwise):
weights = weights[:, :, ::-1, ::-1] # reverse 0, 1 axes
return weights
def _num_conv_units(conv_layer):
'''
Returns a conv layer's number of output channels.
'''
assert isinstance(conv_layer, (MaxoutConvC01B, ConvElemwise))
weights = conv_layer.get_params()[0].get_value()
if isinstance(conv_layer, MaxoutConvC01B):
return weights.shape[-1]
elif isinstance(conv_layer, ConvElemwise):
return weights.shape[0]
def main():
"Entry point of script."
args = _parse_args()
model = serial.load(args.input)
if not isinstance(model, MLP):
print("Expected the .pkl file to contain an MLP, got a %s." %
str(model.type))
sys.exit(1)
def get_figure_and_axes(conv_layers, window_width=800):
kernel_display_width = 20
margin = 5
grid_square_width = kernel_display_width + margin
num_columns = window_width // grid_square_width
max_num_channels = numpy.max([layer.get_input_space().num_channels
for layer in conv_layers])
# pdb.set_trace()
num_rows = max_num_channels // num_columns
if num_rows * num_columns < max_num_channels:
num_rows += 1
assert num_rows * num_columns >= max_num_channels
window_width = 15
        # '* 1.8' comes from the fact that rows take up about 1.8 times as much
# space as columns, due to the title text.
window_height = window_width * ((num_rows * 1.8) / num_columns)
figure, all_axes = pyplot.subplots(num_rows,
num_columns,
squeeze=False,
figsize=(window_width,
window_height))
for unit_index, axes in enumerate(all_axes.flat):
subplot_title = axes.set_title('%d' % unit_index)
subplot_title.set_size(8)
subplot_title.set_color((.3, .3, .3))
# Hides tickmarks
for axes_row in all_axes:
for axes in axes_row:
axes.get_xaxis().set_visible(False)
axes.get_yaxis().set_visible(False)
return figure, all_axes
conv_layers = _get_conv_layers(model)
figure, all_axes = get_figure_and_axes(conv_layers)
title_text = figure.suptitle("title")
pyplot.tight_layout(h_pad=.1, w_pad=.5) # in inches
layer_index = numpy.array(0)
unit_indices = numpy.zeros(len(model.layers), dtype=int)
def redraw():
'''
Draws the currently selected convolutional kernel.
'''
axes_list = all_axes.flatten()
layer = conv_layers[layer_index]
unit_index = unit_indices[layer_index, ...]
weights = _get_conv_weights_bc01(layer)[unit_index, ...]
active_axes = axes_list[:weights.shape[0]]
for axes, weights in safe_zip(active_axes, weights):
axes.set_visible(True)
axes.imshow(weights, cmap='gray', interpolation='nearest')
assert len(frozenset(active_axes)) == len(active_axes)
unused_axes = axes_list[len(active_axes):]
assert len(frozenset(unused_axes)) == len(unused_axes)
assert len(axes_list) == len(active_axes) + len(unused_axes)
for axes in unused_axes:
axes.set_visible(False)
title_text.set_text("Layer %s, unit %d" %
(layer.layer_name,
unit_indices[layer_index]))
figure.canvas.draw()
def on_key_press(event):
"Callback for key press events"
def increment(index, size, step):
"""
Increments an index in-place.
Parameters
----------
index: numpy.ndarray
scalar (0-dim array) of dtype=int. Non-negative.
size: int
One more than the maximum permissible index.
step: int
-1, 0, or 1.
"""
assert index >= 0
assert step in (0, -1, 1)
index[...] = (index + size + step) % size
if event.key in ('up', 'down'):
increment(layer_index,
len(conv_layers),
1 if event.key == 'up' else -1)
unit_index = unit_indices[layer_index]
redraw()
elif event.key in ('right', 'left'):
unit_index = unit_indices[layer_index:layer_index + 1]
increment(unit_index,
_num_conv_units(conv_layers[layer_index]),
1 if event.key == 'right' else -1)
redraw()
elif event.key == 'q':
sys.exit(0)
figure.canvas.mpl_connect('key_press_event', on_key_press)
redraw()
pyplot.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
ekostat/ekostat_calculator | core/mapping.py | 1 | 66417 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 20 14:57:56 2017
@author: a002028
"""
import pandas as pd
import numpy as np
import core
import os, sys
import uuid
import re
import codecs
import core.exceptions as exceptions
#if current_path not in sys.path:
# sys.path.append(os.path.dirname(os.path.realpath(__file__)))
"""
#==============================================================================
#==============================================================================
"""
class IndSetHomPar(dict):
"""
Created 20180612 by Magnus Wenzer
"""
def __init__(self, file_path):
self.file_path = file_path
self.load_file()
def load_file(self):
with codecs.open(self.file_path) as fid:
for line in fid:
line = line.strip()
if line:
indicator, par = [item.strip() for item in line.split('\t')]
if not self.get(indicator):
self[indicator] = []
self[indicator].append(par)
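    # Example of the expected file layout (assumed from the parsing above):
    # one tab-separated "indicator<TAB>parameter" pair per line, and repeated
    # indicator names accumulate their parameters into a list, e.g.
    #   din_winter<TAB>NTRA
    #   din_winter<TAB>AMON
    # would give {'din_winter': ['NTRA', 'AMON']} (names here are illustrative).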
"""
#==============================================================================
#==============================================================================
"""
class SimpleList(list):
"""
Created 20180616 by Magnus Wenzer
"""
def __init__(self, file_path):
self.file_path = file_path
self.load_file()
def load_file(self, **kwargs):
"""
Updated 20180721 by Magnus Wenzer
"""
with codecs.open(self.file_path, **kwargs) as fid:
for line in fid:
line = line.strip()
if line:
self.append(line)
"""
#==============================================================================
#==============================================================================
"""
class SharkwebSettings(dict):
"""
Created 20180824 by Magnus Wenzer
"""
def __init__(self, file_path, **kwargs):
self.file_path = file_path
read_options = {'encoding': 'cp1252'}
with codecs.open(self.file_path, **read_options) as fid:
for line in fid:
if line.startswith('#'):
continue
key, value = [item.strip() for item in line.split('\t')]
if not value:
value = False
elif ';' in value:
value = [item.strip() for item in value.split(';')]
else:
try:
value = int(value)
except:
pass
self[key] = value
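    # Example of the expected settings file layout (assumed from the parsing
    # above): tab-separated key/value pairs, lines starting with '#' are
    # skipped, ';'-separated values become lists and integer-like values are
    # cast to int. The keys shown are purely illustrative:
    #   # a comment line
    #   year_interval<TAB>2007;2012
    #   depth_max<TAB>10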
"""
#==============================================================================
#==============================================================================
"""
class MappingObject(list):
"""
Created 20180721 by Magnus Wenzer
Updated 20180831 by Magnus
"""
def __init__(self, file_path, from_column=None, to_column=None, **kwargs):
self.file_path = file_path
self.from_column = from_column
self.to_column = to_column
read_options = {'sep': '\t',
'encoding': 'cp1252'}
read_options.update(kwargs)
self.df = pd.read_csv(self.file_path, **read_options)
#==========================================================================
def get_mapping(self, item=None, from_column=None, to_column=None):
"""
Created 20180721 by Magnus Wenzer
"""
if not from_column:
from_column = self.from_column
if not to_column:
to_column = self.to_column
result = self.df.loc[self.df[from_column]==item, to_column]
if len(result):
return result.values[0]
return item
#==========================================================================
def get_list(self, key):
return list(self.df[key].values)
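# Illustrative usage sketch (hypothetical, not part of the original module):
# writes a minimal two-column mapping file and shows that get_mapping()
# translates between the columns and falls back to the input item when no row
# matches. The file name and column names below are made up for the example.
def _mapping_object_example(file_path='mapping_example.txt'):
    with codecs.open(file_path, 'w', encoding='cp1252') as fid:
        fid.write('internal_name\tdisplay_name\n')
        fid.write('TEMP\tTemperature\n')
    obj = MappingObject(file_path, from_column='internal_name',
                        to_column='display_name')
    assert obj.get_mapping('TEMP') == 'Temperature'
    assert obj.get_mapping('SALT') == 'SALT'  # unmapped items pass through unchanged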
"""
#==============================================================================
#==============================================================================
"""
class AttributeDict(dict):
"""
Base class for attribute dictionary.
"""
def __init__(self):
super().__init__()
#==========================================================================
def _add_arrays_to_entries(self, **entries):
"""
Updated 20180912 by Lena. Added list(set(array)) to remove duplicates in array
"""
for key, array in entries.items():
array_set = list(set(array))
# array = [v for v in array if v] # if you are using '' as nan value
# array = array[np.logical_and(array!='', ~pd.isnull(array))]
if len(array_set)==1:
array_set = array_set[0]
setattr(self, key, array_set)
#==========================================================================
def add_corresponding_arrays(self, df=None, first_key=u'',
second_key=u'', match_key=u''):
"""
Ex. Add arrays of all water bodies within a specific type area (key)
"""
for value in df[first_key].unique():
array = self._get_array_from_df(df=df,
key_a=match_key,
key_b=first_key,
match=value)
setattr(self, value, sorted(array))
if second_key:
df['temp_key'] = np.array([a+b for a,b in zip(df[first_key].values,
df[second_key].values)])
for value in np.unique(df['temp_key']):
if value not in self:
array = self._get_array_from_df(df=df,
key_a=match_key,
key_b='temp_key',
match=value)
setattr(self, value, sorted(array))
#==========================================================================
def add_entries(self, **entries):
"""
Turns elements in arrays into attributes with a corresponding official
field name
"""
for key, array in entries.items():
setattr(self, key, key)
setattr(self, key.lower(), key)
for value in array.values:
if not pd.isnull(value):
setattr(self, value.lower(), key)
#==========================================================================
def add_info_dict(self, df=None, first_key=u'', key_list=[]):
"""
Adds values from "first_key"-array to attribute with a corresponding
dictionary of values from key_list-arrays
"""
for i, value in enumerate(df[first_key].values):
setattr(self, value.strip(), {key: df[key][i] for key in key_list})
#==========================================================================
def keys(self):
return list(self.__dict__.keys())
#==========================================================================
def get(self, key):
"""
Updated 20180613 by Lena Viktorsson
"""
if key.lower() in self.keys():
return getattr(self, key.lower())
if key in self.keys():
return getattr(self, key)
if 'SE' + key in self.keys():
return getattr(self, 'SE' + key)
return None
# try:
# return getattr(self, key.lower())
# except:
# try:
# return getattr(self, key)
# except:
# return getattr(self, 'SE' + key)
#==========================================================================
def _get_array_from_df(self, df=None, key_a=u'', key_b=u'', match=None):
# print(type(df[key_a]), type(df[key_a][0]))
# return df[key_a].loc[df[key_b].isin([match])].values.str.strip()
return [x.strip() for x in df[key_a].loc[df[key_b].isin([match])].values]
# return [x.strip() for x in df[key_a].iloc[np.where(df[key_b]==match)].values]
#==========================================================================
def get_list(self, key_list):
return list(self.get(key) for key in key_list)
#==========================================================================
def get_mapping_dict(self, key_list):
return dict(list((key, self.get(key)) for key in key_list))
#==========================================================================
def __getitem__(self, key):
return getattr(self, key)
#==========================================================================
"""
#==============================================================================
#==============================================================================
"""
class Hypsograph():
"""
Created 20180320 by Magnus Wenzer
"""
def __init__(self, file_path):
self.file_path = file_path
self.wb_par = 'EUCD'
self.depth_par = 'Djup'
self.area_par = 'Sum_Area(km2)'
self.volume_par = 'Volym'
self.acc_volume_par = 'acc_volume'
self.frac_volume_par = 'frac_volume'
self.frac_area_par = 'frac_area'
self._load_data()
self._add_columns()
#==========================================================================
def _load_data(self):
dtype = {self.depth_par: int,
self.area_par: float,
self.volume_par: float}
self.df = pd.read_csv(self.file_path, sep='\t', encoding='cp1252', dtype=dtype)
self.water_body_list = sorted(set(self.df[self.wb_par]))
self.wb_df = {}
for wb in self.water_body_list:
self.wb_df[wb] = self.df.loc[self.df[self.wb_par]==wb, :].copy()
#==========================================================================
def _add_columns(self):
for wb in self.water_body_list:
# Accumulated volume
self.wb_df[wb][self.acc_volume_par] = self.wb_df[wb].loc[::-1, self.volume_par].cumsum()[::-1]
# Volume fraction
self.wb_df[wb][self.frac_volume_par] = self.wb_df[wb][self.acc_volume_par]/self.wb_df[wb][self.acc_volume_par].values[0]
# Area fraction
self.wb_df[wb][self.frac_area_par] = self.wb_df[wb][self.area_par]/self.wb_df[wb][self.area_par].values[0]
#==========================================================================
def get_max_depth_of_water_body(self, water_body):
if water_body in self.wb_df.keys():
result = self.wb_df[water_body][self.depth_par].max()
return result
else:
return False
# if len(result):
# return result.values[0]
# else:
# return False
#==========================================================================
def get_total_area_of_water_body(self, water_body):
if water_body in self.wb_df.keys():
result = self.wb_df[water_body].loc[self.wb_df[water_body][self.depth_par]==0, self.area_par]
if len(result):
return result.values[0]
else:
return False
else:
return False
#==========================================================================
def get_volume_below_depth(self, water_body=None, depth=None):
if water_body in self.wb_df.keys():
result = self.wb_df[water_body].loc[self.wb_df[water_body][self.depth_par]==depth, self.volume_par]
if len(result):
return result.values[0]
else:
return False
else:
return False
#==========================================================================
def get_volume_fraction_below_depth(self, water_body=None, depth=None):
if water_body in self.wb_df.keys():
result = self.wb_df[water_body].loc[self.wb_df[water_body][self.depth_par]==depth, self.frac_volume_par]
if len(result):
return result.values[0]
else:
return False
else:
return False
#==========================================================================
def get_area_fraction_at_depth(self, water_body=None, depth=None):
if water_body in self.wb_df.keys():
result = self.wb_df[water_body].loc[self.wb_df[water_body][self.depth_par]==depth, self.frac_area_par]
if len(result):
return result.values[0]
else:
return False
else:
return False
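# Illustrative sketch (hypothetical, not part of the original module): a tiny
# hypsograph file showing how the accumulated volume is built from the bottom
# up and turned into fractions. The water-body code and numbers are made up.
def _hypsograph_example(file_path='hypsograph_example.txt'):
    rows = ['EUCD\tDjup\tSum_Area(km2)\tVolym',
            'SEEXAMPLE01\t0\t10.0\t5.0',
            'SEEXAMPLE01\t1\t6.0\t3.0',
            'SEEXAMPLE01\t2\t2.0\t1.0']
    with codecs.open(file_path, 'w', encoding='cp1252') as fid:
        fid.write('\n'.join(rows))
    hyp = Hypsograph(file_path)
    # Volume below 1 m is 3 + 1 = 4 out of a total of 9, i.e. a fraction of 4/9.
    assert abs(hyp.get_volume_fraction_below_depth('SEEXAMPLE01', depth=1) - 4./9) < 1e-9
    assert hyp.get_total_area_of_water_body('SEEXAMPLE01') == 10.0
    assert hyp.get_max_depth_of_water_body('SEEXAMPLE01') == 2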
"""
#==============================================================================
#==============================================================================
"""
class ParameterMapping(AttributeDict):
"""
Load file to map data fields and parameters to a standard setting format
"""
def __init__(self):
super().__init__()
#==========================================================================
def load_mapping_settings(self, file_path=u'',sep='\t',encoding='cp1252'):
""" Reading csv/txt files """
self.mapping_file = core.Load().load_txt(file_path, sep=sep,
encoding=encoding,
fill_nan=u'')
self.add_entries(**self.mapping_file)
#==========================================================================
def map_parameter_list(self, para_list, ext_list=False):
return self.get_list(para_list)
#==========================================================================
def get_parameter_mapping(self, para_list, ext_list=False):
return self.get_mapping_dict(para_list)
#==========================================================================
def get_mapping(self, item=None, from_column=None, to_column=None, **kwargs):
"""
Created 20180222 by Magnus Wenzer
Updated 20180222 by Magnus Wenzer
"""
result = self.mapping_file.loc[self.mapping_file[from_column]==item, to_column]
if len(result):
return result.values[0]
if kwargs.get('return_blank_if_not_found'):
return ''
return item
"""
#==============================================================================
#==============================================================================
"""
class WaterBody(AttributeDict):
"""
Object to hold information on water bodies and type areas
- get various info for each water body
- get list on different water bodies within a specific type area
"""
def __init__(self, **kwargs):
"""
Created ???????? by Johannes Johansson
Updated 20180320 by Magnus Wenzer
"""
super().__init__()
self.column_name = {}
self.column_name['water_body'] = {'internal': 'MS_CD',
'display': 'WATERBODY_NAME'}
self.column_name['type_area'] = {'internal': 'TYPE_AREA_CODE',
'display': 'TYPE_AREA_NAME'}
self.column_name['water_district'] = {'internal': 'WATER_DISTRICT_CODE',
'display': 'WATER_DISTRICT_NAME'}
#TODO Add Parametermapping for water body names
#TODO Map against .lower() letters
if kwargs:
self.load_water_body_match(**kwargs)
#==========================================================================
def _add_type_area_no_and_suffix(self):
def get_numer(string):
            match = re.findall(pattern=r'\d+', string=string)
if match:
return match[0]
else:
return ''
def get_suffix(string):
            match = re.findall(pattern=r'\D+', string=string)
if match:
return match[0]
else:
return ''
self.water_bodies['TYPE_AREA_NO'] = self.water_bodies['TYPE_AREA_CODE'].apply(get_numer)
self.water_bodies['TYPE_AREA_SUFFIX'] = self.water_bodies['TYPE_AREA_CODE'].apply(get_suffix)
#==========================================================================
def load_water_body_match(self, file_path=u'', sep='\t', encoding='cp1252'):
"""
Created ???????? by Johannes Johansson
Updated 20180320 by Magnus Wenzer
"""
self.water_bodies = core.Load().load_txt(file_path, sep=sep,
encoding=encoding,
fill_nan=u'')
self._add_type_area_no_and_suffix()
key_list = list(self.water_bodies.keys())
key_list.remove(self.column_name['water_body']['internal'])
self.add_info_dict(df=self.water_bodies,
first_key=self.column_name['water_body']['internal'],
key_list=key_list)
self.add_corresponding_arrays(df=self.water_bodies,
first_key=u'TYPE_AREA_NO',
second_key=u'TYPE_AREA_SUFFIX',
match_key=self.column_name['water_body']['internal'])
#==========================================================================
def get_water_bodies_in_type_area(self, type_area):
return self.get(type_area)
#==========================================================================
def get_display_name(self, **kwargs):
"""
Created 20180315 by Magnus Wenzer
Updated 20180315 by Magnus Wenzer
kwargs examples:
water_body='SE633710-200500'
type_area='1n'
"""
area = list(kwargs.keys())[0]
value = kwargs[area]
result = self.water_bodies.loc[self.water_bodies[self.column_name[area]['internal']]==value, self.column_name[area]['display']]
if len(result):
return result.values[0]
return False
#==========================================================================
def get_internal_name(self, **kwargs):
"""
Created 20180315 by Magnus Wenzer
Updated 20180315 by Magnus Wenzer
"""
area = list(kwargs.keys())[0]
value = kwargs[area]
result = self.water_bodies.loc[self.water_bodies[self.column_name[area]['display']]==value, self.column_name[area]['internal']]
if len(result):
return result.values[0]
return False
#==========================================================================
def get_list(self, area_level, **kwargs):
"""
Created 20180315 by Magnus Wenzer
Updated 20180315 by Magnus Wenzer
"""
if kwargs:
area = list(kwargs.keys())[0]
value = kwargs[area]
if type(value) != list:
# print(type(value), value)
value = [value]
result = self.water_bodies.loc[self.water_bodies[self.column_name[area]['internal']].isin(value), self.column_name[area_level]['internal']]
if len(result):
return sorted(set(result.values))
return []
else:
return sorted(set(self.water_bodies[self.column_name[area_level]['internal']]))
#==========================================================================
def get_mapping(self, item, from_col, to_col):
"""
Created 20180917 by Magnus Wenzer
"""
result = self.water_bodies.loc[self.water_bodies[from_col]==item, to_col]
if len(result):
return result.values[0]
return item
#==========================================================================
def get_url(self, water_body):
result = self.water_bodies.loc[self.water_bodies[self.column_name['water_body']['internal']]==water_body, 'URL_VISS']
if len(result):
return result.values[0]
return False
# #==========================================================================
# def get_type_area_list(self, water_district=None, water_body=None):
# """
# Created 20180222 by Magnus Wenzer
# Updated 20180315 by Magnus Wenzer
#
# Returns a list with type areas.
# """
# if water_district:
# if type(water_district) != list:
# water_district = [water_district]
# return sorted(set(self.water_bodies.loc[self.water_bodies[self.column_name['water_district']['internal']].isin(water_district), self.column_name['type_area']['internal']].values))
# elif water_body:
# if type(water_body) != list:
# water_body = [water_body]
# return sorted(set(self.water_bodies.loc[self.water_bodies[self.column_name['water_body']['internal']].isin(water_body), self.column_name['type_area']['internal']].values))
# else:
# return sorted(set(self.water_bodies[self.column_name['type_area']['internal']]))
#
#
# #==========================================================================
# def get_water_body_list(self, type_area=None, water_district=None):
# """
# Created 20180222 by Magnus Wenzer
# Updated 20180315 by Magnus Wenzer
#
# Returns a list with water bodies.
# """
# if type_area:
# if type(type_area) != list:
# type_area = [type_area]
# return sorted(set(self.water_bodies.loc[self.water_bodies[self.column_name['type_area']['internal']].isin(type_area), self.column_name['water_body']['internal']].values))
# elif water_district:
# if type(water_district) != list:
# water_district = [water_district]
# return sorted(set(self.water_bodies.loc[self.water_bodies[self.column_name['water_district']['internal']].isin(water_district), self.column_name['water_body']['internal']].values))
# else:
# return sorted(set(self.water_bodies[self.column_name['water_body']['internal']]))
#
# #==========================================================================
# def get_water_district_list(self, type_area=None, water_body=None):
# """
# Created 20180315 by Magnus Wenzer
# Updated 20180315 by Magnus Wenzer
#
# Returns a list with water districts.
# """
# if type_area:
# if type(type_area) != list:
# type_area = [type_area]
# return sorted(set(self.water_bodies.loc[self.water_bodies[self.column_name['type_area']['internal']].isin(type_area), self.column_name['water_distric']['internal']].values))
# elif water_body:
# if type(water_body) != list:
# water_body = [water_body]
# return sorted(set(self.water_bodies.loc[self.water_bodies[self.column_name['water_body']['internal']].isin(water_body), self.column_name['water_distric']['internal']].values))
# else:
# return sorted(set(self.water_bodies[self.column_name['water_distric']['internal']]))
#==========================================================================
def get_type_area_for_water_body(self, wb, include_suffix=False,
key_number=u'TYPE_AREA_NO',
key_suffix=u'TYPE_AREA_SUFFIX'):
"""
Updated 20180613 by Lena Viktorsson
"""
if self.get(wb) == None:
return None
if include_suffix:
string = self.get(wb).get(key_number) + '-' + \
self.get(wb).get(key_suffix)
return string.strip('-')
else:
return self.get(wb).get(key_number)
#==========================================================================
def get_type_area_suffix_for_water_body(self, wb, key=u'TYPE_AREA_SUFFIX'):
return self.get(wb).get(key)
#==========================================================================
def get_type_area_name_for_water_body(self, wb, key='TYPE_AREA_NAME'):
if self.get(wb) == None:
return None
return self.get(wb).get(key)
#==========================================================================
def get_waterdistrictname_for_water_body(self, wb, key=u'WATER_DISTRICT_NAME'):
if self.get(wb) == None:
return None
return self.get(wb).get(key)
#==========================================================================
def get_waterdistrictcode_for_water_body(self, wb, key=u'WATER_DISTRICT_CODE'):
#TODO: fix what happens if wb_id is a list or get(wb) is a list
# print(wb)
# print(self.get(wb))
# if wb == '':
# print(self.get(wb))
# print(wb)
# if isinstance(self, list):
# print(self)
# print(wb)
# if isinstance(self.get(wb), list):
# print(self.get(wb))
# print(wb)
if self.get(wb) == None:
return None
return self.get(wb).get(key)
#==========================================================================
def get_visseucd_for_water_body(self, wb, key=u'VISS_EU_CD'):
if self.get(wb) == None:
            print(wb)
            return None
        return self.get(wb).get(key)
#==========================================================================
def get_mscd_for_water_body(self, wb, key=u'MS_CD'):
if self.get(wb) == None:
return None
return self.get(wb).get(key)
#==========================================================================
def get_name_for_water_body(self, wb, key=u'WATERBODY_NAME'):
if self.get(wb) == None:
return None
return self.get(wb).get(key)
#==========================================================================
def get_basin_number_for_water_body(self, wb, key=u'BASIN_NUMBER'):
"""
basin number refers to basin number in kustzonsmodellen
"""
return self.get(wb).get(key)
#==========================================================================
def get_hid_for_water_body(self, wb, key=u'HID'):
return self.get(wb).get(key)
#==========================================================================
def get_url_viss_for_water_body(self, wb, key=u'URL_VISS'):
return self.get(wb).get(key)
#==========================================================================
def get_center_position_for_water_body(self, wb, key_lat=u'CENTER_LAT',
key_lon=u'CENTER_LON'):
return {'lat': self.get(wb).get(key_lat),
'lon': self.get(wb).get(key_lon)}
#==========================================================================
"""
#==============================================================================
#==============================================================================
"""
class QualityElement(object):
"""
Created 20180222 by Magnus Wenzer
Updated 20180222 by Magnus Wenzer
"""
def __init__(self, file_path):
self.file_path = file_path
self.cf_df = pd.read_csv(self.file_path, sep='\t', dtype='str', encoding='cp1252')
assert all(['quality element' in self.cf_df.keys(), 'indicator' in self.cf_df.keys(), 'parameters' in self.cf_df.keys(), 'additional_parameters' in self.cf_df.keys(), 'indicator_class' in self.cf_df.keys()]) #'configuration file must contain quality element, indicator and parameters information'
self.cfg = {}
self.cfg['quality elements'] = self.cf_df.groupby('quality element')['indicator'].unique()
self.cfg['indicators'] = self.cf_df.groupby('indicator')['parameters'].unique()
self.indicator_config = self.cf_df.set_index('indicator')
#==========================================================================
def get_mapping(self, item, from_col, to_col):
"""
Created 20180918 by Magnus Wenzer
"""
result = self.cf_df.loc[self.cf_df[from_col]==item, to_col]
if len(result):
return result.values[0]
return item
#==========================================================================
def get_quality_element_list(self):
return sorted(self.cfg['quality elements'].keys())
#==========================================================================
def get_indicator_list_for_quality_element(self, quality_element):
return sorted([item for item in self.cfg['quality elements'][quality_element] if 'indicator' in item])
"""
#==============================================================================
#==============================================================================
"""
class RawDataFiles(object):
"""
Class to hold information in dtype_settings.txt based on the file
content in the raw_data-directory of the workspace.
Also keeps and handles information about active files.
"""
def __init__(self, raw_data_directory):
self.raw_data_directory = raw_data_directory.replace('\\', '/')
self.info_file_name ='dtype_settings.txt'
self.info_file_path = '/'.join([self.raw_data_directory, self.info_file_name])
self.has_info = False
self.load_and_sync_dtype_settings()
#==========================================================================
def load_and_sync_dtype_settings(self):
"""
Loads the info file and check if all links and info is present.
Returns True if all is ok, else False.
"""
if not os.path.exists(self.info_file_path):
print('No dtype_setting file found in raw_data directory')
return False
# Load info file
self.df = pd.read_csv(self.info_file_path, sep='\t', dtype={'status': int,
'filename': str,
'data_type': str})
self.has_info = True
# List all files
all_file_names = sorted([item for item in os.listdir(self.raw_data_directory) if item != os.path.basename(self.info_file_path)])
# Check that all files are in info file
if sorted(self.df['filename']) != all_file_names:
print('='*50)
print('\n'.join(sorted(self.df['filename'])))
print('.'*50)
print('\n'.join(all_file_names))
print('-'*50)
print('All files not in dtype_settings file!')
return False
# Check that all data_types are present
if not all(self.df['data_type']):
print('dtype not specified for all files!')
return False
return True
#==========================================================================
def get_active_paths(self):
if not self.has_info:
return False
return sorted(['/'.join([self.raw_data_directory, item]) for item in self.df.loc[self.df['status']==1, 'filename']])
#==========================================================================
def get_active_paths_with_data_type(self):
if not self.has_info:
return False
file_paths = self.get_active_paths()
output_list = []
for file_path in file_paths:
dt = self.df.loc[self.df['filename']==os.path.basename(file_path), 'data_type'].values[0]
output_list.append((file_path, dt))
return output_list
# #==========================================================================
# def activate(self, file_list):
# """
# Activates the given filenames and deactivate the rest. Returns True if all ok.
# Returns False if filename is missing.
# file_list is a list with strings.
# """
# file_list = [os.path.basename(item) for item in file_list]
# for file_name in file_list:
# print(file_name)
# if file_name not in self.df['filename'].values:
# return False
#
# for file_name in self.df['filename']:
# if file_name in file_list:
# self.df.loc[self.df['filename']==file_name, 'status'] = 1
# else:
# self.df.loc[self.df['filename']==file_name, 'status'] = 0
#
# # Save file
# self._save_file()
# return True
#==========================================================================
def add_file(self, file_name=None, data_type=None):
"""
        Takes the basename of file_name (which could be a path) and adds it to the settings file.
"""
assert all([file_name, data_type]), 'Not enough input arguments'
file_name = os.path.basename(file_name)
if file_name in self.df['filename'].values:
print('File already added')
return False
next_index = len(self.df)
        self.df.loc[next_index] = [1, file_name, data_type]  # .loc can enlarge the frame; .iloc cannot
return True
#==========================================================================
def _save_file(self):
self.df.to_csv(self.info_file_path, index=False, sep='\t')
"""
#==============================================================================
#==============================================================================
"""
class DataTypeMapping(object):
"""
Created 20180422 by Magnus Wenzer
Updated 20180422 by Magnus Wenzer
Class to hold information in dtype_settings.txt based on the file
content in the raw_data- and exports-directory of the workspace.
Also keeps and handles information about active files and which files are loaded.
"""
def __init__(self, input_data_directory):
self.input_data_directory = input_data_directory.replace('\\', '/')
self.raw_data_directory = '{}/raw_data'.format(self.input_data_directory)
if not os.path.exists(self.raw_data_directory):
os.mkdir(self.raw_data_directory)
self.info_file_name ='datatype_settings.txt'
self.info_file_path = '/'.join([self.input_data_directory, self.info_file_name])
self.datatype_list = ['physicalchemical',
'physicalchemicalsatellite',
'physicalchemicalmodel',
'chlorophyll',
'phytoplankton',
'zoobenthos']
self.has_info = False
self.load_and_check_dtype_settings()
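    # Example of the expected datatype_settings.txt layout (assumed from the
    # columns read in load_and_check_dtype_settings below): tab-separated with
    # the columns status, loaded, filename and datatype, where status/loaded
    # are 0 or 1 and datatype is one of the entries in self.datatype_list, e.g.
    #   status<TAB>loaded<TAB>filename<TAB>datatype
    #   1<TAB>0<TAB>sharkweb_data.txt<TAB>physicalchemical
    # (the file name above is illustrative)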
#==========================================================================
def load_and_check_dtype_settings(self):
"""
Loads the info file and check if all links and info is present.
Returns True if all is ok, else False.
"""
# print('self.info_file_path', self.info_file_path)
if not os.path.exists(self.info_file_path):
            print('No datatype_settings file found in the input_data directory')
return False
# Load info file
self.df = pd.read_csv(self.info_file_path, sep='\t', dtype={'status': int,
'loaded': int,
'filename': str,
'datatype': str})
# Remove "loaded" if not "status"
# print(self.df)
self.df.loc[self.df['status']==0, 'loaded'] = 0
self._save_file()
self.has_info = True
# List all files
all_file_names = sorted([item for item in os.listdir(self.raw_data_directory) if item != os.path.basename(self.info_file_path)])
# Check that all files are in info file
if sorted(self.df['filename']) != all_file_names:
# print('='*50)
# print('\n'.join(sorted(self.df['filename'])))
# print('.'*50)
# print('\n'.join(all_file_names))
# print('-'*50)
# print('All files not in dtype_settings file!')
return False
# Check that all data_types are present
if not all(self.df['datatype']):
print('dtype not specified for all files!')
return False
return True
#==========================================================================
def no_data_to_load(self):
"""
Created 20180423 by Magnus Wenzer
Updated 20180423 by Magnus Wenzer
Returns True if all rows in status column is 0.
"""
self.load_and_check_dtype_settings()
if len(self.df.loc[self.df['status'] == 1, 'loaded']) == 0:
print('No data selected to be loaded!')
return True
return False
#==========================================================================
def all_data_is_loaded(self):
"""
Created 20180422 by Magnus Wenzer
Updated 20180422 by Magnus Wenzer
Returns True is all files with status 1 is loaded, else return False.
"""
self.load_and_check_dtype_settings()
if all(self.df.loc[self.df['status'] == 1, 'loaded']):
return True
return False
#==========================================================================
def all_selected_files_loaded_for_datatypes(self, datatype):
"""
Created 20180422 by Magnus Wenzer
Updated 20180423 by Magnus Wenzer
"""
self.load_and_check_dtype_settings()
if all(self.df.loc[(self.df['datatype']==datatype) & (self.df['status']==1), 'loaded']):
return True
return False
#==========================================================================
def get_file_list(self):
if not self.has_info:
return []
return self.df['filename'].values
#==========================================================================
def has_data(self):
"""
Created 20180422 by Magnus Wenzer
Returns True if data is available else return False.
"""
if len(self.df):
return True
else:
return False
#==========================================================================
def get_datatype_list(self):
return self.datatype_list
#==========================================================================
def get_file_paths_to_load_for_datatype(self, datatype, force=False, reload_file=True):
"""
Created 20180422 by Magnus Wenzer
Updated 20180601 by Magnus Wenzer
Creates a list with file paths of the active files for the give datatype.
By default (force=False):
The list is returned if any of the files are not "loaded".
False is returned if all files are "loaded"
If force = True the list is returned in any case.
"""
if reload_file:
self.load_and_check_dtype_settings()
selection = self.df.loc[(self.df['datatype']==datatype) & (self.df['status']==1), :]
file_paths = ['/'.join([self.raw_data_directory, path]) for path in selection['filename'].values]
if not len(selection):
return []
if force:
return file_paths
else:
if all(selection['loaded']):
return []
else:
return file_paths
#==========================================================================
def get_file_paths_to_delete_for_datatype(self, datatype, reload_file=True):
"""
Created 20180422 by Magnus Wenzer
Updated 20180422 by Magnus Wenzer
Creates a list with file paths of the non active files for the give datatype.
"""
if reload_file:
self.load_and_check_dtype_settings()
selection = self.df.loc[(self.df['datatype']==datatype) & (self.df['status']==0), :]
return ['/'.join([self.raw_data_directory, path]) for path in selection['filename'].values]
#==========================================================================
def reset_loaded(self):
"""
Created 20180423 by Magnus Wenzer
Updated 20180423 by Magnus Wenzer
Resets the "loaded" column. This will trigger the data to be reloaded.
"""
self.df['loaded'] = 0
self._save_file()
#==========================================================================
def set_file_is_loaded(self, filename):
"""
Created 20180422 by Magnus Wenzer
Updated 20180422 by Magnus Wenzer
"""
if all(self.df.loc[self.df['filename']==filename, 'status']):
self.df.loc[self.df['filename']==filename, 'loaded'] = 1
self._save_file()
#==========================================================================
def set_load_for_datatype(self, datatype):
"""
Created 20180422 by Magnus Wenzer
Updated 20180422 by Magnus Wenzer
Sets files as loaded for the given datatype if status is 1.
"""
self.df.loc[(self.df['datatype']==datatype) & (self.df['status']==1), 'loaded'] = 1
return True
#==========================================================================
def get_active_paths(self):
if not self.has_info:
return False
return sorted(['/'.join([self.raw_data_directory, item]) for item in self.df.loc[self.df['status']==1, 'filename']])
#==========================================================================
def get_info_for_file(self, filename):
"""
Created 20180524 by Magnus Wenzer
Updated
returns a dict with information about a given file name
"""
if filename not in self.df['filename'].values:
return {}
df_dict = {}
df = self.df.loc[(self.df['filename']==filename)]
for col in df.columns:
df_dict[col] = df[col].values[0]
return df_dict
#==========================================================================
def get_active_paths_with_data_type(self):
if not self.has_info:
return False
file_paths = self.get_active_paths()
output_list = []
for file_path in file_paths:
dt = self.df.loc[self.df['filename']==os.path.basename(file_path), 'datatype'].values[0]
output_list.append((file_path, dt))
return output_list
#==========================================================================
def old_activate(self, file_list, only_this_list=False):
"""
        Activates the given filenames and, if only_this_list is True, deactivates the rest.
        Returns True if all ok. Returns False if any filename is missing.
        file_list is a list of strings (a single string is also accepted).
Created by Magnus Wenzer
Updated 20180424 by Magnus Wenzer
"""
if type(file_list) != list:
file_list = [file_list]
file_list = [os.path.basename(item) for item in file_list]
for file_name in file_list:
# print(file_name)
if file_name not in self.df['filename'].values:
return False
for file_name in self.df['filename']:
if file_name in file_list:
self.df.loc[self.df['filename']==file_name, 'status'] = 1
else:
if only_this_list:
self.df.loc[self.df['filename']==file_name, 'status'] = 0
# Save file
self._save_file()
return True
#==========================================================================
def set_status(self, file_name, status):
"""
        Sets the status (0 or 1) for the given file_name and saves the file.
        Returns True.
Created by Magnus Wenzer
Updated 20180424 by Magnus Wenzer
"""
self.df.loc[self.df['filename']==file_name, 'status'] = int(status)
# Save file
self._save_file()
return True
#==========================================================================
def set_key(self, file_name=None, key=None, value=None):
"""
        Sets the column given by key to value for the given file_name.
        Created 20180720 by Magnus Wenzer
        """
        assert all([file_name, key, value is not None]), 'Not enough input arguments'
        # Cast to int when possible; otherwise keep the value as given.
        try:
            value = int(value)
        except (ValueError, TypeError):
            pass
self.df.loc[self.df['filename']==file_name, key] = value
# Save file
self._save_file()
return True
#==========================================================================
def add_file(self, file_name=None, data_type=None, status=0):
"""
        Takes the basename of the given file_name (which may be a path) and adds it to the file.
"""
self.load_and_check_dtype_settings()
assert all([file_name, data_type]), 'Not enough input arguments'
file_name = os.path.basename(file_name)
if file_name in self.df['filename'].values:
print('File already added')
return False
next_index = len(self.df)
# print(self.df)
# print(next_index)
        self.df.loc[next_index, :] = [status, 0, file_name, data_type]  # columns: status, loaded, filename, datatype
self._save_file()
return True
#==========================================================================
def _save_file(self):
self.df.to_csv(self.info_file_path, index=False, sep='\t')
#==========================================================================
#==========================================================================
class UUIDmapping():
"""
Holds the mapping file for uuid.
    MW 20180530: In general user_id should be the same in
"""
def __init__(self, file_path=None, user_id=None):
self.file_path = file_path
self.user_id = user_id
self.all_status = ['editable', 'readable', 'deleted']
self._load_file()
#==========================================================================
def _load_file(self):
"""
Created by Magnus Wenzer
Updated 20180321 by Magnus Wenzer
"""
# print('FILE_PATH:', self.file_path)
self.df = pd.read_csv(self.file_path, sep='\t', encoding='cp1252')
self.df.fillna('', inplace=True)
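        # The built-in 'default_workspace' row is always attributed to the current user.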
self.df.loc[self.df['uuid']=='default_workspace', 'user_id'] = self.user_id
self._save_file()
#==========================================================================
def _save_file(self):
self.df.to_csv(self.file_path, sep='\t', index=False)
#==========================================================================
def _get_status_list(self):
return sorted(set(self.df['status']))
#==========================================================================
def add_new_uuid_for_alias(self, alias=None):
"""
Updated 20180530 by Magnus
Adds a new uuid to the mapping file and returns its value.
"""
print('¤', alias)
status = self.all_status
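        # Refuse to add if no alias was given or the alias already exists for this user (in any status).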
if not alias or self.get_uuid(alias, status=status):
return False
unique_id = str(uuid.uuid4())
# print('&&&&&')
# print(unique_id)
# print(alias)
# print(user_id)
add_df = pd.DataFrame([[unique_id, alias, self.user_id, 'editable', 'True']], columns=['uuid', 'alias', 'user_id', 'status', 'active'])
self.df = self.df.append(add_df)
self.df = self.df.reset_index(drop=True)
self._save_file()
return unique_id
#==========================================================================
def get_alias(self, unique_id=None, status=None):
"""
Updated 20180530 by Magnus
"""
if not status:
status = self.all_status
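        # Default entries (e.g. 'default_workspace') use the same string as both uuid and alias.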
if 'default_' in unique_id:
return unique_id
# print('status', status)
# print('status', status)
result = self.df.loc[(self.df['user_id']==self.user_id) & \
(self.df['uuid']==unique_id) & \
(self.df['status'].isin(status)), 'alias']
if len(result):
return result.values[0]
return False
#==========================================================================
def get_status(self, unique_id=None, alias=None):
if unique_id:
result = self.df.loc[self.df['uuid']==unique_id, 'status']
else:
result = self.df.loc[(self.df['alias']==alias) & \
(self.df['user_id']==self.user_id), 'status']
if len(result):
return result.values[0]
return False
#==========================================================================
def get_user_id(self, unique_id, status=None):
if not status:
status = self.all_status
result = self.df.loc[(self.df['uuid']==unique_id) & \
(self.df['status'].isin(status)), 'user_id']
if len(result):
return result.values[0]
return False
#==========================================================================
def get_uuid(self, alias=None, status=None):
"""
Updated 20180530 by Magnus
"""
if not status:
status = self.all_status
if alias not in self.df['alias'].values:
return False
result = self.df.loc[(self.df['user_id']==self.user_id) & \
(self.df['alias']==alias) & \
(self.df['status'].isin(status)), 'uuid']
if len(result):
return result.values[0]
return False
#==========================================================================
def get_alias_list_for_user(self, status=None):
if not status:
status = self.all_status
return list(self.df.loc[(self.df['user_id']==self.user_id) & \
(self.df['status'].isin(status)), 'alias'])
#==========================================================================
def get_uuid_list_for_user(self, status=None):
if not status:
status = self.all_status
return list(self.df.loc[(self.df['user_id']==self.user_id) & \
(self.df['status'].isin(status)), 'uuid'])
#==========================================================================
def is_active(self, unique_id):
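        # The 'active' column is stored as the strings 'True'/'False' in the mapping file.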
result = self.df.loc[self.df['uuid']==unique_id, 'active']
if len(result):
result = str(result.values[0]).lower()
if result == 'true':
return True
elif result == 'false':
return False
return None
#==========================================================================
def is_present(self, unique_id):
"""
Created 20180719 by Magnus Wenzer
Updated
        Returns True if unique_id is present, otherwise False.
        """
        return unique_id in self.df['uuid'].values
#==========================================================================
def permanent_delete_uuid(self, unique_id):
self.df = self.df.drop(self.df.index[self.df['uuid']==unique_id])
self._save_file()
#==========================================================================
def set_active(self, unique_id):
self.df.loc[self.df['uuid']==unique_id, 'active'] = 'True'
self._save_file()
#==========================================================================
def set_inactive(self, unique_id):
self.df.loc[self.df['uuid']==unique_id, 'active'] = 'False'
self._save_file()
#==========================================================================
def set_alias(self, unique_id, new_alias):
if new_alias in self.get_alias_list_for_user():
raise exceptions.WorkspaceAlreadyExists('when trying to set new alias')
self.df.loc[self.df['uuid']==unique_id, 'alias'] = new_alias
self._save_file()
return True
#==========================================================================
def set_new_uuid(self, current_uuid):
new_uuid = str(uuid.uuid4())
self.df.loc[self.df['uuid']==current_uuid, 'uuid'] = new_uuid
self._save_file()
return new_uuid
#==========================================================================
def set_status(self, unique_id, status):
self.df.loc[self.df['uuid']==unique_id, 'status'] = status
self._save_file()
return status
#==========================================================================
#==========================================================================
class old_UUIDmapping():
"""
Holds the mapping file for uuid.
    MW 20180530: In general user_id should be the same in
"""
def __init__(self, file_path=None):
self.file_path = file_path
self.all_status = ['editable', 'readable', 'deleted']
self._load_file()
#==========================================================================
def _load_file(self):
"""
Created by Magnus Wenzer
Updated 20180321 by Magnus Wenzer
"""
# print('FILE_PATH:', self.file_path)
self.df = pd.read_csv(self.file_path, sep='\t', encoding='cp1252')
self.df.fillna('', inplace=True)
#==========================================================================
def _save_file(self):
self.df.to_csv(self.file_path, sep='\t', index=False)
#==========================================================================
def _get_status_list(self):
return sorted(set(self.df['status']))
#==========================================================================
def add_new_uuid_for_alias(self, alias=None, user_id=None):
"""
Adds a new uuid to the mapping file and returns its value.
"""
print('¤', alias)
print('¤', user_id)
status = self.all_status
if not alias or self.get_uuid(alias, user_id, status=status):
return False
unique_id = str(uuid.uuid4())
# print('&&&&&')
# print(unique_id)
# print(alias)
# print(user_id)
add_df = pd.DataFrame([[unique_id, alias, user_id, 'editable', 'True']], columns=['uuid', 'alias', 'user_id', 'status', 'active'])
self.df = self.df.append(add_df)
self.df = self.df.reset_index(drop=True)
self._save_file()
return unique_id
#==========================================================================
def get_alias(self, unique_id=None, user_id=None, status=None):
if not status:
status = self.all_status
print('status', status)
# print('status', status)
result = self.df.loc[(self.df['uuid']==unique_id) & \
(self.df['status'].isin(status)), 'alias']
if len(result):
return result.values[0]
return False
#==========================================================================
def get_status(self, alias=None, user_id=None, unique_id=None):
if unique_id:
result = self.df.loc[self.df['uuid']==unique_id, 'status']
else:
result = self.df.loc[(self.df['alias']==alias) & \
(self.df['user_id']==user_id), 'status']
if len(result):
return result.values[0]
return False
#==========================================================================
def get_user_id(self, unique_id, status=None):
if not status:
status = self.all_status
result = self.df.loc[(self.df['uuid']==unique_id) & \
(self.df['status'].isin(status)), 'user_id']
if len(result):
return result.values[0]
return False
#==========================================================================
def get_uuid(self, alias=None, user_id=None, status=None):
if not status:
status = self.all_status
result = self.df.loc[(self.df['alias']==alias) & \
(self.df['user_id']==user_id) & \
(self.df['status'].isin(status)), 'uuid']
if len(result):
return result.values[0]
return False
#==========================================================================
def get_alias_list_for_user(self, user_id, status=None):
if not status:
status = self.all_status
return list(self.df.loc[(self.df['user_id']==user_id) & \
(self.df['status'].isin(status)), 'alias'])
#==========================================================================
def get_uuid_list_for_user(self, user_id, status=None):
if not status:
status = self.all_status
return list(self.df.loc[(self.df['user_id']==user_id) & \
(self.df['status'].isin(status)), 'uuid'])
#==========================================================================
def is_active(self, unique_id):
result = self.df.loc[self.df['uuid']==unique_id, 'active']
if len(result):
result = str(result.values[0]).lower()
if result == 'true':
return True
elif result == 'false':
return False
return None
#==========================================================================
def permanent_delete_uuid(self, unique_id):
self.df = self.df.drop(self.df.index[self.df['uuid']==unique_id])
self._save_file()
#==========================================================================
def set_active(self, unique_id):
self.df.loc[self.df['uuid']==unique_id, 'active'] = 'True'
#==========================================================================
def set_inactive(self, unique_id):
self.df.loc[self.df['uuid']==unique_id, 'active'] = 'False'
#==========================================================================
def set_alias(self, unique_id, new_alias):
user_id = self.get_user_id(unique_id)
if new_alias in self.get_alias_list_for_user(user_id):
return False
self.df.loc[self.df['uuid']==unique_id, 'alias'] = new_alias
self._save_file()
return True
#==========================================================================
def set_new_uuid(self, current_uuid):
new_uuid = str(uuid.uuid4())
self.df.loc[self.df['uuid']==current_uuid, 'uuid'] = new_uuid
self._save_file()
return new_uuid
#==========================================================================
def set_status(self, unique_id, status):
self.df.loc[self.df['uuid']==unique_id, 'status'] = status
self._save_file()
return status
"""#========================================================================"""
if __name__ == '__main__':
if 0:
current_path = os.path.dirname(os.path.realpath(__file__))[:-4]
sys.path.append(current_path)
print('='*50)
print('Running module "mapping.py"')
print('-'*50)
print('')
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
source_dir = u'D:\\Utveckling\\GitHub\\ekostat_calculator\\'
first_filter_directory = source_dir + 'resources/mappings/mapping_parameter_dynamic_extended.txt'
fields_filter_directory = source_dir + '/resources/filters/filter_fields_zoobenthos.txt'
water_body_match_directory = source_dir + 'resources/mappings/water_body_match.txt'
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
# Mapping
print('\n# Mapping')
p_map = ParameterMapping()
p_map.load_mapping_settings(file_path=first_filter_directory)
print(p_map.map_parameter_list(['myear', u'ammonium nh4-n']))
print(p_map.get_parameter_mapping(['myear', u'ammonium nh4-n']))
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
f_filter = AttributeDict()
data = core.Load().load_txt(fields_filter_directory, fill_nan=u'')
f_filter._add_arrays_to_entries(**data)
# print('compulsory_fields',f_filter.compulsory_fields)
# print('parameter_key',f_filter.parameter_key)
# print('sort_by_fields',f_filter.sort_by_fields)
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
# Water Body Match
print('\n# Water Body Match')
# wb_match = WaterBody()
# wb_match.load_water_body_match(file_path=water_body_match_directory)
## print(wb_match.dict.get('S. Seskaröfjärden sek namn').get('TYP'))
# print(wb_match.get_type_area_for_water_body('Vändelsöarkipelagen', include_suffix=True))
# print('='*50)
# print(wb_match.get_basin_number_for_water_body('Vändelsöarkipelagen'))
# print('='*50)
# print(wb_match.get_eu_cd_for_water_body('Vändelsöarkipelagen'))
# print('='*50)
# print(wb_match.get_hid_for_water_body('Vändelsöarkipelagen'))
# print('='*50)
# print(wb_match.get_url_viss_for_water_body('Vändelsöarkipelagen'))
# print('='*50)
# print(wb_match.get_center_position_for_water_body('Vändelsöarkipelagen'))
# print('='*50)
# print(wb_match.get_water_bodies_in_type_area('1n'))
# print('='*50)
# print(wb_match.get_water_bodies_in_type_area('1s'))
# print('='*50)
# print(wb_match.get_water_bodies_in_type_area('1'))
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
print('-'*50)
print('done')
print('-'*50)
# for k in p_map.keys():
# if k.startswith('sili'):
# print(k, len(k), p_map.get(k))
if 1:
file_path = 'D:/Utveckling/git/ekostat_calculator/resources/mappings/hypsografs_2017.txt'
h = Hypsograph(file_path)
wb = 'NO591045-111030'
h.get_total_area_of_water_body(wb)
| mit |
henningjp/CoolProp | Web/scripts/logo_2013.py | 2 | 1263 | import matplotlib
matplotlib.use('WXAgg')
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import CoolProp
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(2, 2))
ax = fig.add_subplot(111, projection='3d')
NT = 1000
NR = 1000
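# Log-spaced density grid and linear temperature grid for the p(rho, T) surface.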
rho, t = np.logspace(np.log10(2e-3), np.log10(1100), NR), np.linspace(275.15, 700, NT)
RHO, T = np.meshgrid(rho, t)
P = CoolProp.CoolProp.PropsSI('P', 'D', RHO.reshape((NR * NT, 1)), 'T', T.reshape((NR * NT, 1)), 'REFPROP-Water').reshape(NT, NR)
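# Two-phase boundary: bubble-point (Q=0) and dew-point (Q=1) densities along the saturation curve.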
Tsat = np.linspace(273.17, 647.0, 100)
psat = CoolProp.CoolProp.PropsSI('P', 'Q', 0, 'T', Tsat, 'Water')
rhoL = CoolProp.CoolProp.PropsSI('D', 'Q', 0, 'T', Tsat, 'Water')
rhoV = CoolProp.CoolProp.PropsSI('D', 'Q', 1, 'T', Tsat, 'Water')
ax.plot_surface(np.log(RHO), T, np.log(P), cmap=cm.jet, edgecolor='none')
ax.plot(np.log(rhoL), Tsat, np.log(psat), color='k', lw=2)
ax.plot(np.log(rhoV), Tsat, np.log(psat), color='k', lw=2)
ax.text(0.3, 800, 22, "CoolProp", size=12)
ax.set_frame_on(False)
ax.set_axis_off()
ax.view_init(22, -136)
ax.set_xlabel(r'$\ln\rho$ ')
ax.set_ylabel('$T$')
ax.set_zlabel('$p$')
plt.tight_layout()
plt.savefig('_static/PVTCP.png', transparent=True)
plt.savefig('_static/PVTCP.pdf', transparent=True)
plt.close()
| mit |
cedadev/jasmin_cis | doc/conf.py | 3 | 9669 | # -*- coding: utf-8 -*-
#
# Community Intercomparison Suite documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 25 10:10:43 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import mock
import sys
import os
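# Mock the heavy scientific dependencies so that autodoc can import cis even on
# build machines (e.g. ReadTheDocs) where they are not installed.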
MOCK_MODULES = ['numpy', 'scipy', 'matplotlib.pyplot', 'iris', 'netCDF4', 'nose',
'psutil', 'pyhdf', 'iris.cube', 'iris.exceptions', 'cf_units', 'scipy.stats',
'iris.analysis', 'iris.std_names', 'iris.coords', 'scipy.stats.mstats', 'iris.coord_categorisation',
'iris.analysis.interpolate', 'iris.analysis.cartography', 'numpy.ma', 'scipy.sparse', 'iris.util',
'matplotlib.cbook', 'matplotlib.ticker', 'matplotlib']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
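# numpy and matplotlib are mocked above, so these imports resolve to the mocks;
# the dummy attribute values below only need to exist for the cis import to succeed.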
import numpy, matplotlib
numpy.pi = 3.4
matplotlib.__version__ = 1
import cis
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Community Intercomparison Suite'
copyright = u'2013, University of Oxford'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = cis.__version__
# The full version, including alpha/beta/rc tags.
release = cis.__version__ + " (" + cis.__status__ + ")"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'api/cis.test.harness.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CommunityIntercomparisonSuitedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'CommunityIntercomparrisonSuite.tex', u'Community Intercomparison Suite Documentation',
u'Centre of Environmental Data Archival', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'CommunityIntercomparisonSuite', u'Community Intercomparison Suite Documentation',
[u'Centre of Environmental Data Archival'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'CommunityIntercomparisonSuite', u'Community Intercomparison Suite Documentation',
u'Centre of Environmental Data Archival', 'CommunityIntercomparisonSuite', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Enable todos
# Use ``SPHINXOPTS="-D todo_include_todos=True" make -e html`` to enable this on build.
# todo_include_todos = True
# Configure autodoc
autodoc_default_flags = ['members']
| gpl-3.0 |
deepakantony/sms-tools | workspace/A4/A4Part4.py | 1 | 5970 | import os
import sys
import numpy as np
from scipy.signal import get_window
import matplotlib.pyplot as plt
import math
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../software/models/'))
import stft
import utilFunctions as UF
eps = np.finfo(float).eps
"""
A4-Part-4: Computing onset detection function (Optional)
Write a function to compute a simple onset detection function (ODF) using the STFT. Compute two ODFs,
one for each of the frequency bands, low and high. The low frequency band is the set of all the
frequencies between 0 and 3000 Hz and the high frequency band is the set of all the frequencies
between 3000 and 10000 Hz (excluding the boundary frequencies in both the cases).
A brief description of the onset detection function can be found in the pdf document (A4-STFT.pdf,
in Relevant Concepts section) in the assignment directory (A4). Start with an initial condition of
ODF(0) = 0 in order to make the length of the ODF same as that of the energy envelope. Remember to
apply a half wave rectification on the ODF.
The input arguments to the function are the wav file name including the path (inputFile), window
type (window), window length (M), FFT size (N), and hop size (H). The function should return a numpy
array with two columns, where the first column is the ODF computed on the low frequency band and the
second column is the ODF computed on the high frequency band.
Use stft.stftAnal() to obtain the STFT magnitude spectrum for all the audio frames. Then compute two
energy values for each frequency band specified. While calculating frequency bins for each frequency
band, consider only the bins that are within the specified frequency range. For example, for the low
frequency band consider only the bins with frequency > 0 Hz and < 3000 Hz (you can use np.where() to
find those bin indexes). This way we also remove the DC offset in the signal in energy envelope
computation. The frequency corresponding to the bin index k can be computed as k*fs/N, where fs is
the sampling rate of the signal.
To get a better understanding of the energy envelope and its characteristics you can plot the envelopes
together with the spectrogram of the signal. You can use matplotlib plotting library for this purpose.
To visualize the spectrogram of a signal, a good option is to use colormesh. You can reuse the code in
sms-tools/lectures/4-STFT/plots-code/spectrogram.py. Either overlay the envelopes on the spectrogram
or plot them in a different subplot. Make sure you use the same range of the x-axis for both the
spectrogram and the energy envelopes.
NOTE: Running these test cases might take a few seconds depending on your hardware.
Test case 1: Use piano.wav file with window = 'blackman', M = 513, N = 1024 and H = 128 as input.
The bin indexes of the low frequency band span from 1 to 69 (69 samples) and of the high frequency
band span from 70 to 232 (163 samples). To numerically compare your output, use loadTestCases.py
script to obtain the expected output.
Test case 2: Use piano.wav file with window = 'blackman', M = 2047, N = 4096 and H = 128 as input.
The bin indexes of the low frequency band span from 1 to 278 (278 samples) and of the high frequency
band span from 279 to 928 (650 samples). To numerically compare your output, use loadTestCases.py
script to obtain the expected output.
Test case 3: Use sax-phrase-short.wav file with window = 'hamming', M = 513, N = 2048 and H = 256 as
input. The bin indexes of the low frequency band span from 1 to 139 (139 samples) and of the high
frequency band span from 140 to 464 (325 samples). To numerically compare your output, use
loadTestCases.py script to obtain the expected output.
In addition to comparing results with the expected output, you can also plot your output for these
test cases. For test case 1, you can clearly see that the ODFs have sharp peaks at the onset of the
piano notes (See figure in the accompanying pdf). You will notice exactly 6 peaks that are above
10 dB value in the ODF computed on the high frequency band.
"""
def computeODF(inputFile, window, M, N, H):
"""
Inputs:
inputFile (string): input sound file (monophonic with sampling rate of 44100)
window (string): analysis window type (choice of rectangular, triangular, hanning, hamming,
blackman, blackmanharris)
M (integer): analysis window size (odd integer value)
N (integer): fft size (power of two, bigger or equal than than M)
H (integer): hop size for the STFT computation
Output:
The function should return a numpy array with two columns, where the first column is the ODF
computed on the low frequency band and the second column is the ODF computed on the high
frequency band.
ODF[:,0]: ODF computed in band 0 < f < 3000 Hz
ODF[:,1]: ODF computed in band 3000 < f < 10000 Hz
"""
### your code here
fs,x = UF.wavread(inputFile)
w = get_window(window,M)
mX,pX = stft.stftAnal(x,w,N,H)
mX = pow(10,mX/20.)
num_frames = len(mX)
band_energy = np.zeros((len(mX),2))
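    # Accumulate linear-scale energy per frame in the low (0-3 kHz) and high (3-10 kHz) bands.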
for frm_idx in range(num_frames):
frm = mX[frm_idx]
for k in range(len(frm)):
            cur_f = k * fs / N  # bin centre frequency in Hz (fs is read from the file above)
if cur_f > 0 and cur_f < 3000:
band_energy[frm_idx,0] += (frm[k]*frm[k])
elif cur_f > 3000 and cur_f < 10000:
band_energy[frm_idx,1] += (frm[k]*frm[k])
band_energy = 10.0*np.log10(band_energy)
odf = np.zeros((num_frames,2))
for frm_idx in range(1,num_frames):
odf[frm_idx,0] = band_energy[frm_idx,0]-band_energy[frm_idx-1,0]
odf[frm_idx,0] = 0 if odf[frm_idx,0] < 0 else odf[frm_idx,0]
odf[frm_idx,1] = band_energy[frm_idx,1]-band_energy[frm_idx-1,1]
odf[frm_idx,1] = 0 if odf[frm_idx,1] < 0 else odf[frm_idx,1]
return odf
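# Quick sanity check (not part of the graded function): reproduces Test case 1 from
# the docstring above. The relative path is an assumption based on the standard
# sms-tools layout, where sounds/ sits two levels above workspace/A4 -- adjust if needed.
if __name__ == '__main__':
    odf = computeODF('../../sounds/piano.wav', 'blackman', 513, 1024, 128)
    print(odf.shape)  # (number of frames, 2)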
| agpl-3.0 |
Frankkkkk/arctic | tests/unit/serialization/test_numpy_records.py | 1 | 2846 | import numpy as np
from numpy.testing import assert_array_equal
from mock import patch, Mock, sentinel
from arctic.serialization.numpy_records import PandasSerializer, _to_primitive
from pandas import Timestamp
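# _to_primitive should flatten pandas Timestamps into numpy datetime64[ns] values
# so that frames can be stored as plain numpy records.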
def test_to_primitive_timestamps():
arr = _to_primitive(np.array([Timestamp('2010-11-12 00:00:00')]))
assert_array_equal(arr, np.array([Timestamp('2010-11-12 00:00:00').value], dtype='datetime64[ns]'))
def test_can_convert_to_records_without_objects_returns_false_on_exception_in_to_records():
store = PandasSerializer()
store._to_records = Mock(side_effect=TypeError('uhoh'))
with patch('arctic.serialization.numpy_records.log') as mock_log:
assert store.can_convert_to_records_without_objects(sentinel.df, 'my_symbol') is False
mock_log.info.assert_called_once_with('Pandas dataframe my_symbol caused exception "TypeError(\'uhoh\',)"'
' when attempting to convert to records. Saving as Blob.')
store._to_records.assert_called_once_with(sentinel.df)
def test_can_convert_to_records_without_objects_returns_false_when_records_have_object_dtype():
store = PandasSerializer()
store._to_records = Mock(return_value=(np.array(['a', 'b', None, 'd']), None))
with patch('arctic.serialization.numpy_records.log') as mock_log:
assert store.can_convert_to_records_without_objects(sentinel.df, 'my_symbol') is False
mock_log.info.assert_called_once_with('Pandas dataframe my_symbol contains Objects, saving as Blob')
store._to_records.assert_called_once_with(sentinel.df)
def test_can_convert_to_records_without_objects_returns_false_when_records_have_arrays_in_them():
store = PandasSerializer()
store._to_records = Mock(return_value=(np.rec.array([(1356998400000000000, ['A', 'BC'])],
dtype=[('index', '<M8[ns]'), ('values', 'S2', (2,))]), None))
with patch('arctic.serialization.numpy_records.log') as mock_log:
assert store.can_convert_to_records_without_objects(sentinel.df, 'my_symbol') is False
mock_log.info.assert_called_once_with('Pandas dataframe my_symbol contains >1 dimensional arrays, saving as Blob')
store._to_records.assert_called_once_with(sentinel.df)
def test_can_convert_to_records_without_objects_returns_true_otherwise():
store = PandasSerializer()
store._to_records = Mock(return_value=(np.rec.array([(1356998400000000000, 'a')],
dtype=[('index', '<M8[ns]'), ('values', 'S2')]), None))
with patch('arctic.serialization.numpy_records.log') as mock_log:
assert store.can_convert_to_records_without_objects(sentinel.df, 'my_symbol') is True
assert mock_log.info.call_count == 0
store._to_records.assert_called_once_with(sentinel.df)
| lgpl-2.1 |
justincassidy/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher it is, the less likely the model is to generalize correctly from
the training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
ngoix/OCRF | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (the default is 10), the number of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
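    # Colour each sample by the cluster label assigned by the fitted estimator.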
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |