repo_name (string, 6–112 chars) | path (string, 4–204 chars) | copies (string, 1–3 chars) | size (string, 4–6 chars) | content (string, 714–810k chars) | license (15 classes)
---|---|---|---|---|---|
RiccardoPecora/MP | Lib/site-packages/scipy/signal/waveforms.py | 55 | 11609 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
def sawtooth(t, width=1):
"""
Return a periodic sawtooth waveform.
The sawtooth waveform has a period 2*pi, rises from -1 to 1 on the
interval 0 to width*2*pi and drops from 1 to -1 on the interval
width*2*pi to 2*pi. `width` must be in the interval [0,1].
Parameters
----------
t : array_like
Time.
width : float, optional
Width of the waveform. Default is 1.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> x = np.linspace(0, 20*np.pi, 500)
    >>> plt.plot(x, signal.sawtooth(x))
"""
t,w = asarray(t), asarray(width)
w = asarray(w + (t-t))
t = asarray(t + (w-w))
    if t.dtype.char in 'fFdD':
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape,ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y,mask1,nan)
# take t modulo 2*pi
tmod = mod(t,2*pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1-mask1) & (tmod < w*2*pi)
tsub = extract(mask2,tmod)
wsub = extract(mask2,w)
place(y,mask2,tsub / (pi*wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1-mask1) & (1-mask2)
tsub = extract(mask3,tmod)
wsub = extract(mask3,w)
place(y,mask3, (pi*(wsub+1)-tsub)/(pi*(1-wsub)))
return y
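# A minimal usage sketch, assuming numpy is available; the helper name
# _demo_sawtooth is hypothetical and not part of the upstream module.
# width=0.5 yields a symmetric triangle wave.
def _demo_sawtooth():
    import numpy as np
    t = np.linspace(0, 2*np.pi, 9)
    return sawtooth(t, width=0.5)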
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period 2*pi, has value +1 from 0 to 2*pi*duty
and -1 from 2*pi*duty to 2*pi. `duty` must be in the interval [0,1].
Parameters
----------
t : array_like
The input time array.
duty : float, optional
        Duty cycle, i.e. the fraction of the period during which the signal
        is +1.  Must be in the interval [0,1].  Default is 0.5.
Returns
-------
y : array_like
The output square wave.
"""
t,w = asarray(t), asarray(duty)
w = asarray(w + (t-t))
t = asarray(t + (w-w))
    if t.dtype.char in 'fFdD':
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape,ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y,mask1,nan)
# take t modulo 2*pi
tmod = mod(t,2*pi)
# on the interval 0 to duty*2*pi function is
# 1
mask2 = (1-mask1) & (tmod < w*2*pi)
tsub = extract(mask2,tmod)
wsub = extract(mask2,w)
place(y,mask2,1)
# on the interval duty*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1-mask1) & (1-mask2)
tsub = extract(mask3,tmod)
wsub = extract(mask3,w)
place(y,mask3,-1)
return y
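# A minimal usage sketch, assuming numpy is available; the helper name
# _demo_square is hypothetical and not part of the upstream module.
# duty=0.3 gives a 30% duty cycle, e.g. for PWM-style signals.
def _demo_square():
    import numpy as np
    t = np.linspace(0, 4*np.pi, 16)
    return square(t, duty=0.3)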
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False, retenv=False):
"""
    Return a Gaussian modulated sinusoid: exp(-a t^2) exp(1j*2*pi*fc*t).
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray, or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (Hz).
Default is 0.5.
    bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 /4=-log(ref)
a = -(pi*fc*bw)**2 / (4.0*log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref)/a)
yenv = exp(-a*t*t)
yI = yenv * cos(2*pi*fc*t)
yQ = yenv * sin(2*pi*fc*t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
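# A minimal usage sketch, assuming numpy is available; the helper name
# _demo_gausspulse is hypothetical and not part of the upstream module.
# The pulse is evaluated on a symmetric grid derived from its own cutoff time.
def _demo_gausspulse():
    import numpy as np
    tc = gausspulse('cutoff', fc=5e3, bw=0.5, tpr=-40)  # cutoff time in seconds
    t = np.linspace(-tc, tc, 200)
    yI, yenv = gausspulse(t, fc=5e3, bw=0.5, retenv=True)
    return t, yI, yenv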
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per time unit';
there is no assumption here that the time unit is one second. The
important distinction is that the units of rotation are cycles, not
radians.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
f0 : float
Frequency (in Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (in Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
A numpy array containing the signal evaluated at 't' with the requested
time-varying frequency. More precisely, the function returns:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral (from 0 to t) of ``2*pi*f(t)``.
``f(t)`` is defined below.
See Also
--------
scipy.signal.waveforms.sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f1 must be positive, and f0 must be greater than f1.
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
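# A minimal usage sketch, assuming numpy is available; the helper name
# _demo_chirp is hypothetical and not part of the upstream module.
# Generates a linear chirp sweeping from 1 Hz to 10 Hz over 5 seconds.
def _demo_chirp():
    import numpy as np
    t = np.linspace(0, 5, 1000)
    return chirp(t, f0=1, t1=5, f1=10, method='linear')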
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
    Calculate the phase used by `chirp` to generate its output.
    See `chirp` for a description of the arguments.
"""
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2*pi * (f0*t + 0.5*beta*t*t)
elif method in ['quadratic','quad','q']:
beta = (f1 - f0)/(t1**2)
if vertex_zero:
phase = 2*pi * (f0*t + beta * t**3/3)
else:
phase = 2*pi * (f1*t + beta * ((t1 - t)**3 - t1**3)/3)
elif method in ['logarithmic', 'log', 'lo']:
if f0*f1 <= 0.0:
raise ValueError("For a geometric chirp, f0 and f1 must be nonzero " \
"and have the same sign.")
if f0 == f1:
phase = 2*pi * f0 * t
else:
beta = t1 / log(f1/f0)
phase = 2*pi * beta * f0 * (pow(f1/f0, t/t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f1 <= 0.0 or f0 <= f1:
raise ValueError("hyperbolic chirp requires f0 > f1 > 0.0.")
c = f1*t1
df = f0 - f1
phase = 2*pi * (f0 * c / df) * log((df*t + c)/c)
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic', "
"or 'hyperbolic', but a value of %r was given." % method)
return phase
def sweep_poly(t, poly, phi=0):
"""Frequency-swept cosine generator, with a time-dependent frequency
specified as a polynomial.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1D ndarray (or array-like), or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees. Default is 0.
Returns
-------
A numpy array containing the signal evaluated at 't' with the requested
time-varying frequency. More precisely, the function returns
``cos(phase + (pi/180)*phi)``
where `phase` is the integral (from 0 to t) of ``2 * pi * f(t)``;
``f(t)`` is defined above.
See Also
--------
scipy.signal.waveforms.chirp
Notes
-----
.. versionadded:: 0.8.0
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2*pi * polyval(intpoly, t)
return phase
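# A minimal usage sketch, assuming numpy is available; the helper name
# _demo_sweep_poly is hypothetical and not part of the upstream module.
# The instantaneous frequency follows f(t) = 0.05*t**2 - 0.75*t + 3, given
# here as a numpy.poly1d (a plain coefficient list works as well).
def _demo_sweep_poly():
    import numpy as np
    t = np.linspace(0, 10, 2000)
    p = np.poly1d([0.05, -0.75, 3.0])
    return sweep_poly(t, p)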
| gpl-3.0 |
gnychis/grforwarder | gr-utils/src/python/gr_plot_iq.py | 10 | 7206 | #!/usr/bin/env python
#
# Copyright 2007,2008,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
class draw_iq:
def __init__(self, filename, options):
self.hfile = open(filename, "r")
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.datatype = scipy.complex64
self.sizeof_data = self.datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 9), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file = figtext(0.10, 0.94, ("File: %s" % filename), weight="heavy", size=self.text_size)
self.text_file_pos = figtext(0.10, 0.88, "File Position: ", weight="heavy", size=self.text_size)
self.text_block = figtext(0.40, 0.88, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.88, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = self.sp_iq.get_xlim()
self.manager = get_current_fig_manager()
connect('key_press_event', self.click)
show()
def get_data(self):
self.text_file_pos.set_text("File Position: %d" % (self.hfile.tell()//self.sizeof_data))
try:
self.iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length)
except MemoryError:
print "End of File"
else:
self.reals = scipy.array([r.real for r in self.iq])
self.imags = scipy.array([i.imag for i in self.iq])
self.time = scipy.array([i*(1/self.sample_rate) for i in range(len(self.reals))])
def make_plots(self):
# if specified on the command-line, set file pointer
self.hfile.seek(self.sizeof_data*self.start, 1)
self.get_data()
# Subplot for real and imaginary parts of signal
self.sp_iq = self.fig.add_subplot(2,1,1, position=[0.075, 0.14, 0.85, 0.67])
self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold")
self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
self.plot_iq = plot(self.time, self.reals, 'bo-', self.time, self.imags, 'ro-')
self.sp_iq.set_ylim([1.5*min([self.reals.min(), self.imags.min()]),
1.5*max([self.reals.max(), self.imags.max()])])
self.sp_iq.set_xlim(self.time.min(), self.time.max())
draw()
def update_plots(self):
self.plot_iq[0].set_data([self.time, self.reals])
self.plot_iq[1].set_data([self.time, self.imags])
self.sp_iq.set_ylim([1.5*min([self.reals.min(), self.imags.min()]),
1.5*max([self.reals.max(), self.imags.max()])])
self.sp_iq.set_xlim(self.time.min(), self.time.max())
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
self.get_data()
self.update_plots()
def step_backward(self):
# Step back in file position
if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ):
self.hfile.seek(-2*self.sizeof_data*self.block_length, 1)
else:
self.hfile.seek(-self.hfile.tell(),1)
self.get_data()
self.update_plots()
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
def main():
usage="%prog: [options] input_filename"
description = "Takes a GNU Radio complex binary file and displays the I&Q data versus time. You can set the block size to specify how many points to read in at a time and the start position in the file. By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples."
parser = OptionParser(conflict_handler="resolve", usage=usage, description=description)
parser.add_option("-B", "--block", type="int", default=1000,
help="Specify the block size [default=%default]")
parser.add_option("-s", "--start", type="int", default=0,
help="Specify where to start in the file [default=%default]")
parser.add_option("-R", "--sample-rate", type="float", default=1.0,
                      help="Set the sample rate of the data [default=%default]")
(options, args) = parser.parse_args ()
if len(args) != 1:
parser.print_help()
raise SystemExit, 1
filename = args[0]
dc = draw_iq(filename, options)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
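# A minimal invocation sketch for this script, assuming a GNU Radio
# complex-float capture exists on disk; the file name "capture.32fc" and the
# option values below are hypothetical:
#
#   ./gr_plot_iq.py -B 2000 -s 0 -R 1e6 capture.32fc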
| gpl-3.0 |
mrshu/scikit-learn | benchmarks/bench_random_projections.py | 2 | 8847 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
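# A minimal usage sketch, not part of the original benchmark: time a single
# GaussianRandomProjection fit/transform on a tiny random dataset. Assumes
# scikit-learn is installed; the helper name _demo_bench is hypothetical.
def _demo_bench():
    X_dense, X_sparse = make_sparse_random_data(n_samples=100,
                                                n_features=1000,
                                                n_nonzeros=1000,
                                                random_state=0)
    rp = GaussianRandomProjection(n_components=50, random_state=0)
    fit_time, transform_time = bench_scikit_transformer(X_dense, rp)
    print_row("GaussianRandomProjection", fit_time, transform_time)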
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Bench results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
                  help="Size of the random subspace "
                       "('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
                  help="Ratio of non-zero entries per feature used to generate the data")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
                  help="Density used by the sparse random projection "
                       "('auto' or float in (0.0, 1.0])")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
time_to_fit, time_to_transform = bench_scikit_transformer(X_dense,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
akhilaananthram/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/mlab.py | 69 | 104273 | """
Numerical python functions written for compatibility with matlab(TM)
commands with the same names.
Matlab(TM) compatible functions
-------------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
    Cross spectral density using Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
    Power spectral density using Welch's average periodogram
:func:`rk4`
A 4th order runge kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (power spectral density over segments of time)
Miscellaneous functions
-------------------------
Functions that don't exist in matlab(TM), but are useful anyway:
:meth:`cohere_pairs`
Coherence over all pairs. This is not a matlab function, but we
compute coherence a lot in my lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:meth:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
record array helper functions
-------------------------------
A collection of helper methods for numpyrecord arrays
.. _htmlonly::
See :ref:`misc-examples-index`
:meth:`rec2txt`
pretty print a record array
:meth:`rec2csv`
store record array in CSV file
:meth:`csv2rec`
import record array from CSV file with type inspection
:meth:`rec_append_fields`
adds field(s)/array(s) to record array
:meth:`rec_drop_fields`
drop fields from record array
:meth:`rec_join`
join two record arrays on sequence of fields
:meth:`rec_groupby`
summarize data by groups (similar to SQL GROUP BY)
:meth:`rec_summarize`
helper code to filter rec array fields into new fields
For the rec viewer functions (e.g. rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
Deprecated functions
---------------------
The following are deprecated; please import directly from numpy (with
care--function signatures may differ):
:meth:`conv`
convolution (numpy.convolve)
:meth:`corrcoef`
The matrix of correlation coefficients
:meth:`hist`
Histogram (numpy.histogram)
:meth:`linspace`
Linear spaced array from min to max
:meth:`load`
load ASCII file - use numpy.loadtxt
:meth:`meshgrid`
    Make a 2D grid from 2 1D arrays (numpy.meshgrid)
:meth:`polyfit`
least squares best polynomial fit of x to y (numpy.polyfit)
:meth:`polyval`
evaluate a vector for a vector of polynomial coeffs (numpy.polyval)
:meth:`save`
save ASCII file - use numpy.savetxt
:meth:`trapz`
    trapezoidal integration (trapz(x,y) -> numpy.trapz(y,x))
:meth:`vander`
the Vandermonde matrix (numpy.vander)
"""
from __future__ import division
import csv, warnings, copy, os
import numpy as np
ma = np.ma
from matplotlib import verbose
import matplotlib.nxutils as nxutils
import matplotlib.cbook as cbook
# set is a new builtin function in 2.4; delete the following when
# support for 2.3 is dropped.
try:
set
except NameError:
from sets import Set as set
def linspace(*args, **kw):
warnings.warn("use numpy.linspace", DeprecationWarning)
return np.linspace(*args, **kw)
def meshgrid(x,y):
warnings.warn("use numpy.meshgrid", DeprecationWarning)
return np.meshgrid(x,y)
def mean(x, dim=None):
warnings.warn("Use numpy.mean(x) or x.mean()", DeprecationWarning)
if len(x)==0: return None
return np.mean(x, axis=dim)
def logspace(xmin,xmax,N):
return np.exp(np.linspace(np.log(xmin), np.log(xmax), N))
def _norm(x):
"return sqrt(x dot x)"
return np.sqrt(np.dot(x,x))
def window_hanning(x):
"return x times the hanning window of len(x)"
return np.hanning(len(x))*x
def window_none(x):
"No window function; simply return x"
return x
#from numpy import convolve as conv
def conv(x, y, mode=2):
'convolve x with y'
warnings.warn("Use numpy.convolve(x, y, mode='full')", DeprecationWarning)
return np.convolve(x,y,mode)
def detrend(x, key=None):
if key is None or key=='constant':
return detrend_mean(x)
elif key=='linear':
return detrend_linear(x)
def demean(x, axis=0):
"Return x minus its mean along the specified axis"
x = np.asarray(x)
if axis:
ind = [slice(None)] * axis
ind.append(np.newaxis)
return x - x.mean(axis)[ind]
return x - x.mean(axis)
def detrend_mean(x):
"Return x minus the mean(x)"
return x - x.mean()
def detrend_none(x):
"Return x: no detrending"
return x
def detrend_linear(y):
"Return y minus best fit line; 'linear' detrending "
# This is faster than an algorithm based on linalg.lstsq.
x = np.arange(len(y), dtype=np.float_)
C = np.cov(x, y, bias=1)
b = C[0,1]/C[0,0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
#This is a helper function that implements the commonality between the
#psd, csd, and spectrogram. It is *NOT* meant to be used outside of mlab
def _spectral_helper(x, y, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0, pad_to=None, sides='default',
scale_by_freq=None):
#The checks for if y is x are so that we can use the same function to
#implement the core of psd(), csd(), and spectrogram() without doing
#extra calculations. We return the unaveraged Pxy, freqs, and t.
same_data = y is x
#Make sure we're dealing with a numpy array. If y and x were the same
#object to start with, keep them that way
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
# zero pad x and y up to NFFT if they are shorter than NFFT
if len(x)<NFFT:
n = len(x)
x = np.resize(x, (NFFT,))
x[n:] = 0
if not same_data and len(y)<NFFT:
n = len(y)
y = np.resize(y, (NFFT,))
y[n:] = 0
if pad_to is None:
pad_to = NFFT
if scale_by_freq is None:
warnings.warn("psd, csd, and specgram have changed to scale their "
"densities by the sampling frequency for better MatLab "
"compatibility. You can pass scale_by_freq=False to disable "
"this behavior. Also, one-sided densities are scaled by a "
"factor of 2.")
scale_by_freq = True
# For real x, ignore the negative frequencies unless told otherwise
if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided':
numFreqs = pad_to
scaling_factor = 1.
elif sides in ('default', 'onesided'):
numFreqs = pad_to//2 + 1
scaling_factor = 2.
else:
raise ValueError("sides must be one of: 'default', 'onesided', or "
"'twosided'")
# Matlab divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
scaling_factor /= Fs
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
windowVals = window(np.ones((NFFT,), x.dtype))
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
Pxy = np.zeros((numFreqs,n), np.complex_)
# do the ffts of the slices
for i in range(n):
thisX = x[ind[i]:ind[i]+NFFT]
thisX = windowVals * detrend(thisX)
fx = np.fft.fft(thisX, n=pad_to)
if same_data:
fy = fx
else:
thisY = y[ind[i]:ind[i]+NFFT]
thisY = windowVals * detrend(thisY)
fy = np.fft.fft(thisY, n=pad_to)
Pxy[:,i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs]
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2. Also include
# scaling factors for one-sided densities and dividing by the sampling
# frequency, if desired.
Pxy *= scaling_factor / (np.abs(windowVals)**2).sum()
t = 1./Fs * (ind + NFFT / 2.)
freqs = float(Fs) / pad_to * np.arange(numFreqs)
return Pxy, freqs, t
#Split out these keyword docs so that they can be used elsewhere
kwdocd = dict()
kwdocd['PSD'] ="""
Keyword arguments:
*NFFT*: integer
The number of data points used in each block for the FFT.
        Must be even; a power of 2 is most efficient.  The default value is 256.
*Fs*: scalar
The sampling frequency (samples per time unit). It is used
to calculate the Fourier frequencies, freqs, in cycles per time
unit. The default value is 2.
*detrend*: callable
The function applied to each segment before fft-ing,
designed to remove the mean or linear trend. Unlike in
matlab, where the *detrend* parameter is a vector, in
        matplotlib it is a function.  The :mod:`~matplotlib.pylab`
module defines :func:`~matplotlib.pylab.detrend_none`,
:func:`~matplotlib.pylab.detrend_mean`, and
:func:`~matplotlib.pylab.detrend_linear`, but you can use
a custom function as well.
*window*: callable or ndarray
A function or a vector of length *NFFT*. To create window
vectors see :func:`window_hanning`, :func:`window_none`,
:func:`numpy.blackman`, :func:`numpy.hamming`,
:func:`numpy.bartlett`, :func:`scipy.signal`,
:func:`scipy.signal.get_window`, etc. The default is
:func:`window_hanning`. If a function is passed as the
argument, it must take a data segment as an argument and
return the windowed version of the segment.
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 0 (no overlap).
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. This can be different from *NFFT*, which
specifies the number of data points used. While not increasing
the actual resolution of the psd (the minimum distance between
resolvable peaks), this can give more points in the plot,
allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to*
equal to *NFFT*
*sides*: [ 'default' | 'onesided' | 'twosided' ]
Specifies which sides of the PSD to return. Default gives the
default behavior, which returns one-sided for real data and both
for complex data. 'onesided' forces the return of a one-sided PSD,
while 'twosided' forces two-sided.
*scale_by_freq*: boolean
Specifies whether the resulting density values should be scaled
by the scaling frequency, which gives density in units of Hz^-1.
This allows for integration over the returned frequency values.
The default is True for MatLab compatibility.
"""
def psd(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The power spectral density by Welch's average periodogram method.
The vector *x* is divided into *NFFT* length blocks. Each block
is detrended by the function *detrend* and windowed by the function
*window*. *noverlap* gives the length of the overlap between blocks.
The absolute(fft(block))**2 of each segment are averaged to compute
*Pxx*, with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
*x*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxx*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxx,freqs = csd(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
return Pxx.real,freqs
psd.__doc__ = psd.__doc__ % kwdocd
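# A minimal usage sketch, not part of the original module: estimate the PSD of
# a noisy 100 Hz sinusoid sampled at 1 kHz. The helper name _demo_psd is
# hypothetical; np is the module-level numpy import.
def _demo_psd():
    fs = 1000.0
    t = np.arange(0, 2, 1.0/fs)
    x = np.sin(2*np.pi*100*t) + 0.1*np.random.randn(len(t))
    Pxx, freqs = psd(x, NFFT=256, Fs=fs)
    return freqs[Pxx.argmax()]   # should land close to 100 Hz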
def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The cross power spectral density by Welch's average periodogram
method. The vectors *x* and *y* are divided into *NFFT* length
blocks. Each block is detrended by the function *detrend* and
windowed by the function *window*. *noverlap* gives the length
of the overlap between blocks. The product of the direct FFTs
of *x* and *y* are averaged over each segment to compute *Pxy*,
with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
*x*, *y*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxy*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxy, freqs, t = _spectral_helper(x, y, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
if len(Pxy.shape) == 2 and Pxy.shape[1]>1:
Pxy = Pxy.mean(axis=1)
return Pxy, freqs
csd.__doc__ = csd.__doc__ % kwdocd
def specgram(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=128, pad_to=None, sides='default', scale_by_freq=None):
"""
Compute a spectrogram of data in *x*. Data are split into *NFFT*
length segements and the PSD of each section is computed. The
windowing function *window* is applied to each segment, and the
amount of overlap of each segment is specified with *noverlap*.
If *x* is real (i.e. non-complex) only the spectrum of the positive
    frequencies is returned.  If *x* is complex then the complete
spectrum is returned.
%(PSD)s
Returns a tuple (*Pxx*, *freqs*, *t*):
- *Pxx*: 2-D array, columns are the periodograms of
successive segments
- *freqs*: 1-D array of frequencies corresponding to the rows
in Pxx
- *t*: 1-D array of times corresponding to midpoints of
segments.
.. seealso::
:func:`psd`:
:func:`psd` differs in the default overlap; in returning
the mean of the segment periodograms; and in not returning
times.
"""
assert(NFFT > noverlap)
Pxx, freqs, t = _spectral_helper(x, x, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
Pxx = Pxx.real #Needed since helper implements generically
if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided':
# center the frequency range at zero
        # integer division keeps the slice indices integral under
        # `from __future__ import division`
        freqs = np.concatenate((freqs[NFFT//2:]-Fs, freqs[:NFFT//2]))
        Pxx = np.concatenate((Pxx[NFFT//2:,:], Pxx[:NFFT//2,:]), 0)
return Pxx, freqs, t
specgram.__doc__ = specgram.__doc__ % kwdocd
_coh_error = """Coherence is calculated by averaging over *NFFT*
length segments. Your signal is too short for your choice of *NFFT*.
"""
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
*x*, *y*
Array or sequence containing the data
%(PSD)s
The return value is the tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector. For cohere, scaling the
individual densities by the sampling frequency has no effect, since
the factors cancel out.
.. seealso::
:func:`psd` and :func:`csd`:
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
if len(x)<2*NFFT:
raise ValueError(_coh_error)
Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy)
Cxy.shape = (len(f),)
return Cxy, f
cohere.__doc__ = cohere.__doc__ % kwdocd
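# A minimal usage sketch, not part of the original module: the coherence
# between a signal and a noisy copy of itself peaks near 1 at the signal
# frequency. The helper name _demo_cohere is hypothetical.
def _demo_cohere():
    fs = 500.0
    t = np.arange(0, 8, 1.0/fs)
    x = np.sin(2*np.pi*50*t) + 0.5*np.random.randn(len(t))
    y = x + 0.5*np.random.randn(len(t))
    Cxy, f = cohere(x, y, NFFT=256, Fs=fs)
    return f, Cxy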
def corrcoef(*args):
"""
corrcoef(*X*) where *X* is a matrix returns a matrix of correlation
coefficients for the columns of *X*
corrcoef(*x*, *y*) where *x* and *y* are vectors returns the matrix of
correlation coefficients for *x* and *y*.
Numpy arrays can be real or complex.
The correlation matrix is defined from the covariance matrix *C*
as
.. math::
r_{ij} = \\frac{C_{ij}}{\\sqrt{C_{ii}C_{jj}}}
"""
warnings.warn("Use numpy.corrcoef", DeprecationWarning)
kw = dict(rowvar=False)
return np.corrcoef(*args, **kw)
def polyfit(*args, **kwargs):
u"""
polyfit(*x*, *y*, *N*)
Do a best fit polynomial of order *N* of *y* to *x*. Return value
is a vector of polynomial coefficients [pk ... p1 p0]. Eg, for
*N*=2::
      p2*x0^2 + p1*x0 + p0 = y0
p2*x1^2 + p1*x1 + p0 = y1
p2*x2^2 + p1*x2 + p0 = y2
.....
p2*xk^2 + p1*xk + p0 = yk
    Method: if *X* is the Vandermonde Matrix computed from *x* (see
`vandermonds
<http://mathworld.wolfram.com/VandermondeMatrix.html>`_), then the
polynomial least squares solution is given by the '*p*' in
X*p = y
where *X* is a (len(*x*) \N{MULTIPLICATION SIGN} *N* + 1) matrix,
*p* is a *N*+1 length vector, and *y* is a (len(*x*)
\N{MULTIPLICATION SIGN} 1) vector.
This equation can be solved as
.. math::
p = (X_t X)^-1 X_t y
where :math:`X_t` is the transpose of *X* and -1 denotes the
inverse. Numerically, however, this is not a good method, so we
use :func:`numpy.linalg.lstsq`.
For more info, see `least squares fitting
<http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html>`_,
    but note that the *k*'s and *n*'s in the superscripts and
    subscripts on that page differ from the notation here.  The linear
    algebra is correct, however.
.. seealso::
:func:`polyval`
"""
    warnings.warn("use numpy.polyfit", DeprecationWarning)
return np.polyfit(*args, **kwargs)
def polyval(*args, **kwargs):
"""
*y* = polyval(*p*, *x*)
*p* is a vector of polynomial coeffients and *y* is the polynomial
evaluated at *x*.
Example code to remove a polynomial (quadratic) trend from y::
p = polyfit(x, y, 2)
trend = polyval(p, x)
resid = y - trend
.. seealso::
:func:`polyfit`
"""
warnings.warn("use numpy.polyval", DeprecationWarning)
return np.polyval(*args, **kwargs)
def vander(*args, **kwargs):
"""
*X* = vander(*x*, *N* = *None*)
The Vandermonde matrix of vector *x*. The *i*-th column of *X* is the
the *i*-th power of *x*. *N* is the maximum power to compute; if *N* is
*None* it defaults to len(*x*).
"""
warnings.warn("Use numpy.vander()", DeprecationWarning)
return np.vander(*args, **kwargs)
def donothing_callback(*args):
pass
def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0,
preferSpeedOverMemory=True,
progressCallback=donothing_callback,
returnPxx=False):
u"""
Cxy, Phase, freqs = cohere_pairs(X, ij, ...)
Compute the coherence for all pairs in *ij*. *X* is a
(*numSamples*, *numCols*) numpy array. *ij* is a list of tuples
(*i*, *j*). Each tuple is a pair of indexes into the columns of *X*
for which you want to compute coherence. For example, if *X* has 64
columns, and you want to compute all nonredundant pairs, define *ij*
as::
ij = []
for i in range(64):
for j in range(i+1,64):
ij.append( (i, j) )
The other function arguments, except for *preferSpeedOverMemory*
(see below), are explained in the help string of :func:`psd`.
Return value is a tuple (*Cxy*, *Phase*, *freqs*).
- *Cxy*: a dictionary of (*i*, *j*) tuples -> coherence vector for that
pair. I.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``. Number of
dictionary keys is ``len(ij)``.
- *Phase*: a dictionary of phases of the cross spectral density at
each frequency for each pair. The keys are ``(i,j)``.
- *freqs*: a vector of frequencies, equal in length to either
      the coherence or phase vectors for any (*i*, *j*) key.  Eg,
to make a coherence Bode plot::
subplot(211)
plot( freqs, Cxy[(12,19)])
subplot(212)
plot( freqs, Phase[(12,19)])
For a large number of pairs, :func:`cohere_pairs` can be much more
efficient than just calling :func:`cohere` for each pair, because
it caches most of the intensive computations. If *N* is the
number of pairs, this function is O(N) for most of the heavy
lifting, whereas calling cohere for each pair is
O(N\N{SUPERSCRIPT TWO}). However, because of the caching, it is
also more memory intensive, making 2 additional complex arrays
with approximately the same number of elements as *X*.
The parameter *preferSpeedOverMemory*, if *False*, limits the
caching by only making one, rather than two, complex cache arrays.
This is useful if memory becomes critical. Even when
*preferSpeedOverMemory* is *False*, :func:`cohere_pairs` will
    still give significant performance gains over calling
    :func:`cohere` for each pair, and will use substantially less
    memory than if *preferSpeedOverMemory* is *True*.  In my tests
    with a (43000, 64) array over all non-redundant pairs,
    *preferSpeedOverMemory* = *True* delivered a 33% performance boost
    on a 1.7GHZ Athlon with 512MB RAM compared with
    *preferSpeedOverMemory* = *False*.  But both solutions were more
    than 10x faster than naively crunching all possible pairs through
cohere.
.. seealso::
:file:`test/cohere_pairs_test.py` in the src tree:
For an example script that shows that this
:func:`cohere_pairs` and :func:`cohere` give the same
results for a given pair.
"""
numRows, numCols = X.shape
# zero pad if X is too short
if numRows < NFFT:
tmp = X
X = np.zeros( (NFFT, numCols), X.dtype)
X[:numRows,:] = tmp
del tmp
numRows, numCols = X.shape
# get all the columns of X that we are interested in by checking
# the ij tuples
seen = {}
for i,j in ij:
seen[i]=1; seen[j] = 1
allColumns = seen.keys()
Ncols = len(allColumns)
del seen
# for real X, ignore the negative frequencies
if np.iscomplexobj(X): numFreqs = NFFT
else: numFreqs = NFFT//2+1
# cache the FFT of every windowed, detrended NFFT length segement
# of every channel. If preferSpeedOverMemory, cache the conjugate
# as well
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
        windowVals = window(np.ones((NFFT,), X.dtype))
ind = range(0, numRows-NFFT+1, NFFT-noverlap)
numSlices = len(ind)
FFTSlices = {}
FFTConjSlices = {}
Pxx = {}
slices = range(numSlices)
    normVal = _norm(windowVals)**2
    for iCount, iCol in enumerate(allColumns):
        progressCallback(iCount/Ncols, 'Caching FFTs')
Slices = np.zeros( (numSlices,numFreqs), dtype=np.complex_)
for iSlice in slices:
thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
thisSlice = windowVals*detrend(thisSlice)
            Slices[iSlice,:] = np.fft.fft(thisSlice)[:numFreqs]
FFTSlices[iCol] = Slices
if preferSpeedOverMemory:
            FFTConjSlices[iCol] = np.conjugate(Slices)
        Pxx[iCol] = np.divide(np.mean(np.absolute(Slices)**2, axis=0), normVal)
del Slices, ind, windowVals
# compute the coherences and phases for all pairs using the
# cached FFTs
Cxy = {}
Phase = {}
count = 0
N = len(ij)
for i,j in ij:
count +=1
if count%10==0:
progressCallback(count/N, 'Computing coherences')
if preferSpeedOverMemory:
Pxy = FFTSlices[i] * FFTConjSlices[j]
else:
Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])
        if numSlices>1: Pxy = np.mean(Pxy, axis=0)
Pxy = np.divide(Pxy, normVal)
Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
Phase[(i,j)] = np.arctan2(Pxy.imag, Pxy.real)
freqs = Fs/NFFT*np.arange(numFreqs)
if returnPxx:
return Cxy, Phase, freqs, Pxx
else:
return Cxy, Phase, freqs
def entropy(y, bins):
r"""
Return the entropy of the data in *y*.
.. math::
\sum p_i \log_2(p_i)
where :math:`p_i` is the probability of observing *y* in the
:math:`i^{th}` bin of *bins*. *bins* can be a number of bins or a
range of bins; see :func:`numpy.histogram`.
Compare *S* with analytic calculation for a Gaussian::
x = mu + sigma * randn(200000)
Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )
"""
n,bins = np.histogram(y, bins)
n = n.astype(np.float_)
n = np.take(n, np.nonzero(n)[0]) # get the positive
p = np.divide(n, len(y))
delta = bins[1]-bins[0]
    S = -1.0*np.sum(p*np.log(p)) + np.log(delta)
#S = -1.0*np.sum(p*log(p))
return S
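# A minimal usage sketch, not part of the original module: compare the
# estimated entropy of Gaussian samples with the analytic value quoted in the
# docstring. The helper name _demo_entropy is hypothetical.
def _demo_entropy():
    sigma = 2.0
    y = sigma * np.random.randn(200000)
    S_est = entropy(y, bins=200)
    S_analytic = 0.5 * (1.0 + np.log(2*np.pi*sigma**2))
    return S_est, S_analytic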
def hist(y, bins=10, normed=0):
"""
Return the histogram of *y* with *bins* equally sized bins. If
bins is an array, use those bins. Return value is (*n*, *x*)
where *n* is the count for each bin in *x*.
If *normed* is *False*, return the counts in the first element of
the returned tuple. If *normed* is *True*, return the probability
    density :math:`\\frac{n}{len(y) \\cdot \\mathrm{dbin}}`.
If *y* has rank > 1, it will be raveled. If *y* is masked, only the
unmasked values will be used.
Credits: the Numeric 22 documentation
"""
warnings.warn("Use numpy.histogram()", DeprecationWarning)
return np.histogram(y, bins=bins, range=None, normed=normed)
def normpdf(x, *args):
"Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
mu, sigma = args
return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2)
def levypdf(x, gamma, alpha):
"Returm the levy pdf evaluated at *x* for params *gamma*, *alpha*"
N = len(x)
if N%2 != 0:
raise ValueError, 'x must be an event length array; try\n' + \
'x = np.linspace(minx, maxx, N), where N is even'
dx = x[1]-x[0]
    f = 1/(N*dx)*np.arange(-N/2, N/2, dtype=np.float_)
    ind = np.concatenate([np.arange(N/2, N, dtype=int),
                          np.arange(0, N/2, dtype=int)])
df = f[1]-f[0]
    cfl = np.exp(-gamma*np.absolute(2*np.pi*f)**alpha)
px = np.fft.fft(np.take(cfl,ind)*df).astype(np.float_)
return np.take(px, ind)
def find(condition):
"Return the indices where ravel(condition) is true"
res, = np.nonzero(np.ravel(condition))
return res
def trapz(x, y):
"""
Trapezoidal integral of *y*(*x*).
"""
warnings.warn("Use numpy.trapz(y,x) instead of trapz(x,y)", DeprecationWarning)
return np.trapz(y, x)
#if len(x)!=len(y):
# raise ValueError, 'x and y must have the same length'
#if len(x)<2:
# raise ValueError, 'x and y must have > 1 element'
#return np.sum(0.5*np.diff(x)*(y[1:]+y[:-1]))
def longest_contiguous_ones(x):
"""
Return the indices of the longest stretch of contiguous ones in *x*,
assuming *x* is a vector of zeros and ones. If there are two
equally long stretches, pick the first.
"""
x = np.ravel(x)
if len(x)==0:
return np.array([])
ind = (x==0).nonzero()[0]
if len(ind)==0:
return np.arange(len(x))
if len(ind)==len(x):
return np.array([])
y = np.zeros( (len(x)+2,), x.dtype)
y[1:-1] = x
dif = np.diff(y)
up = (dif == 1).nonzero()[0];
dn = (dif == -1).nonzero()[0];
i = (dn-up == max(dn - up)).nonzero()[0][0]
ind = np.arange(up[i], dn[i])
return ind
def longest_ones(x):
'''alias for longest_contiguous_ones'''
return longest_contiguous_ones(x)
def prepca(P, frac=0):
"""
Compute the principal components of *P*. *P* is a (*numVars*,
*numObs*) array. *frac* is the minimum fraction of variance that a
component must contain to be included.
Return value is a tuple of the form (*Pcomponents*, *Trans*,
*fracVar*) where:
- *Pcomponents* : a (numVars, numObs) array
- *Trans* : the weights matrix, ie, *Pcomponents* = *Trans* *
*P*
- *fracVar* : the fraction of the variance accounted for by each
component returned
A similar function of the same name was in the Matlab (TM)
R13 Neural Network Toolbox but is not found in later versions;
its successor seems to be called "processpcs".
"""
U,s,v = np.linalg.svd(P)
varEach = s**2/P.shape[1]
totVar = varEach.sum()
fracVar = varEach/totVar
ind = slice((fracVar>=frac).sum())
# select the components that are greater
Trans = U[:,ind].transpose()
# The transformed data
Pcomponents = np.dot(Trans,P)
return Pcomponents, Trans, fracVar[ind]
def prctile(x, p = (0.0, 25.0, 50.0, 75.0, 100.0)):
"""
Return the percentiles of *x*. *p* can either be a sequence of
percentile values or a scalar. If *p* is a sequence, the ith
element of the return sequence is the *p*(i)-th percentile of *x*.
If *p* is a scalar, the largest value of *x* less than or equal to
the *p* percentage point in the sequence is returned.
"""
x = np.array(x).ravel() # we need a copy
x.sort()
Nx = len(x)
if not cbook.iterable(p):
return x[int(p*Nx/100.0)]
p = np.asarray(p)* Nx/100.0
ind = p.astype(int)
ind = np.where(ind>=Nx, Nx-1, ind)
return x.take(ind)
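# A minimal usage sketch, not part of the original module: quartiles of a
# uniform sample. The helper name _demo_prctile is hypothetical.
def _demo_prctile():
    x = np.random.rand(10000)
    return prctile(x, p=(25.0, 50.0, 75.0))   # roughly (0.25, 0.5, 0.75)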
def prctile_rank(x, p):
"""
Return the rank for each element in *x*, return the rank
0..len(*p*). Eg if *p* = (25, 50, 75), the return value will be a
len(*x*) array with values in [0,1,2,3] where 0 indicates the
value is less than the 25th percentile, 1 indicates the value is
>= the 25th and < 50th percentile, ... and 3 indicates the value
is above the 75th percentile cutoff.
*p* is either an array of percentiles in [0..100] or a scalar which
indicates how many quantiles of data you want ranked.
"""
if not cbook.iterable(p):
p = np.arange(100.0/p, 100.0, 100.0/p)
else:
p = np.asarray(p)
if p.max()<=1 or p.min()<0 or p.max()>100:
raise ValueError('percentiles should be in range 0..100, not 0..1')
ptiles = prctile(x, p)
return np.searchsorted(ptiles, x)
def center_matrix(M, dim=0):
"""
Return the matrix *M* with each row having zero mean and unit std.
If *dim* = 1 operate on columns instead of rows. (*dim* is
opposite to the numpy axis kwarg.)
"""
M = np.asarray(M, np.float_)
if dim:
M = (M - M.mean(axis=0)) / M.std(axis=0)
else:
M = (M - M.mean(axis=1)[:,np.newaxis])
M = M / M.std(axis=1)[:,np.newaxis]
return M
def rk4(derivs, y0, t):
"""
Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
*y0*
initial state vector
*t*
sample times
*derivs*
returns the derivative of the system and has the
signature ``dy = derivs(yi, ti)``
Example 1 ::
## 2D system
def derivs6(x,t):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
Example 2::
## 1D system
alpha = 2
def derivs(x,t):
return -alpha*x + exp(-t)
y0 = 1
yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
"""
try: Ny = len(y0)
except TypeError:
yout = np.zeros( (len(t),), np.float_)
else:
yout = np.zeros( (len(t), Ny), np.float_)
yout[0] = y0
i = 0
for i in np.arange(len(t)-1):
thist = t[i]
dt = t[i+1] - thist
dt2 = dt/2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0, thist))
k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2))
k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2))
k4 = np.asarray(derivs(y0 + dt*k3, thist+dt))
yout[i+1] = y0 + dt/6.0*(k1 + 2*k2 + 2*k3 + k4)
return yout
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0,
mux=0.0, muy=0.0, sigmaxy=0.0):
"""
Bivariate Gaussian distribution for equal shape *X*, *Y*.
See `bivariate normal
<http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
at mathworld.
"""
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp( -z/(2*(1-rho**2))) / denom
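# A minimal usage sketch, not part of the original module: evaluate the
# standard bivariate normal density on a small grid; the peak sits at the
# origin with value 1/(2*pi). The helper name _demo_bivariate_normal is
# hypothetical.
def _demo_bivariate_normal():
    X, Y = np.meshgrid(np.linspace(-2, 2, 5), np.linspace(-2, 2, 5))
    Z = bivariate_normal(X, Y)
    return Z.max()   # approximately 1/(2*pi) ~ 0.159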
def get_xyz_where(Z, Cond):
"""
*Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is
a boolean matrix where some condition is satisfied. Return value
is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
*z* are the values of *Z* at those indices. *x*, *y*, and *z* are
1D arrays.
"""
X,Y = np.indices(Z.shape)
return X[Cond], Y[Cond], Z[Cond]
def get_sparse_matrix(M,N,frac=0.1):
"""
Return a *M* x *N* sparse matrix with *frac* elements randomly
filled.
"""
data = np.zeros((M,N))*0.
for i in range(int(M*N*frac)):
x = np.random.randint(0,M-1)
y = np.random.randint(0,N-1)
data[x,y] = np.random.rand()
return data
def dist(x,y):
"""
Return the distance between two points.
"""
d = x-y
return np.sqrt(np.dot(d,d))
def dist_point_to_segment(p, s0, s1):
"""
Get the distance of a point to a segment.
*p*, *s0*, *s1* are *xy* sequences
This algorithm from
http://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment
"""
p = np.asarray(p, np.float_)
s0 = np.asarray(s0, np.float_)
s1 = np.asarray(s1, np.float_)
v = s1 - s0
w = p - s0
c1 = np.dot(w,v);
if ( c1 <= 0 ):
return dist(p, s0);
c2 = np.dot(v,v)
if ( c2 <= c1 ):
return dist(p, s1);
b = c1 / c2
pb = s0 + b * v;
return dist(p, pb)
def segments_intersect(s1, s2):
"""
Return *True* if *s1* and *s2* intersect.
*s1* and *s2* are defined as::
s1: (x1, y1), (x2, y2)
s2: (x3, y3), (x4, y4)
"""
(x1, y1), (x2, y2) = s1
(x3, y3), (x4, y4) = s2
den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1))
n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3))
n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3))
if den == 0:
# lines parallel
return False
u1 = n1/den
u2 = n2/den
return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0
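# A minimal usage sketch, not part of the original module: the two diagonals
# of the unit square intersect, while two parallel edges do not. The helper
# name _demo_segments_intersect is hypothetical.
def _demo_segments_intersect():
    crossing = segments_intersect(((0, 0), (1, 1)), ((0, 1), (1, 0)))   # True
    parallel = segments_intersect(((0, 0), (1, 0)), ((0, 1), (1, 1)))   # False
    return crossing, parallel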
def fftsurr(x, detrend=detrend_none, window=window_none):
"""
Compute an FFT phase randomized surrogate of *x*.
"""
if cbook.iterable(window):
x=window*detrend(x)
else:
x = window(detrend(x))
z = np.fft.fft(x)
a = 2.*np.pi*1j
phase = a * np.random.rand(len(x))
z = z*np.exp(phase)
return np.fft.ifft(z).real
def liaupunov(x, fprime):
"""
*x* is a very long trajectory from a map, and *fprime* returns the
derivative of *x*.
Returns :
.. math::
        \lambda = \\frac{1}{n}\\sum \\ln|f'(x_i)|
.. seealso::
Sec 10.5 Strogatz (1994) "Nonlinear Dynamics and Chaos".
`Wikipedia article on Lyapunov Exponent
<http://en.wikipedia.org/wiki/Lyapunov_exponent>`_.
.. note::
What the function here calculates may not be what you really want;
*caveat emptor*.
It also seems that this function's name is badly misspelled.
"""
return np.mean(np.log(np.absolute(fprime(x))))
class FIFOBuffer:
"""
A FIFO queue to hold incoming *x*, *y* data in a rotating buffer
using numpy arrays under the hood. It is assumed that you will
call asarrays much less frequently than you add data to the queue
-- otherwise another data structure will be faster.
This can be used to support plots where data is added from a real
time feed and the plot object wants to grab data from the buffer
    and plot it to screen less frequently than the incoming.
If you set the *dataLim* attr to
:class:`~matplotlib.transforms.BBox` (eg
:attr:`matplotlib.Axes.dataLim`), the *dataLim* will be updated as
new data come in.
TODO: add a grow method that will extend nmax
.. note::
mlab seems like the wrong place for this class.
"""
def __init__(self, nmax):
"""
Buffer up to *nmax* points.
"""
self._xa = np.zeros((nmax,), np.float_)
self._ya = np.zeros((nmax,), np.float_)
self._xs = np.zeros((nmax,), np.float_)
self._ys = np.zeros((nmax,), np.float_)
self._ind = 0
self._nmax = nmax
self.dataLim = None
self.callbackd = {}
def register(self, func, N):
"""
Call *func* every time *N* events are passed; *func* signature
is ``func(fifo)``.
"""
self.callbackd.setdefault(N, []).append(func)
def add(self, x, y):
"""
Add scalar *x* and *y* to the queue.
"""
if self.dataLim is not None:
xys = ((x,y),)
self.dataLim.update(xys, -1) #-1 means use the default ignore setting
ind = self._ind % self._nmax
#print 'adding to fifo:', ind, x, y
self._xs[ind] = x
self._ys[ind] = y
for N,funcs in self.callbackd.items():
if (self._ind%N)==0:
for func in funcs:
func(self)
self._ind += 1
def last(self):
"""
        Return the last *x*, *y* pair, or (*None*, *None*) if no data have been added.
"""
if self._ind==0: return None, None
ind = (self._ind-1) % self._nmax
return self._xs[ind], self._ys[ind]
def asarrays(self):
"""
Return *x* and *y* as arrays; their length will be the len of
data added or *nmax*.
"""
if self._ind<self._nmax:
return self._xs[:self._ind], self._ys[:self._ind]
ind = self._ind % self._nmax
self._xa[:self._nmax-ind] = self._xs[ind:]
self._xa[self._nmax-ind:] = self._xs[:ind]
self._ya[:self._nmax-ind] = self._ys[ind:]
self._ya[self._nmax-ind:] = self._ys[:ind]
return self._xa, self._ya
def update_datalim_to_current(self):
"""
        Update the *dataLim* to reflect the current data in the fifo.
"""
if self.dataLim is None:
raise ValueError('You must first set the dataLim attr')
x, y = self.asarrays()
self.dataLim.update_numerix(x, y, True)
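# Illustrative usage sketch (not part of the original module): with nmax=3
# only the three most recently added points survive in the buffer.
def _demo_fifobuffer():
    buf = FIFOBuffer(3)
    for i in range(5):
        buf.add(float(i), float(i) ** 2)
    x, y = buf.asarrays()
    return x, y   # x == [2., 3., 4.], y == [4., 9., 16.]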
def movavg(x,n):
"""
Compute the len(*n*) moving average of *x*.
"""
w = np.empty((n,), dtype=np.float_)
w[:] = 1.0/n
return np.convolve(x, w, mode='valid')
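# Illustrative usage sketch (not part of the original module): a length-3
# moving average of 0..4 in 'valid' mode yields the three interior means.
def _demo_movavg():
    assert np.allclose(movavg(np.arange(5.0), 3), [1.0, 2.0, 3.0])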
def save(fname, X, fmt='%.18e',delimiter=' '):
"""
Save the data in *X* to file *fname* using *fmt* string to convert the
data to strings.
*fname* can be a filename or a file handle. If the filename ends
in '.gz', the file is automatically saved in compressed gzip
format. The :func:`load` function understands gzipped files
transparently.
Example usage::
save('test.out', X) # X is an array
save('test1.out', (x,y,z)) # x,y,z equal sized 1D arrays
save('test2.out', x) # x is 1D
save('test3.out', x, fmt='%1.4e') # use exponential notation
*delimiter* is used to separate the fields, eg. *delimiter* ','
for comma-separated values.
"""
if cbook.is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname,'wb')
else:
fh = file(fname,'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = np.asarray(X)
origShape = None
if X.ndim == 1:
origShape = X.shape
X.shape = len(X), 1
for row in X:
fh.write(delimiter.join([fmt%val for val in row]) + '\n')
if origShape is not None:
X.shape = origShape
def load(fname,comments='#',delimiter=None, converters=None,skiprows=0,
usecols=None, unpack=False, dtype=np.float_):
"""
Load ASCII data from *fname* into an array and return the array.
The data must be regular, same number of values in every row
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'.
matfile data is not supported; for that, use :mod:`scipy.io.mio`
module.
Example usage::
X = load('test.dat') # data in two columns
t = X[:,0]
y = X[:,1]
Alternatively, you can do the same with "unpack"; see below::
X = load('test.dat') # a matrix of data
x = load('test.dat') # a single column of data
- *comments*: the character used to indicate the start of a comment
in the file
    - *delimiter* is a string-like character used to separate values
in the file. If *delimiter* is unspecified or *None*, any
whitespace string is a separator.
- *converters*, if not *None*, is a dictionary mapping column number to
a function that will convert that column to a float (or the optional
*dtype* if specified). Eg, if column 0 is a date string::
converters = {0:datestr2num}
- *skiprows* is the number of rows from the top to skip.
- *usecols*, if not *None*, is a sequence of integer column indexes to
extract where 0 is the first column, eg ``usecols=[1,4,5]`` to extract
just the 2nd, 5th and 6th columns
- *unpack*, if *True*, will transpose the matrix allowing you to unpack
into named arguments on the left hand side::
t,y = load('test.dat', unpack=True) # for two column data
x,y,z = load('somefile.dat', usecols=[3,5,7], unpack=True)
- *dtype*: the array will have this dtype. default: ``numpy.float_``
.. seealso::
See :file:`examples/pylab_examples/load_converter.py` in the source tree:
Exercises many of these options.
"""
if converters is None: converters = {}
fh = cbook.to_filehandle(fname)
X = []
if delimiter==' ':
# space splitting is a special case since x.split() is what
# you want, not x.split(' ')
def splitfunc(x):
return x.split()
else:
def splitfunc(x):
return x.split(delimiter)
converterseq = None
for i,line in enumerate(fh):
if i<skiprows: continue
line = line.split(comments, 1)[0].strip()
if not len(line): continue
if converterseq is None:
converterseq = [converters.get(j,float)
for j,val in enumerate(splitfunc(line))]
if usecols is not None:
vals = splitfunc(line)
row = [converterseq[j](vals[j]) for j in usecols]
else:
row = [converterseq[j](val)
for j,val in enumerate(splitfunc(line))]
thisLen = len(row)
X.append(row)
X = np.array(X, dtype)
r,c = X.shape
if r==1 or c==1:
X.shape = max(r,c),
if unpack: return X.transpose()
else: return X
def slopes(x,y):
"""
    :func:`slopes` calculates the slope *y*'(*x*). Given data vectors *X*
    and *Y*, it returns *Y*'(*X*), i.e. the slope of the curve *Y*(*X*).
    The slope is estimated using the slope obtained from that of a
    parabola through any three consecutive points.
This method should be superior to that described in the appendix
of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between x-
and y-values. For many functions, however, the abscissa are given
in different dimensions, so an aspect ratio is completely
arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
cases
    Norbert Nemec, Institute of Theoretical Physics, University of
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
    (inspired by an original implementation by Halldor Bjornsson,
    Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
def stineman_interp(xi,x,y,yp=None):
"""
STINEMAN_INTERP Well behaved data interpolation. Given data
vectors X and Y, the slope vector YP and a new abscissa vector XI
the function stineman_interp(xi,x,y,yp) uses Stineman
interpolation to calculate a vector YI corresponding to XI.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa:
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
Creative Computing with a note from the editor stating that while
    they were not an academic journal, once in a while something serious
    and original comes in, adding that this was "apparently a real
    solution" to a well known problem.
For yp=None, the routine automatically determines the slopes using
the "slopes" routine.
X is assumed to be sorted in increasing order
    For values xi[j] < x[0] or xi[j] > x[-1], the routine tries an
    extrapolation. The relevance of the data obtained this way is, of
    course, questionable...
    original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is
    completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
    2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
def inside_poly(points, verts):
"""
points is a sequence of x,y points
    verts is a sequence of x,y vertices of a polygon
return value is a sequence of indices into points for the points
that are inside the polygon
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
return res
def poly_below(ymin, xs, ys):
"""
    Given arrays *xs* and *ys*, return the vertices of a polygon
    that has a scalar lower bound *ymin* and an upper bound at the *ys*.
    Intended for use with Axes.fill, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
    return poly_between(xs, ymin, ys)
def poly_between(x, ylower, yupper):
"""
given a sequence of x, ylower and yupper, return the polygon that
fills the regions between them. ylower or yupper can be scalar or
iterable. If they are iterable, they must be equal in length to x
return value is x, y arrays for use with Axes.fill
"""
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*np.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*np.ones(Nx)
x = np.concatenate( (x, x[::-1]) )
y = np.concatenate( (yupper, ylower[::-1]) )
return x,y
### the following code was written and submitted by Fernando Perez
### from the ipython numutils package under a BSD license
# begin fperez functions
"""
A set of convenient utilities for numerical work.
Most of this module requires numpy or is meant to be used with it.
Copyright (c) 2001-2004, Fernando Perez. <Fernando.Perez@colorado.edu>
All rights reserved.
This license was generated from the BSD license template as found in:
http://www.opensource.org/licenses/bsd-license.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the IPython project nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import operator
import math
#*****************************************************************************
# Globals
#****************************************************************************
# function definitions
exp_safe_MIN = math.log(2.2250738585072014e-308)
exp_safe_MAX = 1.7976931348623157e+308
def exp_safe(x):
"""
Compute exponentials which safely underflow to zero.
Slow, but convenient to use. Note that numpy provides proper
floating point exception handling with access to the underlying
hardware.
"""
if type(x) is np.ndarray:
return exp(np.clip(x,exp_safe_MIN,exp_safe_MAX))
else:
return math.exp(x)
def amap(fn,*args):
"""
amap(function, sequence[, sequence, ...]) -> array.
Works like :func:`map`, but it returns an array. This is just a
convenient shorthand for ``numpy.array(map(...))``.
"""
return np.array(map(fn,*args))
#from numpy import zeros_like
def zeros_like(a):
"""
Return an array of zeros of the shape and typecode of *a*.
"""
warnings.warn("Use numpy.zeros_like(a)", DeprecationWarning)
return np.zeros_like(a)
#from numpy import sum as sum_flat
def sum_flat(a):
"""
Return the sum of all the elements of *a*, flattened out.
It uses ``a.flat``, and if *a* is not contiguous, a call to
``ravel(a)`` is made.
"""
warnings.warn("Use numpy.sum(a) or a.sum()", DeprecationWarning)
return np.sum(a)
#from numpy import mean as mean_flat
def mean_flat(a):
"""
Return the mean of all the elements of *a*, flattened out.
"""
warnings.warn("Use numpy.mean(a) or a.mean()", DeprecationWarning)
return np.mean(a)
def rms_flat(a):
"""
Return the root mean square of all the elements of *a*, flattened out.
"""
return np.sqrt(np.mean(np.absolute(a)**2))
def l1norm(a):
"""
Return the *l1* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sum(np.absolute(a))
def l2norm(a):
"""
Return the *l2* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sqrt(np.sum(np.absolute(a)**2))
def norm_flat(a,p=2):
"""
norm(a,p=2) -> l-p norm of a.flat
Return the l-p norm of *a*, considered as a flat array. This is NOT a true
matrix norm, since arrays of arbitrary rank are always flattened.
*p* can be a number or the string 'Infinity' to get the L-infinity norm.
"""
# This function was being masked by a more general norm later in
# the file. We may want to simply delete it.
if p=='Infinity':
return np.amax(np.absolute(a))
else:
return (np.sum(np.absolute(a)**p))**(1.0/p)
def frange(xini,xfin=None,delta=None,**kw):
"""
frange([start,] stop[, step, keywords]) -> array of floats
Return a numpy ndarray containing a progression of floats. Similar to
:func:`numpy.arange`, but defaults to a closed interval.
``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start*
defaults to 0, and the endpoint *is included*. This behavior is
different from that of :func:`range` and
:func:`numpy.arange`. This is deliberate, since :func:`frange`
will probably be more useful for generating lists of points for
function evaluation, and endpoints are often desired in this
use. The usual behavior of :func:`range` can be obtained by
setting the keyword *closed* = 0, in this case, :func:`frange`
    basically becomes :func:`numpy.arange`.
When *step* is given, it specifies the increment (or
decrement). All arguments can be floating point numbers.
``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where
*xfin* <= *x1*.
:func:`frange` can also be called with the keyword *npts*. This
sets the number of points the list should contain (and overrides
the value *step* might have been given). :func:`numpy.arange`
doesn't offer this option.
Examples::
>>> frange(3)
array([ 0., 1., 2., 3.])
>>> frange(3,closed=0)
array([ 0., 1., 2.])
>>> frange(1,6,2)
    array([1, 3, 5]) or 1,3,5,7, depending on floating point vagaries
>>> frange(1,6.5,npts=5)
array([ 1. , 2.375, 3.75 , 5.125, 6.5 ])
"""
#defaults
kw.setdefault('closed',1)
endpoint = kw['closed'] != 0
# funny logic to allow the *first* argument to be optional (like range())
# This was modified with a simpler version from a similar frange() found
# at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
    if xfin is None:
        xfin = xini + 0.0
        xini = 0.0
    if delta is None:
        delta = 1.0
# compute # of points, spacing and return final list
try:
npts=kw['npts']
delta=(xfin-xini)/float(npts-endpoint)
except KeyError:
npts = int(round((xfin-xini)/delta)) + endpoint
#npts = int(floor((xfin-xini)/delta)*(1.0+1e-10)) + endpoint
# round finds the nearest, so the endpoint can be up to
# delta/2 larger than xfin.
return np.arange(npts)*delta+xini
# end frange()
#import numpy.diag as diagonal_matrix
def diagonal_matrix(diag):
"""
Return square diagonal matrix whose non-zero elements are given by the
input array.
"""
warnings.warn("Use numpy.diag(d)", DeprecationWarning)
return np.diag(diag)
def identity(n, rank=2, dtype='l', typecode=None):
"""
Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).
For ranks higher than 2, this object is simply a multi-index Kronecker
delta::
/ 1 if i0=i1=...=iR,
id[i0,i1,...,iR] = -|
\ 0 otherwise.
Optionally a *dtype* (or typecode) may be given (it defaults to 'l').
Since rank defaults to 2, this function behaves in the default case (when
only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is
much faster.
"""
if typecode is not None:
warnings.warn("Use dtype kwarg instead of typecode",
DeprecationWarning)
dtype = typecode
iden = np.zeros((n,)*rank, dtype)
for i in range(n):
idx = (i,)*rank
iden[idx] = 1
return iden
def base_repr (number, base = 2, padding = 0):
"""
Return the representation of a *number* in any given *base*.
"""
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if number < base:
        return (padding - 1) * chars[0] + chars[int(number)]
max_exponent = int (math.log (number)/math.log (base))
max_power = long (base) ** max_exponent
lead_digit = int (number/max_power)
return chars [lead_digit] + \
base_repr (number - max_power * lead_digit, base, \
max (padding - 1, max_exponent))
def binary_repr(number, max_length = 1025):
"""
Return the binary representation of the input *number* as a
string.
This is more efficient than using :func:`base_repr` with base 2.
Increase the value of max_length for very large numbers. Note that
on 32-bit machines, 2**1023 is the largest integer power of 2
which can be converted to a Python float.
"""
#assert number < 2L << max_length
shifts = map (operator.rshift, max_length * [number], \
range (max_length - 1, -1, -1))
digits = map (operator.mod, shifts, max_length * [2])
    if not digits.count (1): return '0'
digits = digits [digits.index (1):]
return ''.join (map (repr, digits)).replace('L','')
def log2(x,ln2 = math.log(2.0)):
"""
Return the log(*x*) in base 2.
This is a _slow_ function but which is guaranteed to return the correct
integer value if the input is an integer exact power of 2.
"""
try:
bin_n = binary_repr(x)[1:]
except (AssertionError,TypeError):
return math.log(x)/ln2
else:
if '1' in bin_n:
return math.log(x)/ln2
else:
return len(bin_n)
def ispower2(n):
"""
Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.
Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.
"""
bin_n = binary_repr(n)[1:]
if '1' in bin_n:
return 0
else:
return len(bin_n)
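# Illustrative usage sketch (not part of the original module): binary_repr,
# log2 and ispower2 on small integers.
def _demo_binary_helpers():
    assert binary_repr(10) == '1010'
    assert log2(8) == 3                    # exact integer for powers of two
    assert ispower2(8) == 3 and ispower2(6) == 0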
def isvector(X):
"""
Like the Matlab (TM) function with the same name, returns *True*
if the supplied numpy array or matrix *X* looks like a vector,
meaning it has a one non-singleton axis (i.e., it can have
multiple axes, but all must have length 1, except for one of
them).
If you just want to see if the array has 1 axis, use X.ndim == 1.
"""
return np.prod(X.shape)==np.max(X.shape)
#from numpy import fromfunction as fromfunction_kw
def fromfunction_kw(function, dimensions, **kwargs):
"""
Drop-in replacement for :func:`numpy.fromfunction`.
Allows passing keyword arguments to the desired function.
Call it as (keywords are optional)::
fromfunction_kw(MyFunction, dimensions, keywords)
The function ``MyFunction`` is responsible for handling the
dictionary of keywords it will receive.
"""
warnings.warn("Use numpy.fromfunction()", DeprecationWarning)
return np.fromfunction(function, dimensions, **kwargs)
### end fperez numutils code
def rem(x,y):
"""
Deprecated - see :func:`numpy.remainder`
"""
raise NotImplementedError('Deprecated - see numpy.remainder')
def norm(x,y=2):
"""
Deprecated - see :func:`numpy.linalg.norm`
"""
raise NotImplementedError('Deprecated - see numpy.linalg.norm')
def orth(A):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
def rank(x):
"""
Deprecated - see :func:`numpy.rank`
"""
raise NotImplementedError('Deprecated - see numpy.rank')
def sqrtm(x):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - see scipy.linalg.sqrtm')
def mfuncC(f, x):
"""
Deprecated
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
def approx_real(x):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
#helpers for loading, saving, manipulating and viewing numpy record arrays
def safe_isnan(x):
':func:`numpy.isnan` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isnan(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def safe_isinf(x):
':func:`numpy.isinf` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isinf(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def rec_view(rec):
"""
Return a view of an ndarray as a recarray
.. seealso::
http://projects.scipy.org/pipermail/numpy-discussion/2008-August/036429.html
"""
return rec.view(np.recarray)
#return rec.view(dtype=(np.record, rec.dtype), type=np.recarray)
def rec_append_field(rec, name, arr, dtype=None):
"""
Return a new record array with field name populated with data from
array *arr*. This function is Deprecated. Please use
:func:`rec_append_fields`.
"""
warnings.warn("use rec_append_fields", DeprecationWarning)
return rec_append_fields(rec, name, arr, dtype)
def rec_append_fields(rec, names, arrs, dtypes=None):
"""
Return a new record array with field names populated with data
from arrays in *arrs*. If appending a single field, then *names*,
*arrs* and *dtypes* do not have to be lists. They can just be the
values themselves.
"""
if (not cbook.is_string_like(names) and cbook.iterable(names) \
and len(names) and cbook.is_string_like(names[0])):
if len(names) != len(arrs):
raise ValueError, "number of arrays do not match number of names"
else: # we have only 1 name and 1 array
names = [names]
arrs = [arrs]
arrs = map(np.asarray, arrs)
if dtypes is None:
dtypes = [a.dtype for a in arrs]
elif not cbook.iterable(dtypes):
dtypes = [dtypes]
if len(arrs) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(arrs)
else:
raise ValueError, "dtypes must be None, a single dtype or a list"
newdtype = np.dtype(rec.dtype.descr + zip(names, dtypes))
newrec = np.empty(rec.shape, dtype=newdtype)
for field in rec.dtype.fields:
newrec[field] = rec[field]
for name, arr in zip(names, arrs):
newrec[name] = arr
return rec_view(newrec)
def rec_drop_fields(rec, names):
"""
Return a new numpy record array with fields in *names* dropped.
"""
names = set(names)
Nr = len(rec)
newdtype = np.dtype([(name, rec.dtype[name]) for name in rec.dtype.names
if name not in names])
newrec = np.empty(Nr, dtype=newdtype)
for field in newdtype.names:
newrec[field] = rec[field]
return rec_view(newrec)
def rec_groupby(r, groupby, stats):
"""
*r* is a numpy record array
*groupby* is a sequence of record array attribute names that
together form the grouping key. eg ('date', 'productcode')
*stats* is a sequence of (*attr*, *func*, *outname*) tuples which
will call ``x = func(attr)`` and assign *x* to the record array
output with attribute *outname*. For example::
stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )
    The returned record array has *dtype* names for each attribute name in
    the *groupby* argument, with the associated group values, and
for each outname name in the *stats* argument, with the associated
stat summary output.
"""
# build a dictionary from groupby keys-> list of indices into r with
# those keys
rowd = dict()
for i, row in enumerate(r):
key = tuple([row[attr] for attr in groupby])
rowd.setdefault(key, []).append(i)
# sort the output by groupby keys
keys = rowd.keys()
keys.sort()
rows = []
for key in keys:
row = list(key)
# get the indices for this groupby key
ind = rowd[key]
thisr = r[ind]
# call each stat function for this groupby slice
row.extend([func(thisr[attr]) for attr, func, outname in stats])
rows.append(row)
# build the output record array with groupby and outname attributes
attrs, funcs, outnames = zip(*stats)
names = list(groupby)
names.extend(outnames)
return np.rec.fromrecords(rows, names=names)
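# Illustrative usage sketch (not part of the original module): group a tiny
# record array by 'key' and report the mean of 'val' per group.
def _demo_rec_groupby():
    r = np.rec.fromrecords(
        [('a', 1.0), ('a', 3.0), ('b', 2.0)], names='key,val')
    return rec_groupby(r, ('key',), (('val', np.mean, 'avgval'),))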
def rec_summarize(r, summaryfuncs):
"""
*r* is a numpy record array
*summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
    which will apply *func* to the array *r*[attr] and assign the
output to a new attribute name *outname*. The returned record
array is identical to *r*, with extra arrays for each element in
*summaryfuncs*.
"""
names = list(r.dtype.names)
arrays = [r[name] for name in names]
for attr, func, outname in summaryfuncs:
names.append(outname)
arrays.append(np.asarray(func(r[attr])))
return np.rec.fromarrays(arrays, names=names)
def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1', r2postfix='2'):
"""
Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
field names -- if *key* is a string it is assumed to be a single
attribute name. If *r1* and *r2* have equal values on all the keys
in the *key* tuple, then their fields will be merged into a new
record array containing the intersection of the fields of *r1* and
*r2*.
*r1* (also *r2*) must not have any duplicate keys.
The *jointype* keyword can be 'inner', 'outer', 'leftouter'. To
do a rightouter join just reverse *r1* and *r2*.
The *defaults* keyword is a dictionary filled with
``{column_name:default_value}`` pairs.
The keywords *r1postfix* and *r2postfix* are postfixed to column names
(other than keys) that are both in *r1* and *r2*.
"""
if cbook.is_string_like(key):
key = (key, )
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s'%name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s'%name)
def makekey(row):
return tuple([row[name] for name in key])
r1d = dict([(makekey(row),i) for i,row in enumerate(r1)])
r2d = dict([(makekey(row),i) for i,row in enumerate(r2)])
r1keys = set(r1d.keys())
r2keys = set(r2d.keys())
common_keys = r1keys & r2keys
r1ind = np.array([r1d[k] for k in common_keys])
r2ind = np.array([r2d[k] for k in common_keys])
common_len = len(common_keys)
left_len = right_len = 0
if jointype == "outer" or jointype == "leftouter":
left_keys = r1keys.difference(r2keys)
left_ind = np.array([r1d[k] for k in left_keys])
left_len = len(left_ind)
if jointype == "outer":
right_keys = r2keys.difference(r1keys)
right_ind = np.array([r2d[k] for k in right_keys])
right_len = len(right_ind)
def key_desc(name):
'if name is a string key, use the larger size of r1 or r2 before merging'
dt1 = r1.dtype[name]
if dt1.type != np.string_:
return (name, dt1.descr[0][1])
        dt2 = r2.dtype[name]
        assert dt2.type == dt1.type
        if dt1.itemsize > dt2.itemsize:
            return (name, dt1.descr[0][1])
        else:
            return (name, dt2.descr[0][1])
keydesc = [key_desc(name) for name in key]
def mapped_r1field(name):
"""
The column name in *newrec* that corresponds to the column in *r1*.
"""
if name in key or name not in r2.dtype.names: return name
else: return name + r1postfix
def mapped_r2field(name):
"""
The column name in *newrec* that corresponds to the column in *r2*.
"""
if name in key or name not in r1.dtype.names: return name
else: return name + r2postfix
r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr if desc[0] not in key]
r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr if desc[0] not in key]
newdtype = np.dtype(keydesc + r1desc + r2desc)
newrec = np.empty(common_len + left_len + right_len, dtype=newdtype)
if jointype != 'inner' and defaults is not None: # fill in the defaults enmasse
newrec_fields = newrec.dtype.fields.keys()
for k, v in defaults.items():
if k in newrec_fields:
newrec[k] = v
for field in r1.dtype.names:
newfield = mapped_r1field(field)
if common_len:
newrec[newfield][:common_len] = r1[field][r1ind]
if (jointype == "outer" or jointype == "leftouter") and left_len:
newrec[newfield][common_len:(common_len+left_len)] = r1[field][left_ind]
for field in r2.dtype.names:
newfield = mapped_r2field(field)
if field not in key and common_len:
newrec[newfield][:common_len] = r2[field][r2ind]
if jointype == "outer" and right_len:
newrec[newfield][-right_len:] = r2[field][right_ind]
newrec.sort(order=key)
return rec_view(newrec)
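# Illustrative usage sketch (not part of the original module): an inner join
# on 'id' keeps only the key value present in both record arrays.
def _demo_rec_join():
    r1 = np.rec.fromrecords([(1, 10.0), (2, 20.0)], names='id,a')
    r2 = np.rec.fromrecords([(2, 0.5), (3, 0.25)], names='id,b')
    return rec_join('id', r1, r2)   # single row: id == 2, a == 20.0, b == 0.5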
def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
converterd=None, names=None, missing='', missingd=None,
use_mrecords=True):
"""
Load data from comma/space/tab delimited file in *fname* into a
numpy record array and return the record array.
If *names* is *None*, a header row is required to automatically
assign the recarray names. The headers will be lower cased,
spaces will be converted to underscores, and illegal attribute
name characters removed. If *names* is not *None*, it is a
sequence of names to use for the column names. In this case, it
is assumed there is no header row.
- *fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
- *comments*: the character used to indicate the start of a comment
in the file
- *skiprows*: is the number of rows from the top to skip
- *checkrows*: is the number of rows to check to validate the column
data type. When set to zero all rows are validated.
    - *converterd*: if not *None*, is a dictionary mapping column number or
munged column name to a converter function.
- *names*: if not None, is a list of header names. In this case, no
header will be read from the file
- *missingd* is a dictionary mapping munged column names to field values
which signify that the field does not contain actual data and should
be masked, e.g. '0000-00-00' or 'unused'
- *missing*: a string whose value signals a missing field regardless of
the column it appears in
- *use_mrecords*: if True, return an mrecords.fromrecords record array if any of the data are missing
If no rows are found, *None* is returned -- see :file:`examples/loadrec.py`
"""
if converterd is None:
converterd = dict()
if missingd is None:
missingd = {}
import dateutil.parser
import datetime
parsedate = dateutil.parser.parse
fh = cbook.to_filehandle(fname)
class FH:
"""
For space-delimited files, we want different behavior than
comma or tab. Generally, we want multiple spaces to be
treated as a single separator, whereas with comma and tab we
want multiple commas to return multiple (empty) fields. The
join/strip trick below effects this.
"""
def __init__(self, fh):
self.fh = fh
def close(self):
self.fh.close()
def seek(self, arg):
self.fh.seek(arg)
def fix(self, s):
return ' '.join(s.split())
def next(self):
return self.fix(self.fh.next())
def __iter__(self):
for line in self.fh:
yield self.fix(line)
if delimiter==' ':
fh = FH(fh)
reader = csv.reader(fh, delimiter=delimiter)
def process_skiprows(reader):
if skiprows:
for i, row in enumerate(reader):
if i>=(skiprows-1): break
return fh, reader
process_skiprows(reader)
def ismissing(name, val):
"Should the value val in column name be masked?"
if val == missing or val == missingd.get(name) or val == '':
return True
else:
return False
def with_default_value(func, default):
def newfunc(name, val):
if ismissing(name, val):
return default
else:
return func(val)
return newfunc
def mybool(x):
if x=='True': return True
elif x=='False': return False
else: raise ValueError('invalid bool')
dateparser = dateutil.parser.parse
mydateparser = with_default_value(dateparser, datetime.date(1,1,1))
myfloat = with_default_value(float, np.nan)
myint = with_default_value(int, -1)
mystr = with_default_value(str, '')
mybool = with_default_value(mybool, None)
def mydate(x):
# try and return a date object
d = dateparser(x)
if d.hour>0 or d.minute>0 or d.second>0:
raise ValueError('not a date')
return d.date()
mydate = with_default_value(mydate, datetime.date(1,1,1))
def get_func(name, item, func):
# promote functions in this order
funcmap = {mybool:myint,myint:myfloat, myfloat:mydate, mydate:mydateparser, mydateparser:mystr}
try: func(name, item)
except:
if func==mystr:
raise ValueError('Could not find a working conversion function')
else: return get_func(name, item, funcmap[func]) # recurse
else: return func
# map column names that clash with builtins -- TODO - extend this list
itemd = {
'return' : 'return_',
'file' : 'file_',
'print' : 'print_',
}
def get_converters(reader):
converters = None
for i, row in enumerate(reader):
if i==0:
converters = [mybool]*len(row)
if checkrows and i>checkrows:
break
#print i, len(names), len(row)
#print 'converters', zip(converters, row)
for j, (name, item) in enumerate(zip(names, row)):
func = converterd.get(j)
if func is None:
func = converterd.get(name)
if func is None:
#if not item.strip(): continue
func = converters[j]
if len(item.strip()):
func = get_func(name, item, func)
else:
# how should we handle custom converters and defaults?
func = with_default_value(func, None)
converters[j] = func
return converters
# Get header and remove invalid characters
needheader = names is None
if needheader:
for row in reader:
#print 'csv2rec', row
if len(row) and row[0].startswith(comments):
continue
headers = row
break
# remove these chars
delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
delete.add('"')
names = []
seen = dict()
for i, item in enumerate(headers):
item = item.strip().lower().replace(' ', '_')
item = ''.join([c for c in item if c not in delete])
if not len(item):
item = 'column%d'%i
item = itemd.get(item, item)
cnt = seen.get(item, 0)
if cnt>0:
names.append(item + '_%d'%cnt)
else:
names.append(item)
seen[item] = cnt+1
else:
if cbook.is_string_like(names):
names = [n.strip() for n in names.split(',')]
# get the converter functions by inspecting checkrows
converters = get_converters(reader)
if converters is None:
raise ValueError('Could not find any valid data in CSV file')
# reset the reader and start over
fh.seek(0)
reader = csv.reader(fh, delimiter=delimiter)
process_skiprows(reader)
if needheader:
skipheader = reader.next()
    # iterate over the remaining rows and convert the data to date
    # objects, ints, or floats as appropriate
rows = []
rowmasks = []
for i, row in enumerate(reader):
if not len(row): continue
if row[0].startswith(comments): continue
rows.append([func(name, val) for func, name, val in zip(converters, names, row)])
rowmasks.append([ismissing(name, val) for name, val in zip(names, row)])
fh.close()
if not len(rows):
return None
if use_mrecords and np.any(rowmasks):
try: from numpy.ma import mrecords
except ImportError:
raise RuntimeError('numpy 1.05 or later is required for masked array support')
else:
r = mrecords.fromrecords(rows, names=names, mask=rowmasks)
else:
r = np.rec.fromrecords(rows, names=names)
return r
# a series of classes for describing the format intentions of various rec views
class FormatObj:
def tostr(self, x):
return self.toval(x)
def toval(self, x):
return str(x)
def fromstr(self, s):
return s
class FormatString(FormatObj):
def tostr(self, x):
val = repr(x)
return val[1:-1]
#class FormatString(FormatObj):
# def tostr(self, x):
# return '"%r"'%self.toval(x)
class FormatFormatStr(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def tostr(self, x):
if x is None: return 'None'
return self.fmt%self.toval(x)
class FormatFloat(FormatFormatStr):
def __init__(self, precision=4, scale=1.):
FormatFormatStr.__init__(self, '%%1.%df'%precision)
self.precision = precision
self.scale = scale
def toval(self, x):
if x is not None:
x = x * self.scale
return x
def fromstr(self, s):
return float(s)/self.scale
class FormatInt(FormatObj):
def tostr(self, x):
return '%d'%int(x)
def toval(self, x):
return int(x)
def fromstr(self, s):
return int(s)
class FormatBool(FormatObj):
def toval(self, x):
return str(x)
def fromstr(self, s):
return bool(s)
class FormatPercent(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=100.)
class FormatThousands(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-3)
class FormatMillions(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-6)
class FormatDate(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def toval(self, x):
if x is None: return 'None'
return x.strftime(self.fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x).date()
class FormatDatetime(FormatDate):
def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
FormatDate.__init__(self, fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x)
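# Illustrative usage sketch (not part of the original module): the Format*
# classes round-trip values between Python objects and text.
def _demo_formats():
    pct = FormatPercent(2)
    assert pct.tostr(0.1234) == '12.34'              # scaled by 100, 2 decimals
    assert abs(pct.fromstr('12.34') - 0.1234) < 1e-12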
defaultformatd = {
np.bool_ : FormatBool(),
np.int16 : FormatInt(),
np.int32 : FormatInt(),
np.int64 : FormatInt(),
np.float32 : FormatFloat(),
np.float64 : FormatFloat(),
np.object_ : FormatObj(),
np.string_ : FormatString(),
}
def get_formatd(r, formatd=None):
'build a formatd guaranteed to have a key for every dtype name'
if formatd is None:
formatd = dict()
for i, name in enumerate(r.dtype.names):
dt = r.dtype[name]
format = formatd.get(name)
if format is None:
format = defaultformatd.get(dt.type, FormatObj())
formatd[name] = format
return formatd
def csvformat_factory(format):
format = copy.deepcopy(format)
if isinstance(format, FormatFloat):
format.scale = 1. # override scaling for storage
format.fmt = '%r'
return format
def rec2txt(r, header=None, padding=3, precision=3):
"""
Returns a textual representation of a record array.
*r*: numpy recarray
*header*: list of column headers
*padding*: space between each column
*precision*: number of decimal places to use for floats.
Set to an integer to apply to all floats. Set to a
list of integers to apply precision individually.
Precision for non-floats is simply ignored.
Example::
precision=[0,2,3]
Output::
ID Price Return
ABC 12.54 0.234
XYZ 6.32 -0.076
"""
if cbook.is_numlike(precision):
precision = [precision]*len(r.dtype)
def get_type(item,atype=int):
tdict = {None:int, int:float, float:str}
try: atype(str(item))
except: return get_type(item,tdict[atype])
return atype
def get_justify(colname, column, precision):
ntype = type(column[0])
if ntype==np.str or ntype==np.str_ or ntype==np.string0 or ntype==np.string_:
length = max(len(colname),column.itemsize)
return 0, length+padding, "%s" # left justify
if ntype==np.int or ntype==np.int16 or ntype==np.int32 or ntype==np.int64 or ntype==np.int8 or ntype==np.int_:
length = max(len(colname),np.max(map(len,map(str,column))))
return 1, length+padding, "%d" # right justify
# JDH: my powerbook does not have np.float96 using np 1.3.0
"""
In [2]: np.__version__
Out[2]: '1.3.0.dev5948'
In [3]: !uname -a
Darwin Macintosh-5.local 9.4.0 Darwin Kernel Version 9.4.0: Mon Jun 9 19:30:53 PDT 2008; root:xnu-1228.5.20~1/RELEASE_I386 i386 i386
In [4]: np.float96
---------------------------------------------------------------------------
AttributeError Traceback (most recent call la
"""
if ntype==np.float or ntype==np.float32 or ntype==np.float64 or (hasattr(np, 'float96') and (ntype==np.float96)) or ntype==np.float_:
fmt = "%." + str(precision) + "f"
length = max(len(colname),np.max(map(len,map(lambda x:fmt%x,column))))
return 1, length+padding, fmt # right justify
return 0, max(len(colname),np.max(map(len,map(str,column))))+padding, "%s"
if header is None:
header = r.dtype.names
justify_pad_prec = [get_justify(header[i],r.__getitem__(colname),precision[i]) for i, colname in enumerate(r.dtype.names)]
justify_pad_prec_spacer = []
for i in range(len(justify_pad_prec)):
just,pad,prec = justify_pad_prec[i]
if i == 0:
justify_pad_prec_spacer.append((just,pad,prec,0))
else:
pjust,ppad,pprec = justify_pad_prec[i-1]
if pjust == 0 and just == 1:
justify_pad_prec_spacer.append((just,pad-padding,prec,0))
elif pjust == 1 and just == 0:
justify_pad_prec_spacer.append((just,pad,prec,padding))
else:
justify_pad_prec_spacer.append((just,pad,prec,0))
def format(item, just_pad_prec_spacer):
just, pad, prec, spacer = just_pad_prec_spacer
if just == 0:
return spacer*' ' + str(item).ljust(pad)
else:
if get_type(item) == float:
item = (prec%float(item))
elif get_type(item) == int:
item = (prec%int(item))
return item.rjust(pad)
textl = []
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)]))
for i, row in enumerate(r):
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)]))
if i==0:
textl[0] = textl[0].rstrip()
text = os.linesep.join(textl)
return text
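# Illustrative usage sketch (not part of the original module): render a tiny
# record array as aligned text with two decimal places for floats.
def _demo_rec2txt():
    r = np.rec.fromrecords([('ABC', 12.54), ('XYZ', 6.32)], names='id,price')
    return rec2txt(r, precision=2)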
def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
missingd=None):
"""
Save the data from numpy recarray *r* into a
comma-/space-/tab-delimited file. The record array dtype names
will be used for column headers.
*fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
.. seealso::
:func:`csv2rec`:
For information about *missing* and *missingd*, which can
be used to fill in masked values into your CSV file.
"""
if missingd is None:
missingd = dict()
def with_mask(func):
def newfunc(val, mask, mval):
if mask:
return mval
else:
return func(val)
return newfunc
formatd = get_formatd(r, formatd)
funcs = []
for i, name in enumerate(r.dtype.names):
funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))
fh, opened = cbook.to_filehandle(fname, 'w', return_opened=True)
writer = csv.writer(fh, delimiter=delimiter)
header = r.dtype.names
writer.writerow(header)
# Our list of specials for missing values
mvals = []
for name in header:
mvals.append(missingd.get(name, missing))
ismasked = False
if len(r):
row = r[0]
ismasked = hasattr(row, '_fieldmask')
for row in r:
if ismasked:
row, rowmask = row.item(), row._fieldmask.item()
else:
rowmask = [False] * len(row)
writer.writerow([func(val, mask, mval) for func, val, mask, mval
in zip(funcs, row, rowmask, mvals)])
if opened:
fh.close()
def griddata(x,y,z,xi,yi):
"""
``zi = griddata(x,y,z,xi,yi)`` fits a surface of the form *z* =
*f*(*x*, *y*) to the data in the (usually) nonuniformly spaced
vectors (*x*, *y*, *z*). :func:`griddata` interpolates this
surface at the points specified by (*xi*, *yi*) to produce
*zi*. *xi* and *yi* must describe a regular grid, can be either 1D
or 2D, but must be monotonically increasing.
A masked array is returned if any grid points are outside convex
hull defined by input data (no extrapolation is done).
Uses natural neighbor interpolation based on Delaunay
triangulation. By default, this algorithm is provided by the
:mod:`matplotlib.delaunay` package, written by Robert Kern. The
triangulation algorithm in this package is known to fail on some
nearly pathological cases. For this reason, a separate toolkit
    (:mod:`mpl_toolkits.natgrid`) has been created that provides a more
    robust algorithm for triangulation and interpolation. This
toolkit is based on the NCAR natgrid library, which contains code
that is not redistributable under a BSD-compatible license. When
installed, this function will use the :mod:`mpl_toolkits.natgrid`
algorithm, otherwise it will use the built-in
:mod:`matplotlib.delaunay` package.
The natgrid matplotlib toolkit can be downloaded from
http://sourceforge.net/project/showfiles.php?group_id=80706&package_id=142792
"""
try:
from mpl_toolkits.natgrid import _natgrid, __version__
_use_natgrid = True
except ImportError:
import matplotlib.delaunay as delaunay
from matplotlib.delaunay import __version__
_use_natgrid = False
if not griddata._reported:
if _use_natgrid:
verbose.report('using natgrid version %s' % __version__)
else:
verbose.report('using delaunay version %s' % __version__)
griddata._reported = True
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if not len(x)==len(y)==len(z):
raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
# remove masked points.
if hasattr(z,'mask'):
x = x.compress(z.mask == False)
y = y.compress(z.mask == False)
z = z.compressed()
if _use_natgrid: # use natgrid toolkit if available.
if xi.ndim == 2:
xi = xi[0,:]
yi = yi[:,0]
# override default natgrid internal parameters.
_natgrid.seti('ext',0)
_natgrid.setr('nul',np.nan)
# cast input arrays to doubles (this makes a copy)
x = x.astype(np.float)
y = y.astype(np.float)
z = z.astype(np.float)
xo = xi.astype(np.float)
yo = yi.astype(np.float)
if min(xo[1:]-xo[0:-1]) < 0 or min(yo[1:]-yo[0:-1]) < 0:
raise ValueError, 'output grid defined by xi,yi must be monotone increasing'
# allocate array for output (buffer will be overwritten by nagridd)
zo = np.empty((yo.shape[0],xo.shape[0]), np.float)
_natgrid.natgridd(x,y,z,xo,yo,zo)
else: # use Robert Kern's delaunay package from scikits (default)
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if xi.ndim == 1:
xi,yi = np.meshgrid(xi,yi)
# triangulate data
tri = delaunay.Triangulation(x,y)
# interpolate data
interp = tri.nn_interpolator(z)
zo = interp(xi,yi)
# mask points on grid outside convex hull of input data.
if np.any(np.isnan(zo)):
zo = np.ma.masked_where(np.isnan(zo),zo)
return zo
griddata._reported = False
##################################################
# Linear interpolation algorithms
##################################################
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function provides simple (but somewhat less so than
:func:`cbook.simple_linear_interpolation`) linear interpolation.
    :func:`simple_linear_interpolation` will give a list of points
    between a start and an end, while this does true linear
interpolation at an arbitrary set of points.
This is very inefficient linear interpolation meant to be used
only for a small number of points in relatively non-intensive use
cases. For real linear interpolation, use scipy.
"""
if cbook.is_scalar(xi): xi = [xi]
x = np.asarray(x)
y = np.asarray(y)
xi = np.asarray(xi)
s = list(y.shape)
s[0] = len(xi)
yi = np.tile( np.nan, s )
for ii,xx in enumerate(xi):
bb = x == xx
if np.any(bb):
jj, = np.nonzero(bb)
yi[ii] = y[jj[0]]
elif xx<x[0]:
if extrap:
yi[ii] = y[0]
elif xx>x[-1]:
if extrap:
yi[ii] = y[-1]
else:
jj, = np.nonzero(x<xx)
jj = max(jj)
yi[ii] = y[jj] + (xx-x[jj])/(x[jj+1]-x[jj]) * (y[jj+1]-y[jj])
return yi
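# Illustrative usage sketch (not part of the original module): interpolate a
# straight line y = 10*x at two new abscissa values.
def _demo_less_simple_linear_interpolation():
    x = [0.0, 1.0, 2.0]
    y = [0.0, 10.0, 20.0]
    yi = less_simple_linear_interpolation(x, y, [0.5, 1.5])
    assert np.allclose(yi, [5.0, 15.0])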
def slopes(x,y):
"""
:func:`slopes` calculates the slope *y*'(*x*)
The slope is estimated using the slope obtained from that of a
parabola through any three consecutive points.
This method should be superior to that described in the appendix
of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between
*x*- and *y*-values. For many functions, however, the abscissa
are given in different dimensions, so an aspect ratio is
completely arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
cases.
    Norbert Nemec, Institute of Theoretical Physics, University of
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
    (inspired by an original implementation by Halldor Bjornsson,
    Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
def stineman_interp(xi,x,y,yp=None):
"""
Given data vectors *x* and *y*, the slope vector *yp* and a new
abscissa vector *xi*, the function :func:`stineman_interp` uses
Stineman interpolation to calculate a vector *yi* corresponding to
*xi*.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa::
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
    Creative Computing with a note from the editor stating that while
    they were not an academic journal, once in a while something serious
    and original comes in, adding that this was "apparently a real
    solution" to a well known problem.
For *yp* = *None*, the routine automatically determines the slopes
using the :func:`slopes` routine.
*x* is assumed to be sorted in increasing order.
For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
tries an extrapolation. The relevance of the data obtained from
this, of course, is questionable...
    Original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is
    Completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
    2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
##################################################
# Code related to things in and around polygons
##################################################
def inside_poly(points, verts):
"""
*points* is a sequence of *x*, *y* points.
*verts* is a sequence of *x*, *y* vertices of a polygon.
Return value is a sequence of indices into points for the points
that are inside the polygon.
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
return res
def poly_below(xmin, xs, ys):
"""
Given a sequence of *xs* and *ys*, return the vertices of a
polygon that has a horizontal base at *xmin* and an upper bound at
the *ys*. *xmin* is a scalar.
Intended for use with :meth:`matplotlib.axes.Axes.fill`, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
if ma.isMaskedArray(xs) or ma.isMaskedArray(ys):
nx = ma
else:
nx = np
xs = nx.asarray(xs)
ys = nx.asarray(ys)
Nx = len(xs)
Ny = len(ys)
assert(Nx==Ny)
x = xmin*nx.ones(2*Nx)
y = nx.ones(2*Nx)
x[:Nx] = xs
y[:Nx] = ys
y[Nx:] = ys[::-1]
return x, y
def poly_between(x, ylower, yupper):
"""
Given a sequence of *x*, *ylower* and *yupper*, return the polygon
that fills the regions between them. *ylower* or *yupper* can be
scalar or iterable. If they are iterable, they must be equal in
length to *x*.
Return value is *x*, *y* arrays for use with
:meth:`matplotlib.axes.Axes.fill`.
"""
if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x):
nx = ma
else:
nx = np
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*nx.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*nx.ones(Nx)
x = nx.concatenate( (x, x[::-1]) )
y = nx.concatenate( (yupper, ylower[::-1]) )
return x,y
def is_closed_polygon(X):
"""
Tests whether first and last object in a sequence are the same. These are
presumably coordinates on a polygonal curve, in which case this function
tests if that curve is closed.
"""
return np.all(X[0] == X[-1])
def contiguous_regions(mask):
"""
return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
True and we cover all such regions
TODO: this is a pure python implementation which probably has a much faster numpy impl
"""
in_region = None
boundaries = []
for i, val in enumerate(mask):
if in_region is None and val:
in_region = i
elif in_region is not None and not val:
boundaries.append((in_region, i))
in_region = None
if in_region is not None:
boundaries.append((in_region, i+1))
return boundaries
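# Illustrative usage sketch (not part of the original module): the mask below
# contains two runs of True values, spanning indices [1, 3) and [4, 5).
def _demo_contiguous_regions():
    mask = [False, True, True, False, True]
    assert contiguous_regions(mask) == [(1, 3), (4, 5)]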
##################################################
# Vector and path length geometry calculations
##################################################
def vector_lengths( X, P=2., axis=None ):
"""
Finds the length of a set of vectors in *n* dimensions. This is
like the :func:`numpy.norm` function for vectors, but has the ability to
work over a particular axis of the supplied array or matrix.
Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
elements of *X* along the given axis. If *axis* is *None*,
compute over all elements of *X*.
"""
X = np.asarray(X)
return (np.sum(X**(P),axis=axis))**(1./P)
def distances_along_curve( X ):
"""
Computes the distance between a set of successive points in *N* dimensions.
Where *X* is an *M* x *N* array or matrix. The distances between
successive rows is computed. Distance is the standard Euclidean
distance.
"""
X = np.diff( X, axis=0 )
return vector_lengths(X,axis=1)
def path_length(X):
"""
Computes the distance travelled along a polygonal curve in *N* dimensions.
Where *X* is an *M* x *N* array or matrix. Returns an array of
length *M* consisting of the distance along the curve at each point
(i.e., the rows of *X*).
"""
X = distances_along_curve(X)
return np.concatenate( (np.zeros(1), np.cumsum(X)) )
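# Illustrative usage sketch (not part of the original module): the cumulative
# distance along a 3-4-5 step followed by a repeated point.
def _demo_path_length():
    X = np.array([[0.0, 0.0], [3.0, 4.0], [3.0, 4.0]])
    assert np.allclose(path_length(X), [0.0, 5.0, 5.0])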
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
Converts a quadratic Bezier curve to a cubic approximation.
The inputs are the *x* and *y* coordinates of the three control
points of a quadratic curve, and the output is a tuple of *x* and
*y* coordinates of the four control points of the cubic curve.
"""
# c0x, c0y = q0x, q0y
c1x, c1y = q0x + 2./3. * (q1x - q0x), q0y + 2./3. * (q1y - q0y)
c2x, c2y = c1x + 1./3. * (q2x - q0x), c1y + 1./3. * (q2y - q0y)
# c3x, c3y = q2x, q2y
return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
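# Illustrative usage sketch (not part of the original module): lift a
# quadratic Bezier from (0, 0) to (2, 0) with control point (1, 1) to its
# cubic form; the two interior cubic control points land at y == 2/3.
def _demo_quad2cubic():
    return quad2cubic(0.0, 0.0, 1.0, 1.0, 2.0, 0.0)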
| agpl-3.0 |
gerritgr/LumPy | ClusterEngine.py | 1 | 15922 | import model_parser
import numpy as np
from utilities import *
import sys
import sympy
import time
import pandas as pd
import sys
def indicators_to_dict(indicators, odes):
results = dict()
for i in range(len(odes)):
m = odes[i]
results[m] = indicators[i]
return results
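# Illustrative usage sketch (not part of the original module):
# indicators_to_dict pairs each m-vector with the cluster label that
# scipy's fcluster assigned to it.
def _demo_indicators_to_dict():
    odes = [(0, 0), (1, 0), (0, 1)]
    indicators = [1, 2, 2]
    return indicators_to_dict(indicators, odes)   # {(0, 0): 1, (1, 0): 2, (0, 1): 2}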
def clustering(model):
degree_distribution = model['degree_distribution']
cluster_number = model['bin_num']
if 'neighborhood' not in model:
model['neighborhood'] = list(generate_neighbours(model['k_max'], len(model['states'])))
m_vectors = model['neighborhood']
cluster_method = model['heuristic']
logger.info('Start Clustering.')
if cluster_number >= len(m_vectors):
result = dict()
for i, m in enumerate(m_vectors):
result[m] = i
return result
#if cluster_number == 1:
# return {m:0 for m in m_vectors}
#assert(cluster_number > 1)
if isinstance(cluster_method, str):
cluster_method = globals()[cluster_method]
#try:
# result = cluster_method(degree_distribution, cluster_number, m_vectors)
#except:
result = cluster_method(model)
logger.info('Clustering Done.')
return result
def cluster(degree_distribution, cluster_number, m_vectors):
from scipy.spatial.distance import pdist, euclidean
from scipy.cluster.hierarchy import linkage, fcluster
def dist(a, b):
if list(a) == list(b):
return 0.0
degree_dist = (np.sum(a) - np.sum(b))**2 * 10
        prob_k = max(degree_distribution[int(np.sum(a))], degree_distribution[int(np.sum(b))])
eucl_distance = euclidean(a,b)
rel_eucl_distance = eucl_distance/np.sqrt(np.sum(a+b))
return degree_dist + rel_eucl_distance * prob_k
odes = list(m_vectors)
Xd = pdist(odes, dist)
Z = linkage(Xd, 'complete')
indicators = fcluster(Z, cluster_number, criterion='maxclust')
results = dict()
for i in range(len(odes)):
m = odes[i]
results[m] = indicators[i]
return results
def cluster_alt(degree_distribution, cluster_number, m_vectors):
from scipy.spatial.distance import pdist, euclidean
from scipy.cluster.hierarchy import linkage, fcluster
def dist(a, b):
if list(a) == list(b):
return 0.0
degree_dist = (np.sum(a) - np.sum(b))**2 * 10
number_of_vectors_a = elemsin_k_vec_with_sum_m(len(a), np.sum(a))
number_of_vectors_b = elemsin_k_vec_with_sum_m(len(b), np.sum(b))
number_of_vectors = max(number_of_vectors_a, number_of_vectors_b)
prob_mass_k_a = degree_distribution[int(np.sum(a))]
prob_mass_k_b = degree_distribution[int(np.sum(b))]
prob_mass_k = max(prob_mass_k_a, prob_mass_k_b )
mass = prob_mass_k / number_of_vectors
eucl_distance = euclidean(a,b)
rel_eucl_distance = eucl_distance/np.sqrt(np.sum(a+b))
return mass * degree_dist + mass * rel_eucl_distance
odes = list(m_vectors)
Xd = pdist(odes, dist)
Z = linkage(Xd, 'complete')
indicators = fcluster(Z, cluster_number, criterion='maxclust')
results = dict()
for i in range(len(odes)):
m = odes[i]
results[m] = indicators[i]
return results
if __name__ == "__main__":
degree_distribution = [0.1,0.1,0.1,0.1,0.6]
cluster_number = 7
m_vectors = list()
for k in range(5):
m_vectors += list(m_k_of(k,2))
for cluster_method in [cluster, cluster_alt]:
r = cluster_method(degree_distribution, cluster_number, m_vectors)
print(r)
inv_map = {}
for k, v in r.items():
inv_map[v] = inv_map.get(v, [])
inv_map[v].append(k)
print(inv_map)
degree_distribution = [0.1,0.1,0.1,0.1,0.6]
cluster_number = 7
m_vectors = list()
for k in range(15):
m_vectors += list(m_k_of(k,2))
print(cluster2(None, None, m_vectors))
def cluster1(degree_distribution, cluster_number, m_vectors):
from scipy.spatial.distance import pdist, euclidean
from scipy.cluster.hierarchy import linkage, fcluster
odes = list(m_vectors)
Xd = pdist(odes, lambda x,y: euclidean(x,y)*1.0/(np.sum(x+y)))
Z = linkage(Xd, 'complete')
indicators = fcluster(Z, cluster_number, criterion='maxclust')
return indicators_to_dict(indicators,odes)
def cluster_direction(degree_distribution, cluster_number, m_vectors):
from scipy.spatial.distance import pdist, euclidean
from scipy.cluster.hierarchy import linkage, fcluster
odes = list(m_vectors)
def norma(v):
return v if np.sum(v) == 0.0 else create_normalized_np(v)
Xd = pdist(odes, lambda x,y: euclidean(norma(x), norma(y))+0.1*euclidean(x,y))
Z = linkage(Xd, 'complete')
indicators = fcluster(Z, cluster_number, criterion='maxclust')
return indicators_to_dict(indicators,odes)
def cluster2(degree_distribution, cluster_number, m_vectors):
logger.warn('This method ignores cluster_number and degree_distribution.')
results = dict()
for m in m_vectors:
m_new = ''
k = int(np.sum(m))
for v in m:
if k <= 8:
m_new += '_'+str(v)
else:
m_new += '_'+str(int(v/3))
results[m] = '{}{}'.format(k,m_new)
return results
def cluster3(degree_distribution, cluster_number, m_vectors):
logger.warn('This method ignores cluster_number and degree_distribution.')
results = dict()
for m in m_vectors:
k = int(np.sum(m))
if k == 0:
k = 0.1
c_id = [str(int(v/k*10)) for v in m]
results[m] = '_'.join(c_id)
return results
def cluster_vec(degree_distribution, cluster_number, m_vectors):
from scipy.spatial.distance import euclidean
logger.warn('This method ignores cluster_number and degree_distribution.')
base = m_k_of(max(5,int(cluster_number)), len(m_vectors[0])) #arbitrary before 10
#base = cluster_number
base = [create_normalized_np(v, True) for v in base]
results = dict()
for m in m_vectors:
k = int(np.sum(m))
        if k == 0: # value does not matter for the 0-vector but cannot be 0
k = 0.1
m_normal = [v/k for v in m]
distances = [euclidean(b_v, m_normal) for b_v in base]
closest_base = distances.index(min(distances))
results[m] = '{}_{}'.format(k, closest_base)
return results
def cluster_vec2(degree_distribution, cluster_number, m_vectors):
from scipy.spatial.distance import euclidean
logger.warn('This method ignores cluster_number and degree_distribution.')
base = m_k_of(max(5,int(cluster_number)), len(m_vectors[0])) #arbitrary before 10
#base = cluster_number
base = [create_normalized_np(v, True) for v in base]
results = dict()
for m in m_vectors:
k = int(np.sum(m))
        if k == 0: # value does not matter for the 0-vector but cannot be 0
k = 0.1
m_normal = [v/k for v in m]
distances = [euclidean(b_v, m_normal) for b_v in base]
closest_base = distances.index(min(distances))
        k_clustered = k if k <= 8 else int(k/3)*3 # previously 5 and 2
results[m] = '{}_{}'.format(k_clustered, closest_base)
return results
def cluster_vec3(degree_distribution, cluster_number, m_vectors):
from scipy.spatial.distance import euclidean
logger.warn('This method ignores cluster_number and degree_distribution.')
base = m_k_of(max(2,int(cluster_number)), len(m_vectors[0])) #arbitrary before 10
#base = cluster_number
base = [create_normalized_np(v, True) for v in base]
results = dict()
for m in m_vectors:
k = int(np.sum(m))
        if k == 0: # value does not matter for the 0-vector but cannot be 0
k = 0.1
m_normal = [v/k for v in m]
distances = [euclidean(b_v, m_normal) for b_v in base]
closest_base = distances.index(min(distances))
        k_clustered = k if k <= 15 else int(k/3)*3 # previously 5 and 2
results[m] = '{}_{}'.format(k_clustered, closest_base)
return results
def cluster_vec3s(degree_distribution, cluster_number, m_vectors):
from scipy.spatial.distance import euclidean
logger.warn('This method ignores cluster_number and degree_distribution.')
base = m_k_of(max(2,int(cluster_number)), len(m_vectors[0])) #arbitrary before 10
#base = cluster_number
base = [create_normalized_np(v, True) for v in base]
results = dict()
for m in m_vectors:
k = int(np.sum(m))
        if k == 0: # value does not matter for the 0-vector but cannot be 0
k = 0.1
m_normal = [v/k for v in m]
distances = [euclidean(b_v, m_normal) for b_v in base]
closest_base = distances.index(min(distances))
        k_clustered = k if k <= 4 else int(k/3)*3 # previously 5 and 2
results[m] = '{}_{}'.format(k_clustered, closest_base)
return results
def cluster_vec4(degree_distribution, cluster_number, m_vectors):
from scipy.spatial.distance import euclidean
logger.warn('This method ignores cluster_number and degree_distribution.')
base = m_k_of(max(2,int(cluster_number)), len(m_vectors[0])) #arbitrary before 10
#base = cluster_number
base = [create_normalized_np(v, True) for v in base]
results = dict()
for m in m_vectors:
k = int(np.sum(m))
        if k == 0: # value does not matter for the 0-vector but cannot be 0
k = 0.1
m_normal = [v/k for v in m]
distances = [euclidean(b_v, m_normal) for b_v in base]
closest_base = distances.index(min(distances))
        k_clustered = k if k <= 12 else int(k/2)*2 # previously 5 and 2
results[m] = '{}_{}'.format(k_clustered, closest_base)
return results
def cluster_vec5(degree_distribution, cluster_number, m_vectors):
from scipy.spatial.distance import euclidean
logger.warn('This method ignores cluster_number and degree_distribution.')
base = m_k_of(10, len(m_vectors[0])) #arbitrary before 10
#base = cluster_number
base = [create_normalized_np(v, True) for v in base]
results = dict()
for m in m_vectors:
k = int(np.sum(m))
        if k == 0: # value does not matter for the 0-vector but cannot be 0
k = 0.1
m_normal = [v/k for v in m]
distances = [euclidean(b_v, m_normal) for b_v in base]
closest_base = distances.index(min(distances))
        k_clustered = k if k <= 7 else int(k/5)*2 # previously 5 and 2
results[m] = '{}_{}'.format(k_clustered, closest_base)
return results
def cluster_subspace(model):
from scipy.spatial.distance import euclidean
logger.warn('This method ignores cluster_number and degree_distribution.')
merge = int(model.get('merge', 5))
subspace = int(model.get('subspace', 7))
start_clustering = int(model.get('start_clustering', 13))
m_vectors = model['neighborhood']
base = m_k_of(subspace, len(m_vectors[0])) #arbitrary before 10
#base = cluster_number
base = [create_normalized_np(v, True) for v in base]
results = dict()
for m in m_vectors:
k = int(np.sum(m))
        if k == 0: # value does not matter for the 0-vector but cannot be 0
k = 0.1
m_normal = [v/k for v in m]
distances = [euclidean(b_v, m_normal) for b_v in base]
closest_base = distances.index(min(distances))
        k_clustered = k if k <= start_clustering else int(k/merge)*merge # previously 5 and 2
results[m] = '{}_{}'.format(k_clustered, closest_base)
return results
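# Illustrative sketch (not part of the original code): the degree coarsening
# used by cluster_subspace with its default merge=5 and start_clustering=13;
# _demo_degree_coarsening is a hypothetical helper added for illustration only.
def _demo_degree_coarsening(k, merge=5, start_clustering=13):
    # degrees up to start_clustering keep their own bin; larger degrees are
    # merged into bins of width merge (e.g. 14 -> 10, 17 -> 15, 26 -> 25)
    return k if k <= start_clustering else int(k / merge) * merge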
def cluster_subspace5(model):
from scipy.spatial.distance import euclidean
logger.warn('This method ignores cluster_number and degree_distribution.')
m_vectors = model['neighborhood']
base = m_k_of(5, len(m_vectors[0])) #arbitrary before 10
base = [create_normalized_np(v, True) for v in base]
degree_cluster = cluster_degrees(model)
results = dict()
for m in m_vectors:
k = int(np.sum(m))
        if k == 0: # value does not matter for the 0-vector but cannot be 0
k = 1
m_normal = [v/k for v in m]
distances = [euclidean(b_v, m_normal) for b_v in base]
closest_base = distances.index(min(distances))
results[m] = '{}_{}'.format(degree_cluster[k], closest_base)
return results
def cluster_subspace7(model):
from scipy.spatial.distance import euclidean
logger.warn('This method ignores cluster_number and degree_distribution.')
m_vectors = model['neighborhood']
base = m_k_of(7, len(m_vectors[0])) #arbitrary before 10
base = [create_normalized_np(v, True) for v in base]
degree_cluster = cluster_degrees(model)
results = dict()
for m in m_vectors:
k = int(np.sum(m))
        if k == 0: # value does not matter for the 0-vector but cannot be 0
k = 1
m_normal = [v/k for v in m]
distances = [euclidean(b_v, m_normal) for b_v in base]
closest_base = distances.index(min(distances))
results[m] = '{}_{}'.format(degree_cluster[k], closest_base)
return results
def cluster_subspaceS(model):
from scipy.spatial.distance import euclidean
logger.warn('This method ignores cluster_number and degree_distribution.')
m_vectors = model['neighborhood']
base = m_k_of(int(model.get('subspace', 30)), len(m_vectors[0])) #arbitrary before 10
base = [create_normalized_np(v, True) for v in base]
degree_cluster = cluster_degrees(model)
results = dict()
for m in m_vectors:
k = int(np.sum(m))
        if k == 0: # value does not matter for the 0-vector but cannot be 0
k = 1
m_normal = [v/k for v in m]
distances = [euclidean(b_v, m_normal) for b_v in base]
closest_base = distances.index(min(distances))
results[m] = '{}_{}'.format(degree_cluster[k], closest_base)
return results
def cluster_subspaceX(model):
from scipy.spatial.distance import euclidean, cosine
logger.warn('This method ignores cluster_number.')
m_vectors = model['neighborhood']
base = m_k_of(int(model.get('subspace', model['bin_num'])), len(m_vectors[0])) #arbitrary before 10
base = [create_normalized_np(v, True) for v in base]
degree_cluster = cluster_degrees(model)
results = dict()
for m in m_vectors:
k = int(np.sum(m)+0.0000001)
m_normal = [v/k for v in m] if k > 0 else [0.001 for v in m]
m1 = m if k>0 else np.ones(len(m))
#distances = [euclidean(b_v, m_normal) for b_v in base]
distances = [cosine(b_v, m1) for b_v in base] # todo
closest_base = distances.index(min(distances))
results[m] = '{}_{}'.format(degree_cluster[k], closest_base)
return results
def cluster_subspaceXX(model):
model['subspace'] = int(model['bin_num']*0.5) # gamma = bin_num / k
# gamma * k = bin_num
# k = bin_num / gamma
return cluster_subspaceX(model)
def cluster_subspaceXY(model):
model['subspace'] = int(model['bin_num']*1.0)
return cluster_subspaceX(model)
def cluster_subspaceXZ(model):
model['subspace'] = int(model['bin_num']*2.0)
return cluster_subspaceX(model)
def cluster_fixedSubSpace9(model):
model['subspace'] = 9
return cluster_subspaceX(model)
def cluster_fixedSubSpace10(model):
model['subspace'] = 10
return cluster_subspaceX(model)
def cluster_fixedSubSpace20(model):
model['subspace'] = 20
return cluster_subspaceX(model)
def cluster_fixedBinNum5(model):
    model['subspace'] = int(model['bin_num']*1.0)
model['bin_num'] = 5
return cluster_subspaceX(model)
def cluster_fixedBinNum10(model):
    model['subspace'] = int(model['bin_num']*1.0)
model['bin_num'] = 10
return cluster_subspaceX(model)
def cluster_fixedBinNum20(model):
    model['subspace'] = int(model['bin_num']*1.0)
model['bin_num'] = 20
return cluster_subspaceX(model)
def plot_clustering(cluster_results, outpath):
global created_images
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("white")
mpl.rc('xtick', labelsize=14)
mpl.rc('ytick', labelsize=14)
import numpy as np
plt.clf()
ax = plt.subplot(111, aspect='equal')
cluster_ids = sorted(list(set(cluster_results.values())))
vectors = cluster_results.keys()
# for more than 2 dimensions: only consider slice
x = [v[0] for v in vectors if np.sum(v) == v[0]+v[1]]
y = [v[1] for v in vectors if np.sum(v) == v[0]+v[1]]
ax.set_ylim([-.1, max(x+y)*1.05])
ax.set_xlim([-.1, max(x+y)*1.05])
colors_per_cluster = np.random.rand(len(cluster_ids))
#colors_per_cluster = np.linspace(0,1,len(cluster_ids))
colors = list()
for v in vectors:
c = cluster_results[v]
c_pos = cluster_ids.index(c)
colors.append(colors_per_cluster[c_pos])
colors = [plt.get_cmap('hsv')(c) for c in colors ]
#colors = [str(color) for color in colors]
#plt.show()
plt.scatter(x, y, c=colors, alpha=0.8, s = 10)
plt.savefig(outpath, format='pdf', bbox_inches='tight')
#------------------------------------------------------
# Heuristics for Degree Clustering
#------------------------------------------------------
def cluster_degrees(model):
import DegreeClusterEngine as dce
#clustering = dce.hierarchical2d_woblist(model)
clustering = dce.loss_clustering(model)
return clustering
#return dce.greedy(model)
#return {k: int(k) for k in range(model['k_max']+1)}
| gpl-3.0 |
kcavagnolo/astroML | book_figures/chapter9/fig_bayes_DB_2d.py | 3 | 2283 | """
2D Bayes Decision Boundary
--------------------------
Plot a schematic of a two-dimensional decision boundary
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Set up diagram
mu1 = (0.25, 0.25)
mu2 = (0.85, 0.7)
sigma1 = (0.5, 0.5)
sigma2 = (0.25, 0.5)
y_boundary = np.linspace(-0.1, 1.1, 100)
x_boundary = (0.5 + 0.4 * (y_boundary - 0.9) ** 2)
#------------------------------------------------------------
# Set up plot
fig = plt.figure(figsize=(5, 5), facecolor='w')
ax = fig.add_axes([0, 0, 1, 1], frameon=False, xticks=[], yticks=[])
# draw axes
plt.annotate(r'$x_1$', (-0.08, -0.02), (1.05, -0.02),
ha='center', va='center',
arrowprops=dict(arrowstyle='<-', color='k'))
plt.annotate(r'$x_2$', (-0.02, -0.08), (-0.02, 1.05),
ha='center', va='center',
arrowprops=dict(arrowstyle='<-', color='k'))
# draw ellipses, points, and boundaries
ax.scatter(mu1[:1], mu1[1:], c='k')
ax.scatter(mu2[:1], mu2[1:], c='k')
ax.add_patch(Ellipse(mu1, sigma1[0], sigma1[1], fc='none', ec='k'))
ax.add_patch(Ellipse(mu2, sigma2[0], sigma2[1], fc='none', ec='k'))
ax.text(mu1[0] + 0.02, mu1[1] + 0.02, r'$\mu_1$')
ax.text(mu2[0] + 0.02, mu2[1] + 0.02, r'$\mu_2$')
ax.plot(x_boundary, y_boundary, '--k')
ax.text(0.53, 0.28, "decision boundary", rotation=-70,
ha='left', va='bottom')
ax.set_xlim(-0.1, 1.1)
ax.set_ylim(-0.1, 1.1)
plt.show()
| bsd-2-clause |
tmhm/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
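# Illustrative sketch (not part of the original example): what token_freqs
# produces for a short document; _demo_token_freqs is a hypothetical helper
# added for illustration only.
def _demo_token_freqs():
    # returns {'to': 2, 'be': 2, 'or': 1, 'not': 1}
    return dict(token_freqs("To be or not to be"))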
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
BasuruK/sGlass | imports.py | 1 | 5956 | """
Manage this file with utmost care, as it contains all the imports used throughout the application.
Load all AI models and imports in the sections indicated below, and do not change variables that have
already been added unless they belong to your component.
When implementing your module, an OOP approach is advised but not required.
Import imports.py (this file) at the beginning of your file, alias it as IMPORT_MANAGER, and use that
alias to access all the imports and models defined here.
"""
print("Initializing the Application")
# Use this section to load all the imports
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # '2' - Turn off Warnings, '3' - Turn of Errors, '1' - Turn off Info logs
import time
start_time = time.time()
import cv2
from gtts import gTTS
from PIL import Image
import math
import random
import skimage.io as imutils
import tensorflow as tf
from keras.preprocessing import image as keras_preprocess
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from keras.models import load_model
from keras.applications import VGG19
import threading
# Use this section to import Machine Learning and AI Models
# Load Outdoor Object Recognition Model
print("Loading AI Models")
outdoor_objects_classifier = load_model('Outdoor_Object_Recognition_Engine/prediction_model.h5')
# Load Indoor Object Recognition Models
# CNN
def load_cnn_model():
hist = load_model('Indoor_Object_Recognition_Engine/IOIM/Indoor_Object_Recognition.h5')
return hist
# Hand Gesture
def load_hand_model():
hist = load_model('Indoor_Object_Recognition_Engine/Hand_Gesture/Hand_Gesture_Recognition.h5')
return hist
# Load VGG19 model
baseVgg19Model = VGG19(weights='imagenet')
# Load Description Generator Model
description_generator_model = load_model('Description_Generator/Data/pretrainedmodel/descgenrator.30-2.19.hdf5')
print("All Modules Loaded within: ", time.time() - start_time)
print("Application Started")
# If there are any global methods you need to access throughout the program, place them here
# Returns a randomized color code for the use of Bounding Box creation on GBPD algorithm
def randomize_color():
cnames = {
'aliceblue': '#F0F8FF', 'antiquewhite': '#FAEBD7', 'aqua': '#00FFFF', 'aquamarine': '#7FFFD4',
'azure': '#F0FFFF'
, 'beige': '#F5F5DC', 'bisque': '#FFE4C4', 'black': '#000000',
'blanchedalmond': '#FFEBCD', 'blue': '#0000FF', 'blueviolet': '#8A2BE2', 'brown': '#A52A2A',
'burlywood': '#DEB887',
'cadetblue': '#5F9EA0', 'chartreuse': '#7FFF00', 'chocolate': '#D2691E', 'coral': '#FF7F50',
'cornflowerblue': '#6495ED',
'cornsilk': '#FFF8DC', 'crimson': '#DC143C', 'cyan': '#00FFFF', 'darkblue': '#00008B', 'darkcyan': '#008B8B',
'darkgoldenrod': '#B8860B', 'darkgray': '#A9A9A9', 'darkgreen': '#006400', 'darkkhaki': '#BDB76B',
'darkmagenta': '#8B008B', 'darkolivegreen': '#556B2F', 'darkorange': '#FF8C00', 'darkorchid': '#9932CC',
'darkred': '#8B0000', 'darksalmon': '#E9967A', 'darkseagreen': '#8FBC8F', 'darkslateblue': '#483D8B',
'darkslategray': '#2F4F4F', 'darkturquoise': '#00CED1', 'darkviolet': '#9400D3', 'deeppink': '#FF1493',
'deepskyblue': '#00BFFF', 'dimgray': '#696969', 'dodgerblue': '#1E90FF', 'firebrick': '#B22222',
'floralwhite': '#FFFAF0', 'forestgreen': '#228B22', 'fuchsia': '#FF00FF', 'gainsboro': '#DCDCDC',
'ghostwhite': '#F8F8FF', 'gold': '#FFD700', 'goldenrod': '#DAA520', 'gray': '#808080', 'green': '#008000',
'greenyellow': '#ADFF2F', 'honeydew': '#F0FFF0', 'hotpink': '#FF69B4', 'indianred': '#CD5C5C',
'indigo': '#4B0082',
'ivory': '#FFFFF0', 'khaki': '#F0E68C', 'lavender': '#E6E6FA', 'lavenderblush': '#FFF0F5',
'lawngreen': '#7CFC00',
'lemonchiffon': '#FFFACD', 'lightblue': '#ADD8E6', 'lightcoral': '#F08080', 'lightcyan': '#E0FFFF',
'lightgoldenrodyellow': '#FAFAD2', 'lightgreen': '#90EE90', 'lightgray': '#D3D3D3', 'lightpink': '#FFB6C1',
'lightsalmon': '#FFA07A', 'lightseagreen': '#20B2AA', 'lightskyblue': '#87CEFA', 'lightslategray': '#778899',
'lightsteelblue': '#B0C4DE', 'lightyellow': '#FFFFE0', 'lime': '#00FF00', 'limegreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF', 'maroon': '#800000', 'mediumaquamarine': '#66CDAA', 'mediumblue': '#0000CD',
'mediumorchid': '#BA55D3', 'mediumpurple': '#9370DB', 'mediumseagreen': '#3CB371', 'mediumslateblue': '#7B68EE',
'mediumspringgreen': '#00FA9A', 'mediumturquoise': '#48D1CC', 'mediumvioletred': '#C71585',
'midnightblue': '#191970', 'mintcream': '#F5FFFA', 'mistyrose': '#FFE4E1', 'moccasin': '#FFE4B5',
'navajowhite': '#FFDEAD', 'navy': '#000080', 'oldlace': '#FDF5E6', 'olive': '#808000', 'olivedrab': '#6B8E23',
'orange': '#FFA500', 'orangered': '#FF4500', 'orchid': '#DA70D6', 'palegoldenrod': '#EEE8AA',
'palegreen': '#98FB98', 'paleturquoise': '#AFEEEE', 'palevioletred': '#DB7093', 'papayawhip': '#FFEFD5',
'peachpuff': '#FFDAB9', 'peru': '#CD853F', 'pink': '#FFC0CB', 'plum': '#DDA0DD', 'powderblue': '#B0E0E6',
'purple': '#800080', 'red': '#FF0000', 'rosybrown': '#BC8F8F', 'royalblue': '#4169E1', 'saddlebrown': '#8B4513',
'salmon': '#FA8072', 'sandybrown': '#FAA460', 'seagreen': '#2E8B57', 'seashell': '#FFF5EE', 'sienna': '#A0522D',
'silver': '#C0C0C0', 'skyblue': '#87CEEB', 'slateblue': '#6A5ACD', 'slategray': '#708090', 'snow': '#FFFAFA',
'springgreen': '#00FF7F', 'steelblue': '#4682B4', 'tan': '#D2B48C', 'teal': '#008080', 'thistle': '#D8BFD8',
'tomato': '#FF6347', 'turquoise': '#40E0D0', 'violet': '#EE82EE', 'wheat': '#F5DEB3', 'white': '#FFFFFF',
'whitesmoke': '#F5F5F5', 'yellow': '#FFFF00', 'yellowgreen': '#9ACD32'}
return random.sample(cnames.items(), 1)[0][1]
| gpl-3.0 |
xuewei4d/scikit-learn | examples/model_selection/plot_grid_search_refit_callable.py | 25 | 3648 | """
==================================================
Balance model complexity and cross-validated score
==================================================
This example balances model complexity and cross-validated score by
finding a decent accuracy within 1 standard deviation of the best accuracy
score while minimising the number of PCA components [1].
The figure shows the trade-off between cross-validated score and the number
of PCA components. The balanced case is when n_components=10 and accuracy=0.88,
which falls into the range within 1 standard deviation of the best accuracy
score.
[1] Hastie, T., Tibshirani, R.,, Friedman, J. (2001). Model Assessment and
Selection. The Elements of Statistical Learning (pp. 219-260). New York,
NY, USA: Springer New York Inc..
"""
# Author: Wenhao Zhang <wenhaoz@ucla.edu>
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
def lower_bound(cv_results):
"""
Calculate the lower bound within 1 standard deviation
of the best `mean_test_scores`.
Parameters
----------
cv_results : dict of numpy(masked) ndarrays
See attribute cv_results_ of `GridSearchCV`
Returns
-------
float
Lower bound within 1 standard deviation of the
best `mean_test_score`.
"""
best_score_idx = np.argmax(cv_results['mean_test_score'])
return (cv_results['mean_test_score'][best_score_idx]
- cv_results['std_test_score'][best_score_idx])
def best_low_complexity(cv_results):
"""
Balance model complexity with cross-validated score.
Parameters
----------
cv_results : dict of numpy(masked) ndarrays
See attribute cv_results_ of `GridSearchCV`.
Return
------
int
Index of a model that has the fewest PCA components
while has its test score within 1 standard deviation of the best
`mean_test_score`.
"""
threshold = lower_bound(cv_results)
candidate_idx = np.flatnonzero(cv_results['mean_test_score'] >= threshold)
best_idx = candidate_idx[cv_results['param_reduce_dim__n_components']
[candidate_idx].argmin()]
return best_idx
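# Illustrative sketch (not part of the original example): the one-standard-
# deviation rule applied to a toy cv_results dict; _demo_best_low_complexity
# and its toy data are hypothetical and added for illustration only.
def _demo_best_low_complexity():
    toy_cv_results = {
        'mean_test_score': np.array([0.80, 0.88, 0.90, 0.90]),
        'std_test_score': np.array([0.03, 0.03, 0.03, 0.03]),
        'param_reduce_dim__n_components': np.array([6, 8, 10, 12]),
    }
    # the lower bound is 0.90 - 0.03 = 0.87, so indices 1, 2 and 3 qualify
    # and the smallest n_components among them (8, at index 1) is chosen
    return best_low_complexity(toy_cv_results)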
pipe = Pipeline([
('reduce_dim', PCA(random_state=42)),
('classify', LinearSVC(random_state=42, C=0.01)),
])
param_grid = {
'reduce_dim__n_components': [6, 8, 10, 12, 14]
}
grid = GridSearchCV(pipe, cv=10, n_jobs=1, param_grid=param_grid,
scoring='accuracy', refit=best_low_complexity)
X, y = load_digits(return_X_y=True)
grid.fit(X, y)
n_components = grid.cv_results_['param_reduce_dim__n_components']
test_scores = grid.cv_results_['mean_test_score']
plt.figure()
plt.bar(n_components, test_scores, width=1.3, color='b')
lower = lower_bound(grid.cv_results_)
plt.axhline(np.max(test_scores), linestyle='--', color='y',
label='Best score')
plt.axhline(lower, linestyle='--', color='.5', label='Best score - 1 std')
plt.title("Balance model complexity and cross-validated score")
plt.xlabel('Number of PCA components used')
plt.ylabel('Digit classification accuracy')
plt.xticks(n_components.tolist())
plt.ylim((0, 1.0))
plt.legend(loc='upper left')
best_index_ = grid.best_index_
print("The best_index_ is %d" % best_index_)
print("The n_components selected is %d" % n_components[best_index_])
print("The corresponding accuracy score is %.2f"
% grid.cv_results_['mean_test_score'][best_index_])
plt.show()
| bsd-3-clause |
megarcia/WxCD | source/process_NCEI_00.py | 2 | 28889 | """
Python script 'process_NCEI_00.py'
by Matthew Garcia, PhD student
Dept. of Forest and Wildlife Ecology
University of Wisconsin - Madison
matt.e.garcia@gmail.com
Copyright (C) 2015-2016 by Matthew Garcia
Licensed Gnu GPL v3; see 'LICENSE_GnuGPLv3.txt' for complete terms
Send questions, bug reports, any related requests to matt.e.garcia@gmail.com
See also 'README.md', 'DISCLAIMER.txt', 'CITATION.txt', 'ACKNOWLEDGEMENTS.txt'
Treat others as you would be treated. Pay it forward. Valar dohaeris.
PURPOSE: QA/QC of daily meteorological station data in NOAA/NCEI datasets
DEPENDENCIES: h5py, numpy, pandas
USAGE: '$ python process_NCEI_00.py NCEI_WLS_20000101-20101231.csv ./data'
INPUT: Station meteorological data from NOAA/NCEI in '.csv' format (one file)
NOTE: The labels in <metvals> (line 104) are the minimum information that you
should request from the NCEI data server. Check your '.csv' file header
line to make sure these columns are present. Additional columns will be
removed (inside this routine, not from your original data csv file) to
reduce memory footprint.
OUTPUT: One '.csv' file with the 'cleaned' version of the input dataset
One '.csv' file with an accounting of the 'errors' cleaned,
by station and variable
One '.csv' file with station metadata information
One '.h5' file with preliminary metadata
TODO: add more data cleaning functionality, e.g. wild P reports
examine station-by-station error statistics for spatial patterns
extract names/locations of stations for mapping, incl. differentiation
of data provided (T only, P only, T & P)
"""
import sys
import datetime
import h5py as hdf
import numpy as np
import pandas as pd
def message(char_string):
"""
prints a string to the terminal and flushes the buffer
"""
print char_string
sys.stdout.flush()
return
def find_outliers(dset, stnlist, idxlist, nsigma):
"""
Simplified (simplistic?) implementation of Chauvenet's criterion for
outlier detection
Reference: https://en.wikipedia.org/wiki/Chauvenet%27s_criterion
Primary assumption: normally distributed data (may not be correct!)
    nsigma >= 1.96 --> p < 0.05 --> observation flagged as an outlier at the 95% level
2.58 --> p < 0.01 --> 99%
2.81 --> p < 0.005 --> 99.5%
3.27 --> p < 0.001 --> 99.9%
"""
outlier_stns = []
outlier_idxs = []
nobs = len(dset)
dset_mean = np.mean(dset)
dset_std = np.std(dset)
dset_z = (dset - dset_mean) / dset_std
for j in range(nobs):
if abs(dset_z[j]) > nsigma:
outlier_stns.append(stnlist[j])
outlier_idxs.append(int(idxlist[j]))
return outlier_stns, outlier_idxs
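# Illustrative sketch (not part of the original script): Chauvenet-style
# screening on a small synthetic sample; _demo_find_outliers is a hypothetical
# helper added for illustration only and is never called below.
def _demo_find_outliers():
    vals = np.array([10.0] * 19 + [50.0])
    stns = ['STN%02d' % j for j in range(20)]
    idxs = range(20)
    # the 50.0 value lies more than 4 standard deviations from the sample
    # mean, so with nsigma = 3.27 it is the only observation flagged and
    # this returns (['STN19'], [19])
    return find_outliers(vals, stns, idxs, 3.27)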
outlier_threshold = 3.27
#
message(' ')
message('process_NCEI_00.py started at %s' %
datetime.datetime.now().isoformat())
message(' ')
#
if len(sys.argv) < 3:
message('input warning: no data directory path indicated, using ./data')
path = './data'
else:
path = sys.argv[2]
#
if len(sys.argv) < 2:
message('input error: need CSV file containing NCEI weather data')
sys.exit(1)
else:
NCEIfname = '%s/%s' % (path, sys.argv[1])
cleaneddatafile = '%s_cleaned.csv' % NCEIfname[:-4]
errorsdatafile = '%s_errors.csv' % NCEIfname[:-4]
stnmetadatafile = '%s_stnmeta.csv' % NCEIfname[:-4]
h5outfname = '%s_processed.h5' % NCEIfname[:-4]
#
message('reading input data file %s' % NCEIfname)
stndata_df = pd.read_csv(NCEIfname, low_memory=False)
ndatarows, ndatacols = np.shape(stndata_df)
message('- read %d total data rows with %d columns' % (ndatarows, ndatacols))
#
metvals = ['STATION', 'STATION_NAME', 'ELEVATION', 'LATITUDE', 'LONGITUDE',
'DATE', 'PRCP', 'PRCP_M_FLAG', 'PRCP_Q_FLAG', 'TMAX', 'TMAX_M_FLAG',
'TMAX_Q_FLAG', 'TMIN', 'TMIN_M_FLAG', 'TMIN_Q_FLAG']
idxs = list(stndata_df.columns.values)
for i, var in enumerate(idxs):
if var == 'PRCP':
if 'Measurement Flag' in idxs[i + 1]:
idxs[i + 1] = 'PRCP_M_FLAG'
if 'Quality Flag' in idxs[i + 2]:
idxs[i + 2] = 'PRCP_Q_FLAG'
if var == 'TMAX':
if 'Measurement Flag' in idxs[i + 1]:
idxs[i + 1] = 'TMAX_M_FLAG'
if 'Quality Flag' in idxs[i + 2]:
idxs[i + 2] = 'TMAX_Q_FLAG'
if var == 'TMIN':
if 'Measurement Flag' in idxs[i + 1]:
idxs[i + 1] = 'TMIN_M_FLAG'
if 'Quality Flag' in idxs[i + 2]:
idxs[i + 2] = 'TMIN_Q_FLAG'
stndata_df.columns = idxs
idxs = list(stndata_df.columns.values)
idxs_extras = list(set(idxs) - set(metvals))
idxs_missing = list(set(metvals) - (set(idxs) - set(idxs_extras)))
if len(idxs_missing) > 0:
message('NOTE: 1+ necessary data columns is absent from your dataset')
message(' columns needed: %s' % str(metvals))
message(' columns missing: %s' % str(idxs_missing))
sys.exit(1)
#
for idx in idxs:
if idx not in metvals:
stndata_df = stndata_df.drop(idx, axis=1)
ndatarows, ndatacols = np.shape(stndata_df)
message('-- reduced to %d columns' % ndatacols)
#
# sort by station and date, assign index values, and add matching IDX column
stndata_df = stndata_df.sort_values(by=['STATION', 'DATE'])
stndata_df.index = pd.Index(np.arange(ndatarows))
stndata_df['IDX'] = np.arange(ndatarows)
message('-- assigned index and added index column')
message(' ')
#
message('original input dataset has:')
ndatarows, ndatacols = np.shape(stndata_df)
message('- %d total data rows' % ndatarows)
#
# dataset summary by stations
stns_all = stndata_df['STATION']
stn_id = list(sorted(stns_all.unique()))
message('- %d unique stations' % len(stn_id))
stn_counts = pd.value_counts(stns_all, sort=True)
min_dates = min(stn_counts)
n_min_dates = sum(stn_counts == min_dates)
message('-- minimum %d date entries for %d stations' %
(min_dates, n_min_dates))
max_dates = max(stn_counts)
n_max_dates = sum(stn_counts == max_dates)
message('-- maximum %d date entries for %d stations' %
(max_dates, n_max_dates))
avg_dates = np.mean(stn_counts)
message('-- average %.1f date entries per station' % avg_dates)
med_dates = np.median(stn_counts)
message('-- median %d date entries per station' % med_dates)
#
# dataset summary by dates
dates_all = stndata_df['DATE']
dates = list(sorted(dates_all.unique()))
message('- %d unique dates from %s to %s' %
(len(dates), str(dates[0]), str(dates[-1])))
date_counts = pd.value_counts(dates_all, sort=True)
min_stns = min(date_counts)
n_min_stns = sum(date_counts == min_stns)
message('-- minimum %d station entries for %d dates' % (min_stns, n_min_stns))
max_stns = max(date_counts)
n_max_stns = sum(date_counts == max_stns)
message('-- maximum %d station entries for %d dates' % (max_stns, n_max_stns))
avg_stns = np.mean(date_counts)
message('-- average %.1f station entries per date' % avg_stns)
med_stns = np.median(date_counts)
message('-- median %d station entries per date' % med_stns)
message(' ')
#
message('cleaning input dataset')
message(' ')
noperations = 0
#
# drop entries with no location information
unk_lats_idxs = list(stndata_df.ix[stndata_df['LATITUDE'] == 'unknown', 'IDX'])
unk_lats_stns = list(stndata_df.ix[stndata_df['LATITUDE'] ==
'unknown', 'STATION'].unique())
message('found %d records at %d stations with unknown latitude' %
(len(unk_lats_idxs), len(unk_lats_stns)))
unk_lons_idxs = list(stndata_df.ix[stndata_df['LONGITUDE'] ==
'unknown', 'IDX'])
unk_lons_stns = list(stndata_df.ix[stndata_df['LONGITUDE'] ==
'unknown', 'STATION'].unique())
message('found %d records at %d stations with unknown longitude' %
(len(unk_lons_idxs), len(unk_lons_stns)))
# set union, either lat or lon is missing
unk_locs_idxs = list(set(unk_lats_idxs) | set(unk_lons_idxs))
if len(unk_locs_idxs) > 0:
stndata_df = stndata_df.drop(unk_locs_idxs)
message('dropped %d total records for lack of location information' %
len(unk_locs_idxs))
noperations += len(unk_locs_idxs)
#
# re-index
stndata_df = stndata_df.drop('IDX', axis=1)
stndata_df = stndata_df.sort_values(by=['STATION', 'DATE'])
ndatarows, ndatacols = np.shape(stndata_df)
stns_all = stndata_df['STATION']
stn_id = list(sorted(stns_all.unique()))
dates_all = stndata_df['DATE']
dates = list(sorted(dates_all.unique()))
stndata_df.index = pd.Index(np.arange(ndatarows))
stndata_df['IDX'] = np.arange(ndatarows)
message('re-indexed remaining data records')
message(' ')
#
# *actual* null value is -9999, not 9999 as listed in the GHCND documentation
# find and adjust any 9999 observation values to -9999
prcp_null_idx = list(stndata_df.ix[stndata_df['PRCP'] == 9999, 'IDX'])
if len(prcp_null_idx) > 0:
stndata_df.set_value((prcp_null_idx), 'PRCP', -9999)
message('PRCP = 9999 set to PRCP = -9999 for %d observations' %
len(prcp_null_idx))
noperations += len(prcp_null_idx)
tmax_null_idx = list(stndata_df.ix[stndata_df['TMAX'] == 9999, 'IDX'])
if len(tmax_null_idx) > 0:
stndata_df.set_value((tmax_null_idx), 'TMAX', -9999)
message('TMAX = 9999 set to TMAX = -9999 for %d observations' %
len(tmax_null_idx))
noperations += len(tmax_null_idx)
tmin_null_idx = list(stndata_df.ix[stndata_df['TMIN'] == 9999, 'IDX'])
if len(tmin_null_idx) > 0:
stndata_df.set_value((tmin_null_idx), 'TMIN', -9999)
message('TMIN = 9999 set to TMIN = -9999 for %d observations' %
len(tmin_null_idx))
noperations += len(tmin_null_idx)
#
# set up DataFrame for error accounting and reporting
stnerr_df = pd.DataFrame(np.zeros((len(stn_id), 13)).astype(int),
index=np.arange(len(stn_id)),
columns=['STATION', 'IDX', 'PRCP_T_ADJ', 'PRCP_M_ERR',
'PRCP_Q_ERR', 'PRCP_ZERO_ERR', 'TMAX_M_ERR',
'TMAX_Q_ERR', 'TMIN_M_ERR', 'TMIN_Q_ERR',
'T_REV_ERR', 'TMAX_OUTLIER', 'TMIN_OUTLIER'])
stnerr_df['STATION'] = stn_id
stnerr_df['IDX'] = np.arange(len(stn_id))
message('established error accounting table')
message(' ')
#
# process PRCP measurement flags
# - flags 'B' and 'D' are ok
# - flag 'T' means that a value of 0 *should* be 1 (= 0.1 mm ==> 0.01 cm)
# - flag 'P' means that value *should* be missing (-9999 instead of 0)
prcp_flags_all = stndata_df['PRCP_M_FLAG']
prcp_flags = list(sorted(prcp_flags_all.unique()))
message('found %d unique PRCP_M_FLAGS: %s' %
(len(prcp_flags), str(prcp_flags)))
if len(prcp_flags) > 1:
if 'B' in prcp_flags:
message('- NOTE: PRCP flag B found, no data adjustments were made')
if 'D' in prcp_flags:
message('- NOTE: PRCP flag D found, no data adjustments were made')
if 'T' in prcp_flags:
prcp_t_idx = list(stndata_df.ix[stndata_df['PRCP_M_FLAG'] ==
'T', 'IDX'])
stndata_df.set_value((prcp_t_idx), 'PRCP', 1)
message('- trace PRCP --> PRCP = 1 (= 0.01 cm) for %d observations' %
len(prcp_t_idx))
noperations += len(prcp_t_idx)
prcp_t_stns_all = stndata_df.ix[stndata_df['PRCP_M_FLAG'] ==
'T', 'STATION']
prcp_t_stns = list(prcp_t_stns_all.unique())
message('-- accounting PRCP adjustments for %d stations' %
len(prcp_t_stns))
for stn in prcp_t_stns:
stnerr_idx = stnerr_df.ix[stnerr_df['STATION'] == stn, 'IDX']
stnerr_count = sum(prcp_t_stns_all == stn)
stnerr_df.set_value((stnerr_idx), 'PRCP_T_ADJ', stnerr_count)
if 'P' in prcp_flags:
prcp_m_idx = list(stndata_df.ix[stndata_df['PRCP_M_FLAG'] ==
'P', 'IDX'])
stndata_df.set_value((prcp_m_idx), 'PRCP', -9999)
message('- presumed PRCP = 0 --> PRCP = -9999 for %d observations' %
len(prcp_m_idx))
noperations += len(prcp_m_idx)
prcp_m_stns_all = stndata_df.ix[stndata_df['PRCP_M_FLAG'] ==
'P', 'STATION']
prcp_m_stns = list(prcp_m_stns_all.unique())
message('-- accounting PRCP adjustments for %d stations' %
len(prcp_m_stns))
for stn in prcp_m_stns:
stnerr_idx = stnerr_df.ix[stnerr_df['STATION'] == stn, 'IDX']
stnerr_count = sum(prcp_m_stns_all == stn)
stnerr_df.set_value((stnerr_idx), 'PRCP_M_ERR', stnerr_count)
if len(prcp_flags) > 5:
message('- NOTE: PRCP flag other than B/D/T/P was found, \
but no data adjustments were made')
message(' ')
#
# process PRCP quality flags
# - anything but a blank here means that the observation failed one of
# NOAA/NCEI's own QA checks
prcp_flags_all = stndata_df['PRCP_Q_FLAG']
prcp_flags = list(sorted(prcp_flags_all.unique()))
message('found %d unique PRCP_Q_FLAGS: %s' %
(len(prcp_flags), str(prcp_flags)))
if len(prcp_flags) > 1:
prcp_q_idx = list(stndata_df.ix[stndata_df['PRCP_Q_FLAG'] != ' ', 'IDX'])
stndata_df.set_value((prcp_q_idx), 'PRCP', -9999)
message('- QA-failed PRCP set to PRCP = -9999 for %d observations' %
len(prcp_q_idx))
noperations += len(prcp_q_idx)
prcp_q_stns_all = stndata_df.ix[stndata_df['PRCP_Q_FLAG'] !=
' ', 'STATION']
prcp_q_stns = list(prcp_q_stns_all.unique())
message('-- accounting QA-based PRCP adjustments for %d stations' %
len(prcp_q_stns))
for stn in prcp_q_stns:
stnerr_idx = stnerr_df.ix[stnerr_df['STATION'] == stn, 'IDX']
stnerr_count = sum(prcp_q_stns_all == stn)
stnerr_df.set_value((stnerr_idx), 'PRCP_Q_ERR', stnerr_count)
message(' ')
#
# process TMAX measurement flags
# - flag 'L' means that the observation may have been recorded some time later
# than the actual TMAX occurrence
tmax_flags_all = stndata_df['TMAX_M_FLAG']
tmax_flags = list(sorted(tmax_flags_all.unique()))
message('found %d unique TMAX_M_FLAGS: %s' %
(len(tmax_flags), str(tmax_flags)))
if len(tmax_flags) > 1:
if 'L' in tmax_flags:
tmax_m_idx = list(stndata_df.ix[stndata_df['TMAX_M_FLAG'] ==
'L', 'IDX'])
stndata_df.set_value((tmax_m_idx), 'TMAX', -9999)
message('- lagged TMAX set to TMAX = -9999 for %d observations' %
len(tmax_m_idx))
noperations += len(tmax_m_idx)
tmax_m_stns_all = stndata_df.ix[stndata_df['TMAX_M_FLAG'] ==
'L', 'STATION']
tmax_m_stns = list(tmax_m_stns_all.unique())
message('-- accounting TMAX adjustments for %d stations' %
len(tmax_m_stns))
for stn in tmax_m_stns:
stnerr_idx = stnerr_df.ix[stnerr_df['STATION'] == stn, 'IDX']
stnerr_count = sum(tmax_m_stns_all == stn)
stnerr_df.set_value((stnerr_idx), 'TMAX_M_ERR', stnerr_count)
if (len(tmax_flags) > 2) or ('L' not in tmax_flags):
message('- NOTE: a TMAX flag other than L was found, but no data \
adjustments were made')
message(' ')
#
# process TMAX quality flags
# - anything but a blank here means that the observation failed one of
# NOAA/NCEI's own QA checks
tmax_flags_all = stndata_df['TMAX_Q_FLAG']
tmax_flags = list(sorted(tmax_flags_all.unique()))
n_tmax_flags = len(tmax_flags)
message('found %d unique TMAX_Q_FLAGS: %s' % (n_tmax_flags, str(tmax_flags)))
if n_tmax_flags > 1:
tmax_q_idx = list(stndata_df.ix[stndata_df['TMAX_Q_FLAG'] != ' ', 'IDX'])
stndata_df.set_value((tmax_q_idx), 'TMAX', -9999)
message('- QA-failed TMAX set to TMAX = -9999 for %d observations' %
len(tmax_q_idx))
noperations += len(tmax_q_idx)
tmax_q_stns_all = stndata_df.ix[stndata_df['TMAX_Q_FLAG'] !=
' ', 'STATION']
tmax_q_stns = list(tmax_q_stns_all.unique())
message('-- accounting QA-based TMAX adjustments for %d stations' %
len(tmax_q_stns))
for stn in tmax_q_stns:
stnerr_idx = stnerr_df.ix[stnerr_df['STATION'] == stn, 'IDX']
stnerr_count = sum(tmax_q_stns_all == stn)
stnerr_df.set_value((stnerr_idx), 'TMAX_Q_ERR', stnerr_count)
message(' ')
#
# process TMIN measurement flags
# - flag 'L' means that the observation may have been recorded some time later
# than the actual TMIN occurrence
tmin_flags_all = stndata_df['TMIN_M_FLAG']
tmin_flags = list(sorted(tmin_flags_all.unique()))
n_tmin_flags = len(tmin_flags)
message('found %d unique TMIN_M_FLAGS: %s' % (n_tmin_flags, str(tmin_flags)))
if n_tmin_flags > 1:
if 'L' in tmin_flags:
tmin_m_idx = list(stndata_df.ix[stndata_df['TMIN_M_FLAG'] ==
'L', 'IDX'])
stndata_df.set_value((tmin_m_idx), 'TMIN', -9999)
message('- lagged TMIN set to TMIN = -9999 for %d observations' %
len(tmin_m_idx))
noperations += len(tmin_m_idx)
tmin_m_stns_all = stndata_df.ix[stndata_df['TMIN_M_FLAG'] ==
'L', 'STATION']
tmin_m_stns = list(tmin_m_stns_all.unique())
message('-- accounting TMIN adjustments for %d stations' %
len(tmin_m_stns))
for stn in tmin_m_stns:
stnerr_idx = stnerr_df.ix[stnerr_df['STATION'] == stn, 'IDX']
stnerr_count = sum(tmin_m_stns_all == stn)
stnerr_df.set_value((stnerr_idx), 'TMIN_M_ERR', stnerr_count)
if (n_tmin_flags > 2) or ('L' not in tmin_flags):
message('- NOTE: a TMIN flag other than L was found, but no data \
adjustments were made')
message(' ')
#
# process TMIN quality flags
# - anything but a blank here means that the observation failed one of
# NOAA/NCEI's own QA checks
tmin_flags_all = stndata_df['TMIN_Q_FLAG']
tmin_flags = list(sorted(tmin_flags_all.unique()))
n_tmin_flags = len(tmin_flags)
message('found %d unique TMIN_Q_FLAGS: %s' % (n_tmin_flags, str(tmin_flags)))
if n_tmin_flags > 1:
tmin_q_idx = stndata_df.ix[stndata_df['TMIN_Q_FLAG'] != ' ', 'IDX']
stndata_df.set_value((tmin_q_idx), 'TMIN', -9999)
message('- QA-failed TMIN set to TMIN = -9999 for %d observations' %
len(tmin_q_idx))
noperations += len(tmin_q_idx)
tmin_q_stns_all = stndata_df.ix[stndata_df['TMIN_Q_FLAG'] !=
' ', 'STATION']
tmin_q_stns = list(tmin_q_stns_all.unique())
message('-- accounting QA-based TMIN adjustments for %d stations' %
len(tmin_q_stns))
for stn in tmin_q_stns:
stnerr_idx = stnerr_df.ix[stnerr_df['STATION'] == stn, 'IDX']
stnerr_count = sum(tmin_q_stns_all == stn)
stnerr_df.set_value((stnerr_idx), 'TMIN_Q_ERR', stnerr_count)
message(' ')
#
# find stations with potentially erroneous P values
# (evaluation criterion: all of that station's P values = 0)
# (outlier detection of extreme values not yet implemented)
message('checking PRCP values for %d individual stations' % len(stn_id))
for stn in stn_id:
stn_df = stndata_df[stndata_df['STATION'] == stn]
stn_idxs = list(stn_df['IDX'])
stn_df = stn_df[stn_df['PRCP'] != -9999]
stn_prcp = np.array(stn_df['PRCP'])
if sum(stn_prcp) <= 0:
message('- station %s may have erroneous PRCP data, setting all %d of \
its PRCP values to -9999' % (stn, len(stn_idxs)))
stndata_df.set_value((stn_idxs), 'PRCP', -9999)
noperations += len(stn_idxs)
stnerr_idx = stnerr_df.ix[stnerr_df['STATION'] == stn, 'IDX']
stnerr_df.set_value((stnerr_idx), 'PRCP_ZERO_ERR', len(stn_idxs))
else:
message('- station %s PRCP data seem OK (no check for extreme values \
yet)' % stn)
message(' ')
#
# find entries with potentially erroneous locations/values
# (evaluation criteria: reversed Tmax and Tmin values)
# ( outlier detection of extreme T values)
message('checking PRCP/TMAX/TMIN values for %d individual dates' % len(dates))
for date in dates:
date_df = stndata_df[stndata_df['DATE'] == date]
stn_vals = list(date_df['STATION'])
lats_vals = list(date_df['LATITUDE'])
lons_vals = list(date_df['LONGITUDE'])
prcp_vals = np.array(date_df['PRCP'])
tmax_vals = np.array(date_df['TMAX'])
tmin_vals = np.array(date_df['TMIN'])
idx_vals = list(date_df['IDX'])
#
# find transposed Tmax/Tmin values
# (only works if valid values are present for both)
nvals = len(idx_vals)
transposed = 0
for i in range(nvals):
if (tmax_vals[i] != -9999) and (tmin_vals[i] != -9999):
if tmin_vals[i] > tmax_vals[i]:
stndata_df.set_value((int(idx_vals[i])), 'TMAX', tmin_vals[i])
stndata_df.set_value((int(idx_vals[i])), 'TMIN', tmax_vals[i])
transposed += 1
stnerr_idx = stnerr_df.ix[stnerr_df['STATION'] ==
stn_vals[i], 'IDX']
stnerr_val = stnerr_df.ix[stnerr_df['STATION'] ==
stn_vals[i], 'T_REV_ERR']
stnerr_df.set_value((stnerr_idx), 'T_REV_ERR',
(stnerr_val + 1))
if transposed > 0:
message('- date %d has %d entries where Tmax < Tmin, now reversed' %
(date, transposed))
noperations += transposed
#
# find outlier Tmax values (not spatial, purely arithmetic)
valid_tmax_vals = date_df[date_df['TMAX'] != -9999]
tmax_vals = np.array(valid_tmax_vals['TMAX'])
stn_vals = list(valid_tmax_vals['STATION'])
idx_vals = list(valid_tmax_vals['IDX'])
tmax_outlier_stns, tmax_outlier_idxs = find_outliers(tmax_vals, stn_vals,
idx_vals,
outlier_threshold)
if len(tmax_outlier_stns) > 0:
stndata_df.set_value((tmax_outlier_idxs), 'TMAX', -9999)
for stn in tmax_outlier_stns:
stnerr_idx = stnerr_df.ix[stnerr_df['STATION'] == stn, 'IDX']
stnerr_val = stnerr_df.ix[stnerr_df['STATION'] ==
stn, 'TMAX_OUTLIER']
stnerr_df.set_value((stnerr_idx), 'TMAX_OUTLIER', (stnerr_val + 1))
message('- date %d has %d outlier Tmax values (p < 0.001), now set to \
-9999' % (date, len(tmax_outlier_stns)))
noperations += len(tmax_outlier_stns)
#
# find outlier Tmin values (not spatial, purely arithmetic)
valid_tmin_vals = date_df[date_df['TMIN'] != -9999]
tmin_vals = np.array(valid_tmin_vals['TMIN'])
stn_vals = list(valid_tmin_vals['STATION'])
idx_vals = list(valid_tmin_vals['IDX'])
tmin_outlier_stns, tmin_outlier_idxs = find_outliers(tmin_vals, stn_vals,
idx_vals,
outlier_threshold)
if len(tmin_outlier_stns) > 0:
        stndata_df.set_value((tmin_outlier_idxs), 'TMIN', -9999)
for stn in tmin_outlier_stns:
stnerr_idx = stnerr_df.ix[stnerr_df['STATION'] == stn, 'IDX']
stnerr_val = stnerr_df.ix[stnerr_df['STATION'] ==
stn, 'TMIN_OUTLIER']
stnerr_df.set_value((stnerr_idx), 'TMIN_OUTLIER', (stnerr_val + 1))
message('- date %d has %d outlier Tmin values (p < 0.001), now set to \
-9999' % (date, len(tmin_outlier_stns)))
noperations += len(tmin_outlier_stns)
#
# if no apparent errors/outliers found, report that
if (transposed + len(tmax_outlier_stns) + len(tmin_outlier_stns)) == 0:
message('- date %d data seem OK' % date)
message(' ')
#
# drop entries with no useful meteorological data (just for convenience)
no_prcp_data_idxs = list(stndata_df.ix[stndata_df['PRCP'] == -9999, 'IDX'])
no_tmax_data_idxs = list(stndata_df.ix[stndata_df['TMAX'] == -9999, 'IDX'])
no_tmin_data_idxs = list(stndata_df.ix[stndata_df['TMIN'] == -9999, 'IDX'])
# set intersection, see if all met data are missing
no_data_idxs = list(set(no_prcp_data_idxs) &
set(no_tmax_data_idxs) &
set(no_tmin_data_idxs))
if len(no_data_idxs) > 0:
stndata_df = stndata_df.drop(no_data_idxs)
message('dropped %d records for lack of useful meteorological data' %
len(no_data_idxs))
noperations += len(no_data_idxs)
#
# re-index
stndata_df = stndata_df.drop('IDX', axis=1)
stndata_df = stndata_df.sort_values(by=['STATION', 'DATE'])
ndatarows, ndatacols = np.shape(stndata_df)
stndata_df.index = pd.Index(np.arange(ndatarows))
stndata_df['IDX'] = np.arange(ndatarows)
message('re-indexed remaining data records')
message(' ')
#
message('a total of %d data value operations were performed' % noperations)
message(' ')
#
ndatarows, ndatacols = np.shape(stndata_df)
message('cleaned dataset has:')
message('- %d total data rows' % ndatarows)
#
# dataset summary by stations
stns_all = stndata_df['STATION']
stn_id = sorted(stns_all.unique())
message('- %d unique stations' % len(stn_id))
stn_counts = pd.value_counts(stns_all, sort=True)
min_dates = min(stn_counts)
n_min_dates = sum(stn_counts == min_dates)
message('-- minimum %d date entries for %d stations' %
(min_dates, n_min_dates))
max_dates = max(stn_counts)
n_max_dates = sum(stn_counts == max_dates)
message('-- maximum %d date entries for %d stations' %
(max_dates, n_max_dates))
avg_dates = np.mean(stn_counts)
message('-- average %.1f date entries per station' % avg_dates)
med_dates = np.median(stn_counts)
message('-- median %d date entries per station' % med_dates)
#
# dataset summary by dates
dates_all = stndata_df['DATE']
dates = sorted(dates_all.unique())
message('- %d unique dates from %s to %s' %
(len(dates), str(dates[0]), str(dates[-1])))
date_counts = pd.value_counts(dates_all, sort=True)
min_stns = min(date_counts)
n_min_stns = sum(date_counts == min_stns)
message('-- minimum %d station entries for %d dates' %
(min_stns, n_min_stns))
max_stns = max(date_counts)
n_max_stns = sum(date_counts == max_stns)
message('-- maximum %d station entries for %d dates' %
(max_stns, n_max_stns))
avg_stns = np.mean(date_counts)
message('-- average %.1f station entries per date' % avg_stns)
med_stns = np.median(date_counts)
message('-- median %d station entries per date' % med_stns)
message(' ')
#
# dataset summary by variable
message('- %d total stations' % len(stn_id))
message('- %d unique dates from %s to %s' %
(len(dates), str(dates[0]), str(dates[-1])))
lats = np.zeros((len(stn_id)))
lons = np.zeros((len(stn_id)))
ndates = np.zeros((len(stn_id))).astype(int)
nprcp = np.zeros((len(stn_id))).astype(int)
ntmax = np.zeros((len(stn_id))).astype(int)
ntmin = np.zeros((len(stn_id))).astype(int)
for i in range(len(stn_id)):
message('-- processing station %s' % stn_id[i])
stndata_subset = stndata_df[stndata_df['STATION'] == stn_id[i]]
lats[i] = list(stndata_subset['LATITUDE'])[-1]
lons[i] = list(stndata_subset['LONGITUDE'])[-1]
ndates[i] = len(list(stndata_subset['DATE']))
nprcp[i] = sum(list(stndata_subset['PRCP'] != -9999))
ntmax[i] = sum(list(stndata_subset['TMAX'] != -9999))
ntmin[i] = sum(list(stndata_subset['TMIN'] != -9999))
stnmeta_df = pd.DataFrame({'STATION': stn_id, 'LATITUDE': lats,
'LONGITUDE': lons, 'NDATES': ndates,
'NPRCP': nprcp, 'NTMAX': ntmax, 'NTMIN': ntmin})
message(' ')
#
# save cleaned csv dataset
stndata_df.to_csv(cleaneddatafile)
message('saved cleaned NCEI dataset to %s' % cleaneddatafile)
stnerr_df.to_csv(errorsdatafile)
message('saved station-by-station error counts to %s' % errorsdatafile)
stnmeta_df.to_csv(stnmetadatafile)
message('saved station metadataset to %s' % stnmetadatafile)
message(' ')
#
# initialize HDF5 file for use in next script "process_NCEI_01.py"
with hdf.File(h5outfname, 'w') as h5file:
h5file.create_dataset('meta/filename', data=h5outfname)
h5file.create_dataset('meta/created',
data=datetime.datetime.now().isoformat())
h5file.create_dataset('meta/by', data='M. Garcia, UWisconsin-Madison FWE')
h5file.create_dataset('meta/last_updated',
data=datetime.datetime.now().isoformat())
h5file.create_dataset('meta/at', data='unique stations list, dates list')
h5file.create_dataset('stn_id', data=stn_id)
h5file.create_dataset('dates', data=dates)
message('saved processing metadata to %s' % h5outfname)
message(' ')
#
message('process_NCEI_00.py completed at %s' %
datetime.datetime.now().isoformat())
message(' ')
sys.exit(0)
# end process_NCEI_00.py
| gpl-3.0 |
glennq/scikit-learn | examples/linear_model/plot_ridge_path.py | 55 | 2138 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
This example also shows the usefulness of applying Ridge regression
to highly ill-conditioned matrices. For such matrices, a slight
change in the target variable can cause huge variances in the
calculated weights. In such cases, it is useful to set a certain
regularization (alpha) to reduce this variation (noise).
When alpha is very large, the regularization effect dominates the
squared loss function and the coefficients tend to zero.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations. In practice it is necessary to tune alpha
in such a way that a balance is maintained between both.
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
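# Illustrative addition (not part of the original example): the Hilbert matrix
# is famously ill-conditioned, which is why the coefficients react so strongly
# to small amounts of regularization; np.linalg.cond makes that concrete.
print("Condition number of X: %.2e" % np.linalg.cond(X))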
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
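# A hedged follow-up sketch (not part of the original example): one common way
# to tune alpha in practice is cross-validation over the same grid, reusing
# the X, y and alphas defined above.
from sklearn.linear_model import RidgeCV

reg = RidgeCV(alphas=alphas, fit_intercept=False).fit(X, y)
print("alpha selected by cross-validation: %g" % reg.alpha_)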
| bsd-3-clause |
waterponey/scikit-learn | examples/cluster/plot_kmeans_digits.py | 42 | 4491 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(82 * '_')
print('init\t\ttime\tinertia\thomo\tcompl\tv-meas\tARI\tAMI\tsilhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('%-9s\t%.2fs\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(82 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
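# Hedged follow-up sketch (reuses `kmeans`, `labels` and `metrics` from above):
# score the PCA-reduced clustering against the ground-truth digit labels with
# one of the metrics listed in the table at the top of this example.
print("ARI of PCA-reduced clustering vs. ground truth: %.3f"
      % metrics.adjusted_rand_score(labels, kmeans.labels_))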
| bsd-3-clause |
mblondel/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 244 | 1593 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
clf.fit(iris.data, iris.target)
assert_raises(ValueError, clf.transform, iris.data, "gobbledigook")
assert_raises(ValueError, clf.transform, iris.data, ".5 * gobbledigook")
| bsd-3-clause |
RobertABT/heightmap | build/matplotlib/lib/matplotlib/tri/tritools.py | 4 | 12652 | """
Tools for triangular grids.
"""
from __future__ import print_function
from matplotlib.tri import Triangulation
import numpy as np
class TriAnalyzer(object):
"""
Define basic tools for triangular mesh analysis and improvement.
A TriAnalyzer encapsulates a :class:`~matplotlib.tri.Triangulation`
object and provides basic tools for mesh analysis and mesh improvement.
Parameters
----------
triangulation : :class:`~matplotlib.tri.Triangulation` object
The encapsulated triangulation to analyze.
Attributes
----------
scale_factors
"""
def __init__(self, triangulation):
if not isinstance(triangulation, Triangulation):
raise ValueError("Expected a Triangulation object")
self._triangulation = triangulation
@property
def scale_factors(self):
"""
Factors to rescale the triangulation into a unit square.
Returns *k*, tuple of 2 scale factors.
Returns
-------
k : tuple of 2 floats (kx, ky)
Tuple of floats that would rescale the triangulation :
``[triangulation.x * kx, triangulation.y * ky]``
fits exactly inside a unit square.
"""
compressed_triangles = self._triangulation.get_masked_triangles()
node_used = (np.bincount(np.ravel(compressed_triangles)) != 0)
x = self._triangulation.x[node_used]
y = self._triangulation.y[node_used]
ux = np.max(x)-np.min(x)
uy = np.max(y)-np.min(y)
return (1./float(ux), 1./float(uy))
def circle_ratios(self, rescale=True):
"""
Returns a measure of the flatness of the triangulation's triangles.
The ratio of the incircle radius over the circumcircle radius is a
widely used indicator of a triangle's flatness.
It is always ``<= 0.5`` and ``== 0.5`` only for equilateral
triangles. Circle ratios below 0.01 denote very flat triangles.
To avoid unduly low values due to a difference of scale between the 2
axis, the triangular mesh can first be rescaled to fit inside a unit
square with :attr:`scale_factors` (Only if *rescale* is True, which is
its default value).
Parameters
----------
rescale : boolean, optional
If True, a rescaling will be internally performed (based on
:attr:`scale_factors`), so that the (unmasked) triangles fit
exactly inside a unit square mesh. Default is True.
Returns
-------
circle_ratios : masked array
Ratio of the incircle radius over the
circumcircle radius, for each 'rescaled' triangle of the
encapsulated triangulation.
Values corresponding to masked triangles are masked out.
"""
# Coords rescaling
if rescale:
(kx, ky) = self.scale_factors
else:
(kx, ky) = (1.0, 1.0)
pts = np.vstack([self._triangulation.x*kx,
self._triangulation.y*ky]).T
tri_pts = pts[self._triangulation.triangles]
# Computes the 3 side lengths
a = tri_pts[:, 1, :] - tri_pts[:, 0, :]
b = tri_pts[:, 2, :] - tri_pts[:, 1, :]
c = tri_pts[:, 0, :] - tri_pts[:, 2, :]
a = np.sqrt(a[:, 0]**2 + a[:, 1]**2)
b = np.sqrt(b[:, 0]**2 + b[:, 1]**2)
c = np.sqrt(c[:, 0]**2 + c[:, 1]**2)
# circumcircle and incircle radii
s = (a+b+c)*0.5
prod = s*(a+b-s)*(a+c-s)*(b+c-s)
# We have to deal with flat triangles with infinite circum_radius
bool_flat = (prod == 0.)
if np.any(bool_flat):
# Pathologic flow
ntri = tri_pts.shape[0]
circum_radius = np.empty(ntri, dtype=np.float64)
circum_radius[bool_flat] = np.inf
abc = a*b*c
circum_radius[~bool_flat] = abc[~bool_flat] / (
4.0*np.sqrt(prod[~bool_flat]))
else:
# Normal optimized flow
circum_radius = (a*b*c) / (4.0*np.sqrt(prod))
in_radius = (a*b*c) / (4.0*circum_radius*s)
circle_ratio = in_radius/circum_radius
mask = self._triangulation.mask
if mask is None:
return circle_ratio
else:
return np.ma.array(circle_ratio, mask=mask)
def get_flat_tri_mask(self, min_circle_ratio=0.01, rescale=True):
"""
Eliminates excessively flat border triangles from the triangulation.
Returns a mask *new_mask* which allows to clean the encapsulated
triangulation from its border-located flat triangles
(according to their :meth:`circle_ratios`).
This mask is meant to be subsequently applied to the triangulation
using :func:`matplotlib.tri.Triangulation.set_mask` .
*new_mask* is an extension of the initial triangulation mask
in the sense that an initially masked triangle will remain masked.
The *new_mask* array is computed recursively; at each step flat
triangles are removed only if they share a side with the current
mesh border. Thus no new holes in the triangulated domain will be
created.
Parameters
----------
min_circle_ratio : float, optional
Border triangles with incircle/circumcircle radii ratio r/R will
be removed if r/R < *min_circle_ratio*. Default value: 0.01
rescale : boolean, optional
If True, a rescaling will first be internally performed (based on
:attr:`scale_factors` ), so that the (unmasked) triangles fit
exactly inside a unit square mesh. This rescaling accounts for the
difference of scale which might exist between the 2 axis. Default
(and recommended) value is True.
Returns
-------
new_mask : array-like of booleans
Mask to apply to encapsulated triangulation.
All the initially masked triangles remain masked in the
*new_mask*.
Notes
-----
The rationale behind this function is that a Delaunay
triangulation - of an unstructured set of points - sometimes contains
almost flat triangles at its border, leading to artifacts in plots
(especially for high-resolution contouring).
Masked with computed *new_mask*, the encapsulated
triangulation would contain no more unmasked border triangles
with a circle ratio below *min_circle_ratio*, thus improving the
mesh quality for subsequent plots or interpolation.
Examples
--------
Please refer to the following illustrating example:
.. plot:: mpl_examples/pylab_examples/tricontour_smooth_delaunay.py
"""
# Recursively computes the mask_current_borders, true if a triangle is
# at the border of the mesh OR touching the border through a chain of
# invalid aspect ratio masked_triangles.
ntri = self._triangulation.triangles.shape[0]
mask_bad_ratio = self.circle_ratios(rescale) < min_circle_ratio
current_mask = self._triangulation.mask
if current_mask is None:
current_mask = np.zeros(ntri, dtype=np.bool)
valid_neighbors = np.copy(self._triangulation.neighbors)
renum_neighbors = np.arange(ntri, dtype=np.int32)
nadd = -1
while nadd != 0:
# The active wavefront is the triangles from the border (unmasked
# but with at least 1 neighbor equal to -1)
wavefront = ((np.min(valid_neighbors, axis=1) == -1)
& ~current_mask)
# The elements of the active wavefront will be masked if their
# circle ratio is bad.
added_mask = np.logical_and(wavefront, mask_bad_ratio)
current_mask = (added_mask | current_mask)
nadd = np.sum(added_mask)
# now we have to update the tables valid_neighbors
valid_neighbors[added_mask, :] = -1
renum_neighbors[added_mask] = -1
valid_neighbors = np.where(valid_neighbors == -1, -1,
renum_neighbors[valid_neighbors])
return np.ma.filled(current_mask, True)
def _get_compressed_triangulation(self, return_tri_renum=False,
return_node_renum=False):
"""
Compress (if masked) the encapsulated triangulation.
Returns minimal-length triangles array (*compressed_triangles*) and
coordinates arrays (*compressed_x*, *compressed_y*) that can still
describe the unmasked triangles of the encapsulated triangulation.
Parameters
----------
return_tri_renum : boolean, optional
Indicates whether a renumbering table to translate the triangle
numbers from the encapsulated triangulation numbering into the
new (compressed) renumbering will be returned.
return_node_renum : boolean, optional
Indicates whether a renumbering table to translate the nodes
numbers from the encapsulated triangulation numbering into the
new (compressed) renumbering will be returned.
Returns
-------
compressed_triangles : array-like
the returned compressed triangulation triangles
compressed_x : array-like
the returned compressed triangulation 1st coordinate
compressed_y : array-like
the returned compressed triangulation 2nd coordinate
tri_renum : array-like of integers
renumbering table to translate the triangle numbers from the
encapsulated triangulation into the new (compressed) renumbering.
-1 for masked triangles (deleted from *compressed_triangles*).
Returned only if *return_tri_renum* is True.
node_renum : array-like of integers
renumbering table to translate the point numbers from the
encapsulated triangulation into the new (compressed) renumbering.
-1 for unused points (i.e. those deleted from *compressed_x* and
*compressed_y*). Returned only if *return_node_renum* is True.
"""
# Valid triangles and renumbering
tri_mask = self._triangulation.mask
compressed_triangles = self._triangulation.get_masked_triangles()
ntri = self._triangulation.triangles.shape[0]
tri_renum = self._total_to_compress_renum(tri_mask, ntri)
# Valid nodes and renumbering
node_mask = (np.bincount(np.ravel(compressed_triangles)) == 0)
compressed_x = self._triangulation.x[~node_mask]
compressed_y = self._triangulation.y[~node_mask]
node_renum = self._total_to_compress_renum(node_mask)
# Now renumbering the valid triangles nodes
compressed_triangles = node_renum[compressed_triangles]
# 4 cases possible for return
if not return_tri_renum:
if not return_node_renum:
return compressed_triangles, compressed_x, compressed_y
else:
return (compressed_triangles, compressed_x, compressed_y,
node_renum)
else:
if not return_node_renum:
return (compressed_triangles, compressed_x, compressed_y,
tri_renum)
else:
return (compressed_triangles, compressed_x, compressed_y,
tri_renum, node_renum)
@staticmethod
def _total_to_compress_renum(mask, n=None):
"""
Parameters
----------
mask : 1d boolean array or None
mask
n : integer
length of the mask. Useful only if mask can be None
Returns
-------
renum : integer array
array so that (`valid_array` being a compressed array
based on a `masked_array` with mask *mask*) :
- For all i such that mask[i] = False:
valid_array[renum[i]] = masked_array[i]
- For all i such that mask[i] = True:
renum[i] = -1 (invalid value)
"""
if n is None:
n = np.size(mask)
if mask is not None:
renum = -np.ones(n, dtype=np.int32) # Default num is -1
valid = np.arange(n, dtype=np.int32).compress(~mask, axis=0)
renum[valid] = np.arange(np.size(valid, 0), dtype=np.int32)
return renum
else:
return np.arange(n, dtype=np.int32)
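# Hedged usage sketch (the random point cloud is illustrative only, not taken
# from this module): mask excessively flat border triangles of a Delaunay
# triangulation before contouring.
#
#     import numpy as np
#     from matplotlib.tri import Triangulation
#     x, y = np.random.rand(2, 100)
#     tri = Triangulation(x, y)  # Delaunay triangulation of the points
#     tri.set_mask(TriAnalyzer(tri).get_flat_tri_mask(min_circle_ratio=0.05))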
| mit |
RNAer/Calour | calour/ratio_experiment.py | 1 | 14235 | '''
ratio experiment (:mod:`calour.ratio_experiment`)
=======================================================
.. currentmodule:: calour.ratio_experiment
Classes
^^^^^^^
.. autosummary::
:toctree: generated
RatioExperiment
'''
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Calour development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from logging import getLogger
import numpy as np
import pandas as pd
import scipy.stats
from statsmodels.stats.multitest import multipletests
from .experiment import Experiment
from .util import _to_list
from .analysis import _new_experiment_from_pvals, _CALOUR_DIRECTION, _CALOUR_STAT
logger = getLogger(__name__)
class RatioExperiment(Experiment):
'''This class stores log-ratio data and corresponding analysis methods.
This is a child class of :class:`.Experiment`.
Parameters
----------
data : numpy.ndarray or scipy.sparse.csr_matrix
The log-ratio table for OTUs or ASVs.
Samples are in rows and features in columns
Note: values can be negative or np.nan as this is log-ratio.
np.nan indicates ratio is not applicable between 2 samples for a feature.
sample_metadata : pandas.DataFrame
The metadata on the samples
feature_metadata : pandas.DataFrame
The metadata on the features
description : str
name of experiment
sparse : bool
store the data array in :class:`scipy.sparse.csr_matrix`
or :class:`numpy.ndarray`
databases: iterable of str, optional
database interface names to show by default in heatmap() function
by default use 'dbbact'
Attributes
----------
data : numpy.ndarray or scipy.sparse.csr_matrix
The log ratio table for OTUs or ASVs.
Samples are in row and features in column. values are float (can be negative)
with np.nan indicating ratio for the specific feature does not exist.
sample_metadata : pandas.DataFrame
The metadata on the samples
feature_metadata : pandas.DataFrame
The metadata on the features
shape : tuple of (int, int)
the dimension of data
sparse : bool
store the data as sparse matrix (scipy.sparse.csr_matrix) or dense numpy array.
info : dict
information about the experiment (data md5, filenames, etc.)
description : str
name of the experiment
databases : dict
keys are the database names (i.e. 'dbbact' / 'gnps')
values are the database specific data for the experiment (i.e. annotations for dbbact)
See Also
--------
Experiment
'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def heatmap(self, *args, **kwargs):
'''Plot a heatmap for the ratio experiment.
This method accepts exactly the same parameters as input with
its parent class method and does exactly the sample plotting.
The only difference is that by default, it uses the diverging
colormap 'coolwarm' and `bad_color` parameter is set to white. You can
always set it to other colormap/bad_color as explained in
:meth:`.Experiment.heatmap`.
See Also
--------
Experiment.heatmap
'''
if 'cmap' not in kwargs:
kwargs['cmap'] = 'coolwarm'
if 'bad_color' not in kwargs:
kwargs['bad_color'] = 'white'
if 'clim' not in kwargs:
min_val = np.abs(np.nanmin(self.data))
max_val = np.abs(np.nanmax(self.data))
clim_val = np.max([max_val, min_val])
kwargs['clim'] = (-clim_val, clim_val)
super().heatmap(*args, **kwargs)
@classmethod
def from_exp(self, exp, common_field, group_field, value1, value2=None, threshold=5, sample_md_suffix=None):
'''Create a RatioExperiment object from two groups of samples in an experiment.
ratios are calculated for each unique value of common_field. For each such value, 2 groups of samples (group1/group2) are created by selecting all samples with value1/value2
in the group_field. The ratio for the common field value for each feature is calculated by taking the log2 of mean(group1)/mean(group2) for the feature.
Parameters
----------
exp: calour.Experiment
The experiment to take the 2 groups of samples from.
common_field: str
Name of the sample metadata field to calculate the ratio within.
For example, to calculate the ratio between 2 timepoints (e.g. before and after treatment) for each subject,
common_field would be something like 'subject_id'.
group_field: str
Name of the sample metadata field on which to divide samples to 2 groups for ratio calculation.
For example, to calculate the ratio between before and after treatment for each individual,
group_field with be 'treatment' that has 'before_treatment' and 'after_treatment'.
value1 : str or iterable of str
Values for field that will be assigned to the first (numerator) sample group. For example 'before_treatment'.
If more than one sample matches value1 in field group_field (for a given common_field value), use the mean of the frequency
(of each feature) as the numerator value for the sample group.
value2: str or iterable of str, default=None
If not None, values for field that will be assigned to the second (denominator) sample group. For example 'after_treatment'
If None, use all samples not in the first sample group as the second sample group.
Similar to value1, if more than 1 sample matches, use their mean frequency as the denominator value.
threshold: float or None, optional
If not None, assign each data value < threshold to threshold. If both numerator and denominator are < threshold,
the resulting ratio will assigned be np.nan.
For example, in an AmpliconExperiment, ratios calculated between low read numbers are not reliable. So it is advised to set threshold to approx. 10
sample_md_suffix: tuple of (str, str), default=None
The suffix to add to the ratio experiment for each sample metadata column name, indicating if it is from value1 or value2 group.
If none, append value1 or value2 to the column name respectively.
In the ratio experiment, since each sample is the ratio of 2 sample groups, the sample metadata can originate from the first or second group. Therefore
the 2 values will be stored in 2 columns for each original metadata column.
Returns
-------
RatioExperiment
The samples are all the unique values of common_field (that have >= 1 sample matching value1 and value2 in group_field).
The data values for each feature are the ratio of mean data values in samples of value1 in
group_field / data values in samples of value2 in group_field
sample_metadata contains two columns for each original sample_metadata column: the value for group1 and for group2
The index for each ratio sample is the index of the group1 sample
'''
new_field_val = '%s / ' % value1
exp.sparse = False
value1 = _to_list(value1)
if value2 is None:
new_field_val += 'Other'
value2 = [str(x) for x in set(exp.sample_metadata[group_field].unique()).difference(set(value1))]
else:
new_field_val += '%s' % value2
value2 = _to_list(value2)
ratio_mat = np.zeros([len(exp.sample_metadata[common_field].unique()), exp.shape[1]])
# new_sample_metadata = pd.DataFrame()
if sample_md_suffix is None:
sample_md_suffix = (".".join(value1), ".".join(value2))
new_columns = [x + '_%s' % sample_md_suffix[0] for x in exp.sample_metadata.columns]
new_columns.extend([x + '_%s' % sample_md_suffix[1] for x in exp.sample_metadata.columns])
new_sample_metadata = pd.DataFrame(columns=new_columns)
# used to count the number of samples that have values in both groups
# we then keep only these columns in the ratio_mat
found_indices = []
for idx, cexp in enumerate(exp.iterate(common_field, axis=0)):
# cfield = cexp.sample_metadata[common_field].iloc[0]
group1 = cexp.filter_samples(group_field, value1)
group2 = cexp.filter_samples(group_field, value2)
if group1.shape[0] == 0 or group2.shape[0] == 0:
continue
# replace all <= threshold values if needed
if threshold is not None:
group1.data[group1.data <= threshold] = threshold
group2.data[group2.data <= threshold] = threshold
mean_g1 = np.mean(group1.data, axis=0)
mean_g2 = np.mean(group2.data, axis=0)
ratios = np.log2(mean_g1 / mean_g2)
# Take care of features where both numerator and denominator are below the min_threshold. We should get np.nan
if threshold is not None:
ratios[np.logical_and(mean_g1 == threshold, mean_g2 == threshold)] = np.nan
ratio_mat[idx, :] = ratios
cmetadata = pd.DataFrame(columns=new_columns)
# the new index for this ratio is the sample ID of the numerator sample
csamp_id = group1.sample_metadata['_sample_id'][0]
for ccol in group1.sample_metadata.columns:
u1 = group1.sample_metadata[ccol].unique()
if len(u1) == 1:
cmetadata.at[csamp_id, ccol + '_%s' % sample_md_suffix[0]] = u1[0]
else:
cmetadata.at[csamp_id, ccol + '_%s' % sample_md_suffix[0]] = 'NA (multiple values)'
u2 = group2.sample_metadata[ccol].unique()
if len(u2) == 1:
cmetadata.at[csamp_id, ccol + '_%s' % sample_md_suffix[1]] = u2[0]
else:
cmetadata.at[csamp_id, ccol + '_%s' % sample_md_suffix[1]] = 'NA (multiple values)'
# cmetadata.at[group_field] = new_field_val
new_sample_metadata = new_sample_metadata.append(cmetadata)
found_indices.append(idx)
# keep only samples that were actually added to the ratio_mat
ratio_mat = ratio_mat[found_indices, :]
logger.info('Calculated ratios for %d unique sample groups' % len(found_indices))
ratio_exp = RatioExperiment(data=ratio_mat, sample_metadata=new_sample_metadata, feature_metadata=exp.feature_metadata,
sparse=False, databases=exp.databases, description=exp.description, info=exp.info)
return ratio_exp
def get_sign_pvals(self, alpha=0.1, min_present=5):
'''Get FDR corrected p-values for rejecting the null hypothesis that the signs of the ratios originate from a p=0.5 binomial distribution.
This test is used in order to identify features that increase/decrease significantly. For example, if the RatioExperiments is created for
pre- and post-treatment samples of individuals (ratio is pre/post), get_sign_pvals can be used to identify features that significantly
increase/decrease following the treatment.
NOTE: The test is performed only on the non-NaN feature values.
Parameters
----------
alpha: float, optional
The required FDR control level
min_present: int, optional
The minimal number of samples where the ratio is not NaN or zero in order to be included in the test.
Used as a filter to achieve better FDR power (fewer hypotheses to test).
Returns
-------
RatioExperiment
Only features with a higher-than-random number of positive or negative ratios are kept.
Features are sorted by the effect size (and by p-value for similar effect size).
The feature_metadata contains 4 new fields: '_calour_stat', '_calour_pval', '_calour_qval',
'_calour_direction', similar to calour.analysis.diff_abundance().
'''
exp = self.copy()
# need to convert to non-sparse in order to use np.isfinite()
exp.sparse = False
keep = []
pvals = np.ones(exp.shape[1])
esize = np.zeros(exp.shape[1])
npos = np.zeros(exp.shape[1])
nneg = np.zeros(exp.shape[1])
for idx in range(exp.shape[1]):
cdat = exp.data[:, idx]
cnpos = np.sum(cdat[np.isfinite(cdat)] > 0)
cnneg = np.sum(cdat[np.isfinite(cdat)] < 0)
npos[idx] = cnpos
nneg[idx] = cnneg
# test if we have enough non-zero samples
if npos[idx] + nneg[idx] >= min_present:
# calculate the binomial p-value and effect size for the feature
pvals[idx] = scipy.stats.binom_test(cnpos, cnpos + cnneg)
esize[idx] = (cnpos - cnneg) / (cnpos + cnneg)
keep.append(idx)
logger.debug('keeping %d features with enough ratios' % len(keep))
exp = exp.reorder(keep, axis='f')
if len(keep) == 0:
logger.warning('No significant features found')
return exp
pvals = pvals[keep]
esize = esize[keep]
# multiple testing correction using Benjamini-Hochberg FDR
# note we cannot use dsFDR as this is not a 2 group test
reject, qvals, *_ = multipletests(pvals, alpha=alpha, method='fdr_bh')
newexp = _new_experiment_from_pvals(exp, None, reject, esize, pvals, qvals)
# set the effect direction field
newexp.feature_metadata[_CALOUR_DIRECTION] = ['positive' if x > 0 else 'negative' for x in newexp.feature_metadata[_CALOUR_STAT]]
logger.info('found %d significant' % len(newexp.feature_metadata))
return newexp
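# Hedged usage sketch (field names such as 'subject_id' and 'treatment' are
# illustrative assumptions, not part of this module): starting from a calour
# experiment `exp` with paired before/after samples, compute per-subject
# ratios and keep the features whose sign changes more often than chance.
#
#     ratios = RatioExperiment.from_exp(exp, common_field='subject_id',
#                                       group_field='treatment',
#                                       value1='before', value2='after',
#                                       threshold=10)
#     significant = ratios.get_sign_pvals(alpha=0.1, min_present=5)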
| bsd-3-clause |
niketanpansare/systemml | src/main/python/systemml/defmatrix.py | 7 | 48505 | # -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
__all__ = [
'setSparkContext',
'matrix',
'eval',
'solve',
'DMLOp',
'set_lazy',
'debug_array_conversion',
'load',
'full',
'seq']
import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix, spmatrix
try:
import py4j.java_gateway
from py4j.java_gateway import JavaObject
from pyspark import SparkContext
from pyspark.sql import DataFrame, SparkSession
import pyspark.mllib.common
except ImportError:
raise ImportError(
'Unable to import `pyspark`. Hint: Make sure you are running with PySpark.')
from . import MLContext, pydml, _java2py, Matrix
from .converters import *
def setSparkContext(sc):
"""
Before using the matrix, the user needs to invoke this function if SparkContext is not previously created in the session.
Parameters
----------
sc: SparkContext
SparkContext
"""
matrix.sc = sc
matrix.sparkSession = SparkSession.builder.getOrCreate()
matrix.ml = MLContext(matrix.sc)
def check_MLContext():
if matrix.ml is None:
if SparkContext._active_spark_context is not None:
setSparkContext(SparkContext._active_spark_context)
else:
raise Exception(
'Expected setSparkContext(sc) to be called, where sc is active SparkContext.')
########################## AST related operations ########################
class DMLOp(object):
"""
Represents an intermediate node of Abstract syntax tree created to generate the PyDML script
"""
def __init__(self, inputs, dml=None):
self.inputs = inputs
self.dml = dml
self.ID = None
self.depth = 1
for m in self.inputs:
m.referenced = m.referenced + [self]
if isinstance(m, matrix) and m.op is not None:
self.depth = max(self.depth, m.op.depth + 1)
MAX_DEPTH = 0
def _visit(self, execute=True):
matrix.dml = matrix.dml + self.dml
def _print_ast(self, numSpaces):
ret = []
for m in self.inputs:
ret = ret + [m._print_ast(numSpaces + 2)]
return ''.join(ret)
# Special object used internally to specify the placeholder which will be replaced by output ID
# This helps to provide dml containing output ID in construct_intermediate_node
OUTPUT_ID = '$$OutputID$$'
def set_lazy(isLazy):
"""
This method allows users to set whether the matrix operations should be executed in lazy manner.
Parameters
----------
isLazy: True if matrix operations should be evaluated in lazy manner.
"""
if isLazy:
DMLOp.MAX_DEPTH = 0
else:
DMLOp.MAX_DEPTH = 1
def construct_intermediate_node(inputs, dml):
"""
Convenient utility to create an intermediate node of AST.
Parameters
----------
inputs = list of input matrix objects and/or DMLOp
dml = list of DML strings (which will eventually be joined before execution). To specify the output ID, please use the placeholder OUTPUT_ID
"""
dmlOp = DMLOp(inputs)
out = matrix(None, op=dmlOp)
dmlOp.dml = [out.ID if x == OUTPUT_ID else x for x in dml]
if DMLOp.MAX_DEPTH > 0 and out.op.depth >= DMLOp.MAX_DEPTH:
out.eval()
return out
def load(file, format='csv'):
"""
Allows user to load a matrix from filesystem
Parameters
----------
file: filepath
format: can be csv, text or binary or mm
"""
return construct_intermediate_node(
[], [OUTPUT_ID, ' = load(\"', file, '\", format=\"', format, '\")\n'])
def full(shape, fill_value):
"""
Return a new array of given shape filled with fill_value.
Parameters
----------
shape: tuple of length 2
fill_value: float or int
"""
return construct_intermediate_node([], [OUTPUT_ID, ' = full(', str(
fill_value), ', rows=', str(shape[0]), ', cols=', str(shape[1]), ')\n'])
def reset():
"""
Resets the visited status of matrix and the operators in the generated AST.
"""
for m in matrix.visited:
m.visited = False
matrix.visited = []
matrix.ml = MLContext(matrix.sc)
matrix.dml = []
matrix.script = pydml('')
def perform_dfs(outputs, execute):
"""
Traverses the forest of nodes rooted at outputs nodes and returns the DML script to execute
"""
for m in outputs:
m.output = True
m._visit(execute=execute)
return ''.join(matrix.dml)
###############################################################################
########################## Utility functions ##################################
def _log_base(val, base):
if not isinstance(val, str):
raise ValueError('The val to _log_base should be of type string')
return '(log(' + val + ')/log(' + str(base) + '))'
def _matricize(lhs, inputs):
"""
Utility fn to convert the supported types to matrix class or to string (if float or int)
and return the string to be passed to DML as well as inputs
"""
if isinstance(lhs, SUPPORTED_TYPES):
lhs = matrix(lhs)
if isinstance(lhs, matrix):
lhsStr = lhs.ID
inputs = inputs + [lhs]
elif isinstance(lhs, float) or isinstance(lhs, int):
lhsStr = str(lhs)
else:
raise TypeError('Incorrect type')
return lhsStr, inputs
def binary_op(lhs, rhs, opStr):
"""
Common function called by all the binary operators in matrix class
"""
inputs = []
lhsStr, inputs = _matricize(lhs, inputs)
rhsStr, inputs = _matricize(rhs, inputs)
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = ', lhsStr, opStr, rhsStr, '\n'])
def binaryMatrixFunction(X, Y, fnName):
"""
Common function called by supported PyDML built-in function that has two arguments.
"""
inputs = []
lhsStr, inputs = _matricize(X, inputs)
rhsStr, inputs = _matricize(Y, inputs)
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = ', fnName, '(', lhsStr, ', ', rhsStr, ')\n'])
def unaryMatrixFunction(X, fnName):
"""
Common function called by supported PyDML built-in function that has one argument.
"""
inputs = []
lhsStr, inputs = _matricize(X, inputs)
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = ', fnName, '(', lhsStr, ')\n'])
def seq(start=None, stop=None, step=1):
"""
Creates a single column vector with values starting from <start>, to <stop>, in increments of <step>.
Note: Unlike Numpy's arange which returns a row-vector, this returns a column vector.
Also, unlike Numpy's arange which does not include stop, this method includes stop in the interval.
Parameters
----------
start: int or float [Optional: default = 0]
stop: int or float
step : int or float [Optional: default = 1]
"""
if start is None and stop is None:
raise ValueError('Both start and stop cannot be None')
elif start is not None and stop is None:
stop = start
start = 0
return construct_intermediate_node(
[], [OUTPUT_ID, ' = seq(', str(start), ',', str(stop), ',', str(step), ')\n'])
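# Hedged usage sketch (assumes setSparkContext(sc) was already called with an
# active SparkContext): seq() builds a lazy column vector that is evaluated
# only on demand.
#
#     v = seq(0, 4, 2)  # column vector with entries 0, 2 and 4
#     v.toNumPy()       # triggers evaluation; returns a (3, 1) ndarray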
# utility function that converts 1:3 into DML string
def convert_seq_to_dml(s):
ret = []
if s is None:
return ''
elif isinstance(s, slice):
if s.step is not None:
raise ValueError('Slicing with step is not supported.')
if s.start is None:
ret = ret + ['0 : ']
else:
ret = ret + [getValue(s.start), ':']
if s.stop is None:
ret = ret + ['']
else:
ret = ret + [getValue(s.stop)]
else:
ret = ret + [getValue(s)]
return ''.join(ret)
# utility function that converts index (such as [1, 2:3]) into DML string
def getIndexingDML(index):
ret = ['[']
if isinstance(index, tuple) and len(index) == 1:
ret = ret + [convert_seq_to_dml(index[0]), ',']
elif isinstance(index, tuple) and len(index) == 2:
ret = ret + [convert_seq_to_dml(index[0]),
',', convert_seq_to_dml(index[1])]
else:
raise TypeError(
'matrix indexes can only be tuple of length 2. For example: m[1,1], m[0:1,], m[:, 0:1]')
return ret + [']']
def convert_outputs_to_list(outputs):
if isinstance(outputs, matrix):
return [outputs]
elif isinstance(outputs, list):
for o in outputs:
if not isinstance(o, matrix):
raise TypeError('Only matrix or list of matrix allowed')
return outputs
else:
raise TypeError('Only matrix or list of matrix allowed')
def reset_output_flag(outputs):
for m in outputs:
m.output = False
###############################################################################
########################## Global user-facing functions #######################
def solve(A, b):
"""
Computes the least squares solution for system of linear equations A %*% x = b
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> import SystemML as sml
>>> from pyspark.sql import SparkSession
>>> diabetes = datasets.load_diabetes()
>>> diabetes_X = diabetes.data[:, np.newaxis, 2]
>>> X_train = diabetes_X[:-20]
>>> X_test = diabetes_X[-20:]
>>> y_train = diabetes.target[:-20]
>>> y_test = diabetes.target[-20:]
>>> sml.setSparkContext(sc)
>>> X = sml.matrix(X_train)
>>> y = sml.matrix(y_train)
>>> A = X.transpose().dot(X)
>>> b = X.transpose().dot(y)
>>> beta = sml.solve(A, b).toNumPy()
>>> y_predicted = X_test.dot(beta)
>>> print('Residual sum of squares: %.2f' % np.mean((y_predicted - y_test) ** 2))
Residual sum of squares: 25282.12
"""
return binaryMatrixFunction(A, b, 'solve')
def eval(outputs, execute=True):
"""
Executes the unevaluated DML script and computes the matrices specified by outputs.
Parameters
----------
outputs: list of matrices or a matrix object
execute: specified whether to execute the unevaluated operation or just return the script.
"""
check_MLContext()
reset()
outputs = convert_outputs_to_list(outputs)
matrix.script.setScriptString(perform_dfs(outputs, execute))
if not execute:
reset_output_flag(outputs)
return matrix.script.scriptString
results = matrix.ml.execute(matrix.script)
for m in outputs:
m.eval_data = results._java_results.get(m.ID)
reset_output_flag(outputs)
def debug_array_conversion(throwError):
matrix.THROW_ARRAY_CONVERSION_ERROR = throwError
def _get_new_var_id():
matrix.systemmlVarID += 1
return 'mVar' + str(matrix.systemmlVarID)
###############################################################################
class matrix(object):
"""
matrix class is a python wrapper that implements basic matrix operators, matrix functions
as well as converters to common Python types (for example: Numpy arrays, PySpark DataFrame
and Pandas DataFrame).
The operators supported are:
1. Arithmetic operators: +, -, *, /, //, %, ** as well as dot (i.e. matrix multiplication)
2. Indexing in the matrix
3. Relational/Boolean operators: <, <=, >, >=, ==, !=, &, |
In addition, following functions are supported for matrix:
1. transpose
2. Aggregation functions: sum, mean, var, sd, max, min, argmin, argmax, cumsum
3. Global statistical built-In functions: exp, log, abs, sqrt, round, floor, ceil, ceiling, sin, cos, tan, asin, acos, atan, sign, solve
For all the above functions, we always return a two dimensional matrix, especially for aggregation functions with axis.
For example: Assuming m1 is a matrix of (3, n), NumPy returns a 1d vector of dimension (3,) for operation m1.sum(axis=1)
whereas SystemML returns a 2d matrix of dimension (3, 1).
Note: an evaluated matrix contains a data field computed by eval method as DataFrame or NumPy array.
Examples
--------
>>> import SystemML as sml
>>> import numpy as np
>>> sml.setSparkContext(sc)
Welcome to Apache SystemML!
>>> m1 = sml.matrix(np.ones((3,3)) + 2)
>>> m2 = sml.matrix(np.ones((3,3)) + 3)
>>> m2 = m1 * (m2 + m1)
>>> m4 = 1.0 - m2
>>> m4
# This matrix (mVar5) is backed by below given PyDML script (which is not yet evaluated). To fetch the data of this matrix, invoke toNumPy() or toDF() or toPandas() methods.
mVar1 = load(" ", format="csv")
mVar2 = load(" ", format="csv")
mVar3 = mVar2 + mVar1
mVar4 = mVar1 * mVar3
mVar5 = 1.0 - mVar4
save(mVar5, " ")
>>> m2.eval()
>>> m2
# This matrix (mVar4) is backed by NumPy array. To fetch the NumPy array, invoke toNumPy() method.
>>> m4
# This matrix (mVar5) is backed by below given PyDML script (which is not yet evaluated). To fetch the data of this matrix, invoke toNumPy() or toDF() or toPandas() methods.
mVar4 = load(" ", format="csv")
mVar5 = 1.0 - mVar4
save(mVar5, " ")
>>> m4.sum(axis=1).toNumPy()
array([[-60.],
[-60.],
[-60.]])
Design Decisions:
1. Until eval() method is invoked, we create an AST (not exposed to the user) that consist of unevaluated operations and data required by those operations.
As an anology, a spark user can treat eval() method similar to calling RDD.persist() followed by RDD.count().
2. The AST consist of two kinds of nodes: either of type matrix or of type DMLOp.
Both these classes expose _visit method, that helps in traversing the AST in DFS manner.
3. A matrix object can either be evaluated or not.
If evaluated, the attribute 'data' is set to one of the supported types (for example: NumPy array or DataFrame). In this case, the attribute 'op' is set to None.
If not evaluated, the attribute 'op' which refers to one of the intermediate node of AST and if of type DMLOp. In this case, the attribute 'data' is set to None.
4. DMLOp has an attribute 'inputs' which contains list of matrix objects or DMLOp.
5. To simplify the traversal, every matrix object is considered immutable and an matrix operations creates a new matrix object.
As an example:
`m1 = sml.matrix(np.ones((3,3)))` creates a matrix object backed by 'data=(np.ones((3,3))'.
`m1 = m1 * 2` will create a new matrix object which is now backed by 'op=DMLOp( ... )' whose input is earlier created matrix object.
6. Left indexing (implemented in __setitem__ method) is a special case, where Python expects the existing object to be mutated.
To ensure the above property, we make deep copy of existing object and point any references to the left-indexed matrix to the newly created object.
Then the left-indexed matrix is set to be backed by DMLOp consisting of following pydml:
left-indexed-matrix = new-deep-copied-matrix
left-indexed-matrix[index] = value
7. Please use m.print_ast() and/or type `m` for debugging. Here is a sample session:
>>> npm = np.ones((3,3))
>>> m1 = sml.matrix(npm + 3)
>>> m2 = sml.matrix(npm + 5)
>>> m3 = m1 + m2
>>> m3
mVar2 = load(" ", format="csv")
mVar1 = load(" ", format="csv")
mVar3 = mVar1 + mVar2
save(mVar3, " ")
>>> m3.print_ast()
- [mVar3] (op).
- [mVar1] (data).
- [mVar2] (data).
"""
# Global variable that is used to keep track of intermediate matrix
# variables in the DML script
systemmlVarID = 0
# Since joining of string is expensive operation, we collect the set of strings into list and then join
# them before execution: See matrix.script.scriptString =
# ''.join(matrix.dml) in eval() method
dml = []
# Represents MLContext's script object
script = None
# Represents MLContext object
ml = None
# Contains list of nodes visited in Abstract Syntax Tree. This helps to avoid computation of matrix objects
# that have been previously evaluated.
visited = []
def __init__(self, data, op=None):
"""
Constructs a lazy matrix
Parameters
----------
data: NumPy ndarray, Pandas DataFrame, scipy sparse matrix or PySpark DataFrame. (data cannot be None for external users, 'data=None' is used internally for lazy evaluation).
"""
self.dtype = np.double
check_MLContext()
self.visited = False
self.output = False
self.ID = _get_new_var_id()
self.referenced = []
# op refers to the node of Abstract Syntax Tree created internally for
# lazy evaluation
self.op = op
self.eval_data = data
self._shape = None
if isinstance(data, SUPPORTED_TYPES):
self._shape = data.shape
if not (isinstance(data, SUPPORTED_TYPES) or hasattr(
data, '_jdf') or (data is None and op is not None)):
raise TypeError('Unsupported input type')
def eval(self):
"""
This is a convenience function that calls the global eval method
"""
eval([self])
def toPandas(self):
"""
This is a convenience function that calls the global eval method and then converts the matrix object into Pandas DataFrame.
"""
self.eval()
if isinstance(self.eval_data, py4j.java_gateway.JavaObject):
self.eval_data = _java2py(
SparkContext._active_spark_context, self.eval_data)
if isinstance(self.eval_data, Matrix):
self.eval_data = self.eval_data.toNumPy()
self.eval_data = convertToPandasDF(self.eval_data)
return self.eval_data
def toNumPy(self):
"""
This is a convenience function that calls the global eval method and then converts the matrix object into NumPy array.
"""
self.eval()
if isinstance(self.eval_data, py4j.java_gateway.JavaObject):
self.eval_data = _java2py(
SparkContext._active_spark_context, self.eval_data)
if isinstance(self.eval_data, Matrix):
self.eval_data = self.eval_data.toNumPy()
return self.eval_data
if isinstance(self.eval_data, pd.DataFrame):
self.eval_data = self.eval_data.as_matrix()
elif isinstance(self.eval_data, DataFrame):
self.eval_data = self.eval_data.toPandas().as_matrix()
elif isinstance(self.eval_data, spmatrix):
self.eval_data = self.eval_data.toarray()
elif isinstance(self.eval_data, Matrix):
self.eval_data = self.eval_data.toNumPy()
# Always keep default format as NumPy array if possible
return self.eval_data
def toDF(self):
"""
This is a convenience function that calls the global eval method and then converts the matrix object into DataFrame.
"""
if isinstance(self.eval_data, DataFrame):
return self.eval_data
if isinstance(self.eval_data, py4j.java_gateway.JavaObject):
self.eval_data = _java2py(
SparkContext._active_spark_context, self.eval_data)
if isinstance(self.eval_data, Matrix):
self.eval_data = self.eval_data.toDF()
return self.eval_data
self.eval_data = matrix.sparkSession.createDataFrame(self.toPandas())
return self.eval_data
def save(self, file, format='csv'):
"""
Allows user to save a matrix to filesystem
Parameters
----------
file: filepath
format: can be csv, text or binary or mm
"""
tmp = construct_intermediate_node(
[self], ['save(', self.ID, ',\"', file, '\", format=\"', format, '\")\n'])
construct_intermediate_node(
[tmp], [OUTPUT_ID, ' = full(0, rows=1, cols=1)\n']).eval()
def _mark_as_visited(self):
self.visited = True
# for cleanup
matrix.visited = matrix.visited + [self]
return self
def _register_as_input(self, execute):
# TODO: Remove this when automatic registration of frame is resolved
matrix.dml = [self.ID, ' = load(\" \", format=\"csv\")\n'] + matrix.dml
if isinstance(self.eval_data, SUPPORTED_TYPES) and execute:
matrix.script.input(
self.ID, convertToMatrixBlock(
matrix.sc, self.eval_data))
elif execute:
matrix.script.input(self.ID, self.toDF())
return self
def _register_as_output(self, execute):
# TODO: Remove this when automatic registration of frame is resolved
matrix.dml = matrix.dml + ['save(', self.ID, ', \" \")\n']
if execute:
matrix.script.output(self.ID)
def _visit(self, execute=True):
"""
This function is called for two scenarios:
1. For printing the PyDML script which has not yet been evaluated (execute=False). See '__repr__' method.
2. Called as part of 'eval' method (execute=True). In this scenario, it builds the PyDML script by visiting itself
and its child nodes. Also, it does appropriate registration as input or output that is required by MLContext.
"""
if self.visited:
return self
self._mark_as_visited()
if self.eval_data is not None:
self._register_as_input(execute)
elif self.op is not None:
# Traverse the AST
for m in self.op.inputs:
m._visit(execute=execute)
self.op._visit(execute=execute)
else:
raise Exception('Expected either op or data to be set')
if self.eval_data is None and self.output:
self._register_as_output(execute)
return self
def print_ast(self):
"""
Please use m.print_ast() and/or type `m` for debugging. Here is a sample session:
>>> npm = np.ones((3,3))
>>> m1 = sml.matrix(npm + 3)
>>> m2 = sml.matrix(npm + 5)
>>> m3 = m1 + m2
>>> m3
mVar2 = load(" ", format="csv")
mVar1 = load(" ", format="csv")
mVar3 = mVar1 + mVar2
save(mVar3, " ")
>>> m3.print_ast()
- [mVar3] (op).
- [mVar1] (data).
- [mVar2] (data).
"""
return self._print_ast(0)
def _print_ast(self, numSpaces):
head = ''.join([' '] * numSpaces + ['- [', self.ID, '] '])
if self.eval_data is not None:
out = head + '(data).\n'
elif self.op is not None:
ret = [head, '(op).\n']
for m in self.op.inputs:
ret = ret + [m._print_ast(numSpaces + 2)]
out = ''.join(ret)
else:
raise ValueError('Either op or data needs to be set')
if numSpaces == 0:
print(out)
else:
return out
def __repr__(self):
"""
This function helps to debug matrix class and also examine the generated PyDML script
"""
if self.eval_data is None:
print(
'# This matrix (' +
self.ID +
') is backed by below given PyDML script (which is not yet evaluated). To fetch the data of this matrix, invoke toNumPy() or toDF() or toPandas() methods.\n' +
eval(
[self],
execute=False))
else:
print('# This matrix (' + self.ID + ') is backed by ' + str(type(self.eval_data)) +
'. To fetch the DataFrame or NumPy array, invoke toDF() or toNumPy() method respectively.')
return ''
######################### NumPy related methods ##########################
__array_priority__ = 10.2
ndim = 2
THROW_ARRAY_CONVERSION_ERROR = False
def __array__(self, dtype=np.double):
"""
As per NumPy from Python,
This method is called to obtain an ndarray object when needed. You should always guarantee this returns an actual ndarray object.
Using this method, you get back a ndarray object, and subsequent operations on the returned ndarray object will be singlenode.
"""
if not isinstance(self.eval_data, SUPPORTED_TYPES):
# Only warn if there is an unevaluated operation (which could
# potentially generate large matrix or if data is non-supported
# singlenode formats)
import inspect
frame, filename, line_number, function_name, lines, index = inspect.stack()[
1]
msg = 'Conversion from SystemML matrix to NumPy array (occurs in ' + str(
filename) + ':' + str(line_number) + ' ' + function_name + ")"
if matrix.THROW_ARRAY_CONVERSION_ERROR:
raise Exception('[ERROR]:' + msg)
else:
print('[WARN]:' + msg)
return np.array(self.toNumPy(), dtype)
def astype(self, t):
# TODO: Throw error if incorrect type
return self
def asfptype(self):
return self
def set_shape(self, shape):
raise NotImplementedError('Reshaping is not implemented')
def get_shape(self):
if self._shape is None:
lhsStr, inputs = _matricize(self, [])
rlen_ID = _get_new_var_id()
clen_ID = _get_new_var_id()
multiline_dml = [rlen_ID, ' = ', lhsStr, '.shape(0)\n']
multiline_dml = multiline_dml + \
[clen_ID, ' = ', lhsStr, '.shape(1)\n']
multiline_dml = multiline_dml + \
[OUTPUT_ID, ' = full(0, rows=2, cols=1)\n']
multiline_dml = multiline_dml + \
[OUTPUT_ID, '[0,0] = ', rlen_ID, '\n']
multiline_dml = multiline_dml + \
[OUTPUT_ID, '[1,0] = ', clen_ID, '\n']
ret = construct_intermediate_node(inputs, multiline_dml).toNumPy()
self._shape = tuple(np.array(ret, dtype=int).flatten())
return self._shape
shape = property(fget=get_shape, fset=set_shape)
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
"""
This function enables systemml matrix to be compatible with NumPy's ufuncs.
Parameters
----------
func: ufunc object that was called.
method: string indicating which Ufunc method was called (one of "__call__", "reduce", "reduceat", "accumulate", "outer", "inner").
pos: index of self in inputs.
inputs: tuple of the input arguments to the ufunc
kwargs: dictionary containing the optional input arguments of the ufunc.
"""
if method != '__call__' or kwargs:
return NotImplemented
if func in matrix._numpy_to_systeml_mapping:
fn = matrix._numpy_to_systeml_mapping[func]
else:
return NotImplemented
if len(inputs) == 2:
return fn(inputs[0], inputs[1])
elif len(inputs) == 1:
return fn(inputs[0])
else:
raise ValueError('Unsupported number of inputs')
def hstack(self, other):
"""
Stack matrices horizontally (column wise). Invokes cbind internally.
"""
return binaryMatrixFunction(self, other, 'cbind')
def vstack(self, other):
"""
Stack matrices vertically (row wise). Invokes rbind internally.
"""
return binaryMatrixFunction(self, other, 'rbind')
######################### Arithmetic operators ###########################
def negative(self):
lhsStr, inputs = _matricize(self, [])
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = -', lhsStr, '\n'])
def remainder(self, other):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rhsStr, inputs = _matricize(other, inputs)
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = floor(', lhsStr, '/', rhsStr, ') * ', rhsStr, '\n'])
def ldexp(self, other):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rhsStr, inputs = _matricize(other, inputs)
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = ', lhsStr, '* (2**', rhsStr, ')\n'])
def mod(self, other):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rhsStr, inputs = _matricize(other, inputs)
return construct_intermediate_node(inputs, [
OUTPUT_ID, ' = ', lhsStr, ' - floor(', lhsStr, '/', rhsStr, ') * ', rhsStr, '\n'])
def logaddexp(self, other):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rhsStr, inputs = _matricize(other, inputs)
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = log(exp(', lhsStr, ') + exp(', rhsStr, '))\n'])
def logaddexp2(self, other):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rhsStr, inputs = _matricize(other, inputs)
opStr = _log_base('2**' + lhsStr + ' + 2**' + rhsStr, 2)
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = ', opStr, '\n'])
def log1p(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = log(1 + ', lhsStr, ')\n'])
def exp(self):
return unaryMatrixFunction(self, 'exp')
def exp2(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = 2**', lhsStr, '\n'])
def square(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = ', lhsStr, '**2\n'])
def reciprocal(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = 1/', lhsStr, '\n'])
def expm1(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = exp(', lhsStr, ') - 1\n'])
def ones_like(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rlen = lhsStr + '.shape(axis=0)'
clen = lhsStr + '.shape(axis=1)'
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = full(1, rows=', rlen, ', cols=', clen, ')\n'])
def zeros_like(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rlen = lhsStr + '.shape(axis=0)'
clen = lhsStr + '.shape(axis=1)'
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = full(0, rows=', rlen, ', cols=', clen, ')\n'])
def log2(self):
return self.log(2)
def log10(self):
return self.log(10)
def log(self, y=None):
if y is None:
return unaryMatrixFunction(self, 'log')
else:
return binaryMatrixFunction(self, y, 'log')
def abs(self):
return unaryMatrixFunction(self, 'abs')
def sqrt(self):
return unaryMatrixFunction(self, 'sqrt')
def round(self):
return unaryMatrixFunction(self, 'round')
def floor(self):
return unaryMatrixFunction(self, 'floor')
def ceil(self):
return unaryMatrixFunction(self, 'ceil')
def ceiling(self):
return unaryMatrixFunction(self, 'ceiling')
def sin(self):
return unaryMatrixFunction(self, 'sin')
def cos(self):
return unaryMatrixFunction(self, 'cos')
def tan(self):
return unaryMatrixFunction(self, 'tan')
def sinh(self):
return unaryMatrixFunction(self, 'sinh')
def cosh(self):
return unaryMatrixFunction(self, 'cosh')
def tanh(self):
return unaryMatrixFunction(self, 'tanh')
def arcsin(self):
return self.asin()
def arccos(self):
return self.acos()
def arctan(self):
return self.atan()
def asin(self):
return unaryMatrixFunction(self, 'asin')
def acos(self):
return unaryMatrixFunction(self, 'acos')
def atan(self):
return unaryMatrixFunction(self, 'atan')
def rad2deg(self):
"""
Convert angles from radians to degrees.
"""
inputs = []
lhsStr, inputs = _matricize(self, inputs)
# 180/pi = 57.2957795131
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = ', lhsStr, '*57.2957795131\n'])
def deg2rad(self):
"""
Convert angles from degrees to radians.
"""
inputs = []
lhsStr, inputs = _matricize(self, inputs)
# pi/180 = 0.01745329251
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = ', lhsStr, '*0.01745329251\n'])
def sign(self):
return unaryMatrixFunction(self, 'sign')
def __add__(self, other):
return binary_op(self, other, ' + ')
def __sub__(self, other):
return binary_op(self, other, ' - ')
def __mul__(self, other):
return binary_op(self, other, ' * ')
def __floordiv__(self, other):
return binary_op(self, other, ' // ')
def __div__(self, other):
"""
Performs division (Python 2 way).
"""
return binary_op(self, other, ' / ')
def __truediv__(self, other):
"""
Performs division (Python 3 way).
"""
return binary_op(self, other, ' / ')
def __mod__(self, other):
return binary_op(self, other, ' % ')
def __pow__(self, other):
return binary_op(self, other, ' ** ')
def __radd__(self, other):
return binary_op(other, self, ' + ')
def __rsub__(self, other):
return binary_op(other, self, ' - ')
def __rmul__(self, other):
return binary_op(other, self, ' * ')
def __rfloordiv__(self, other):
return binary_op(other, self, ' // ')
def __rdiv__(self, other):
return binary_op(other, self, ' / ')
def __rtruediv__(self, other):
"""
Performs division (Python 3 way).
"""
return binary_op(other, self, ' / ')
def __rmod__(self, other):
return binary_op(other, self, ' % ')
def __rpow__(self, other):
return binary_op(other, self, ' ** ')
def dot(self, other):
"""
Numpy way of performing matrix multiplication
"""
return binaryMatrixFunction(self, other, 'dot')
def __matmul__(self, other):
"""
Performs matrix multiplication (infix operator: @). See PEP 465.
"""
return binaryMatrixFunction(self, other, 'dot')
######################### Relational/Boolean operators ###################
def __lt__(self, other):
return binary_op(self, other, ' < ')
def __le__(self, other):
return binary_op(self, other, ' <= ')
def __gt__(self, other):
return binary_op(self, other, ' > ')
def __ge__(self, other):
return binary_op(self, other, ' >= ')
def __eq__(self, other):
return binary_op(self, other, ' == ')
def __ne__(self, other):
return binary_op(self, other, ' != ')
# TODO: Cast the output back into scalar and return boolean results
def __and__(self, other):
return binary_op(other, self, ' & ')
def __or__(self, other):
return binary_op(other, self, ' | ')
def logical_not(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(
inputs, [OUTPUT_ID, ' = !', lhsStr, '\n'])
def remove_empty(self, axis=None):
"""
Removes all empty rows or columns from the input matrix target X according to the specified axis.
Parameters
----------
axis : int (0 or 1)
"""
if axis is None:
raise ValueError('axis is a mandatory argument for remove_empty')
if axis == 0:
return self._parameterized_helper_fn(
'removeEmpty', target=self, margin='rows')
elif axis == 1:
return self._parameterized_helper_fn(
'removeEmpty', target=self, margin='cols')
else:
raise ValueError(
'axis for remove_empty needs to be either 0 or 1.')
def replace(self, pattern=None, replacement=None):
"""
Replaces all occurrences of the pattern value in the matrix with the replacement value.
Parameters
----------
pattern : float or int
replacement : float or int
"""
if pattern is None or not isinstance(pattern, (float, int)):
raise ValueError('pattern should be of type float or int')
if replacement is None or not isinstance(replacement, (float, int)):
raise ValueError('replacement should be of type float or int')
return self._parameterized_helper_fn(
'replace', target=self, pattern=pattern, replacement=replacement)
def _parameterized_helper_fn(self, fnName, **kwargs):
"""
Helper to invoke parameterized builtin function
"""
dml_script = ''
lhsStr, inputs = _matricize(self, [])
dml_script = [OUTPUT_ID, ' = ', fnName, '(', lhsStr]
first_arg = True
for key in kwargs:
if first_arg:
first_arg = False
else:
dml_script = dml_script + [', ']
v = kwargs[key]
if isinstance(v, str):
dml_script = dml_script + [key, '=\"', v, '\"']
elif isinstance(v, matrix):
dml_script = dml_script + [key, '=', v.ID]
else:
dml_script = dml_script + [key, '=', str(v)]
dml_script = dml_script + [')\n']
return construct_intermediate_node(inputs, dml_script)
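# Illustrative sketch (reader's note, not part of the original API docs): for a
# matrix m, calls such as
#   m.remove_empty(axis=0)
#   m.replace(pattern=0, replacement=-1)
# route through _parameterized_helper_fn above, which stitches a parameterized
# DML builtin call (removeEmpty(...)/replace(...)) together from the keyword
# arguments, quoting string values and substituting the IDs of matrix operands.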
######################### Aggregation functions ##########################
def prod(self):
"""
Return the product of all cells in matrix
"""
return self._aggFn('prod', None)
def sum(self, axis=None):
"""
Compute the sum along the specified axis
Parameters
----------
axis : int, optional
"""
return self._aggFn('sum', axis)
def mean(self, axis=None):
"""
Compute the arithmetic mean along the specified axis
Parameters
----------
axis : int, optional
"""
return self._aggFn('mean', axis)
def var(self, axis=None):
"""
Compute the variance along the specified axis.
We assume that delta degree of freedom is 1 (unlike NumPy which assumes ddof=0).
Parameters
----------
axis : int, optional
"""
return self._aggFn('var', axis)
def moment(self, moment=1, axis=None):
"""
Calculates the nth moment about the mean
Parameters
----------
moment : int
can be 1, 2, 3 or 4
axis : int, optional
"""
if moment == 1:
return self.mean(axis)
elif moment == 2:
return self.var(axis)
elif moment == 3 or moment == 4:
return self._moment_helper(moment, axis)
else:
raise ValueError(
'The specified moment is not supported:' +
str(moment))
def _moment_helper(self, k, axis=0):
dml_script = ''
lhsStr, inputs = _matricize(self, [])
dml_script = [OUTPUT_ID, ' = moment(', lhsStr, ', ', str(k), ')\n']
if axis is None:
dml_script = [
OUTPUT_ID,
' = moment(full(',
lhsStr,
', rows=length(',
lhsStr,
'), cols=1), ',
str(k),
')\n']
elif axis == 0:
dml_script = [
OUTPUT_ID, ' = full(0, rows=nrow(', lhsStr, '), cols=1)\n']
dml_script = dml_script + \
['parfor(i in 1:nrow(', lhsStr, '), check=0):\n']
dml_script = dml_script + ['\t',
OUTPUT_ID,
'[i-1, 0] = moment(full(',
lhsStr,
'[i-1,], rows=ncol(',
lhsStr,
'), cols=1), ',
str(k),
')\n\n']
elif axis == 1:
dml_script = [
OUTPUT_ID, ' = full(0, rows=1, cols=ncol(', lhsStr, '))\n']
dml_script = dml_script + \
['parfor(i in 1:ncol(', lhsStr, '), check=0):\n']
dml_script = dml_script + \
['\t',
OUTPUT_ID,
'[0, i-1] = moment(',
lhsStr,
'[,i-1], ',
str(k),
')\n\n']
else:
raise ValueError('Incorrect axis: ' + str(axis))
return construct_intermediate_node(inputs, dml_script)
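# Reader's sketch of _moment_helper's intent, inferred from the branches above:
# with axis=None the matrix is flattened into a single column and one
# moment(., k) is taken; with axis=0 (resp. axis=1) a parfor loop fills a
# column (resp. row) vector with one k-th central moment per row (resp. column).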
def sd(self, axis=None):
"""
Compute the standard deviation along the specified axis
Parameters
----------
axis : int, optional
"""
return self._aggFn('sd', axis)
def max(self, other=None, axis=None):
"""
Compute the maximum value along the specified axis
Parameters
----------
other: matrix or numpy array (& other supported types) or scalar
axis : int, optional
"""
if other is not None and axis is not None:
raise ValueError("Arguments 'other' and 'axis' cannot both be specified")
elif other is None and axis is not None:
return self._aggFn('max', axis)
else:
return binaryMatrixFunction(self, other, 'max')
def min(self, other=None, axis=None):
"""
Compute the minimum value along the specified axis
Parameters
----------
other: matrix or numpy array (& other supported types) or scalar
axis : int, optional
"""
if other is not None and axis is not None:
raise ValueError("Arguments 'other' and 'axis' cannot both be specified")
elif other is None and axis is not None:
return self._aggFn('min', axis)
else:
return binaryMatrixFunction(self, other, 'min')
def argmin(self, axis=None):
"""
Returns the indices of the minimum values along an axis.
Parameters
----------
axis : int, optional (only axis=1, i.e. rowIndexMin is supported in this version)
"""
return self._aggFn('argmin', axis)
def argmax(self, axis=None):
"""
Returns the indices of the maximum values along an axis.
Parameters
----------
axis : int, optional (only axis=1, i.e. rowIndexMax is supported in this version)
"""
return self._aggFn('argmax', axis)
def cumsum(self, axis=None):
"""
Returns the cumulative sum along an axis.
Parameters
----------
axis : int, optional (only axis=0, i.e. cumsum along the rows is supported in this version)
"""
return self._aggFn('cumsum', axis)
def transpose(self):
"""
Transposes the matrix.
"""
return self._aggFn('transpose', None)
def trace(self):
"""
Return the sum of the cells on the main diagonal of a square matrix
"""
return self._aggFn('trace', None)
def _aggFn(self, fnName, axis):
"""
Common function that is called for functions that have axis as a parameter.
"""
dml_script = ''
lhsStr, inputs = _matricize(self, [])
if axis is None:
dml_script = [OUTPUT_ID, ' = ', fnName, '(', lhsStr, ')\n']
else:
dml_script = [OUTPUT_ID, ' = ', fnName,
'(', lhsStr, ', axis=', str(axis), ')\n']
return construct_intermediate_node(inputs, dml_script)
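# Illustrative sketch (hypothetical intermediate IDs): _aggFn turns aggregation
# calls into one-line DML assignments, roughly
#   m.sum()        ->  _mVar2 = sum(mVar1)
#   m.sum(axis=0)  ->  _mVar2 = sum(mVar1, axis=0)
# and the same template backs mean, var, sd, argmin/argmax, cumsum, transpose
# and trace above.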
######################### Indexing operators #############################
def __getitem__(self, index):
"""
Implements evaluation of right indexing operations such as m[1,1], m[0:1,], m[:, 0:1]
"""
return construct_intermediate_node(
[self], [OUTPUT_ID, ' = ', self.ID] + getIndexingDML(index) + ['\n'])
# Performs deep copy if the matrix is backed by data
def _prepareForInPlaceUpdate(self):
temp = matrix(self.eval_data, op=self.op)
for op in self.referenced:
op.inputs = [temp if x.ID == self.ID else x for x in op.inputs]
# Copy even the IDs as the IDs might be used to create DML
self.ID, temp.ID = temp.ID, self.ID
self.op = DMLOp([temp], dml=[self.ID, " = ", temp.ID])
self.eval_data = None
temp.referenced = self.referenced + [self.op]
self.referenced = []
def __setitem__(self, index, value):
"""
Implements evaluation of left indexing operations such as m[1,1]=2
"""
self._prepareForInPlaceUpdate()
if isinstance(value, matrix) or isinstance(value, DMLOp):
self.op.inputs = self.op.inputs + [value]
if isinstance(value, matrix):
value.referenced = value.referenced + [self.op]
self.op.dml = self.op.dml + \
['\n', self.ID] + \
getIndexingDML(index) + [' = ', getValue(value), '\n']
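# Reader's sketch of the left-indexing path above (IDs are hypothetical):
# m[0, 0] = 2 first swaps m's ID with a deep-copied temp via
# _prepareForInPlaceUpdate, then appends DML along the lines of
#   mVar1 = mVar3
#   mVar1[...] = 2
# so expressions already built against the old ID keep seeing the pre-update
# values.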
# Not implemented: conj, inverse-hyperbolic functions (i.e. arcsinh,
# arccosh, arctanh), bitwise operators, xor operator, isreal, iscomplex,
# isfinite, isinf, isnan, copysign, nextafter, modf, frexp, trunc
_numpy_to_systeml_mapping = {
np.add: __add__,
np.subtract: __sub__,
np.multiply: __mul__,
np.divide: __div__,
np.logaddexp: logaddexp,
np.true_divide: __truediv__,
np.floor_divide: __floordiv__,
np.negative: negative,
np.power: __pow__,
np.remainder: remainder,
np.mod: mod,
np.fmod: __mod__,
np.absolute: abs,
np.rint: round,
np.sign: sign,
np.exp: exp,
np.exp2: exp2,
np.log: log,
np.log2: log2,
np.log10: log10,
np.expm1: expm1,
np.log1p: log1p,
np.sqrt: sqrt,
np.square: square,
np.reciprocal: reciprocal,
np.ones_like: ones_like,
np.zeros_like: zeros_like,
np.sin: sin,
np.cos: cos,
np.tan: tan,
np.arcsin: arcsin,
np.arccos: arccos,
np.arctan: arctan,
np.deg2rad: deg2rad,
np.rad2deg: rad2deg,
np.greater: __gt__,
np.greater_equal: __ge__,
np.less: __lt__,
np.less_equal: __le__,
np.not_equal: __ne__,
np.equal: __eq__,
np.logical_not: logical_not,
np.logical_and: __and__,
np.logical_or: __or__,
np.maximum: max,
np.minimum: min,
np.signbit: sign,
np.ldexp: ldexp,
np.dot: dot}
| apache-2.0 |
chenyyx/scikit-learn-doc-zh | examples/en/gaussian_process/plot_gpc_isoprobability.py | 64 | 3049 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=================================================================
Iso-probability lines for Gaussian Processes classification (GPC)
=================================================================
A two-dimensional classification example showing iso-probability lines for
the predicted probabilities.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Adapted to GaussianProcessClassifier:
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import DotProduct, ConstantKernel as C
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = np.array(g(X) > 0, dtype=int)
# Instantiate and fit Gaussian Process Model
kernel = C(0.1, (1e-5, np.inf)) * DotProduct(sigma_0=0.1) ** 2
gp = GaussianProcessClassifier(kernel=kernel)
gp.fit(X, y)
print("Learned kernel: %s " % gp.kernel_)
# Evaluate real function and the predicted probability
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_prob = gp.predict_proba(xx)[:, 1]
y_true = y_true.reshape((res, res))
y_prob = y_prob.reshape((res, res))
# Plot the probabilistic classification iso-values
fig = plt.figure(1)
ax = fig.gca()
ax.axes.set_aspect('equal')
plt.xticks([])
plt.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
cax = plt.imshow(y_prob, cmap=cm.gray_r, alpha=0.8,
extent=(-lim, lim, -lim, lim))
norm = plt.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = plt.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label(r'${\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\right]$')
plt.clim(0, 1)
plt.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
plt.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = plt.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = plt.contour(x1, x2, y_prob, [0.666], colors='b',
linestyles='solid')
plt.clabel(cs, fontsize=11)
cs = plt.contour(x1, x2, y_prob, [0.5], colors='k',
linestyles='dashed')
plt.clabel(cs, fontsize=11)
cs = plt.contour(x1, x2, y_prob, [0.334], colors='r',
linestyles='solid')
plt.clabel(cs, fontsize=11)
plt.show()
| gpl-3.0 |
jkarnows/scikit-learn | examples/svm/plot_weighted_samples.py | 69 | 1942 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to their weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
huangziwei/pyMF3 | pymf3/factorization/_sparseseminmf.py | 1 | 1891 | import numpy as np
from ._base import Base
from sklearn.cluster import KMeans
__all__ = ['ssNMF']
class ssNMF(Base):
'''
Semi-NMF with a sparsity constraint.
'''
def __init__(self, data, W=None, H=None, num_bases=2, sparsity=0.25, **kwargs):
super(ssNMF, self).__init__(data, num_bases=num_bases)
self._sparsity_param = sparsity
def _cost_function(self):
cost = np.sqrt(np.sum((self.data[:,:] - np.dot(self.W, self.H)) ** 2)) + self._sparsity_param * np.sum(self.H)
return cost
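# Reader's note: the cost above is one reading of the sparse semi-NMF objective
#   J(W, H) = ||X - W H||_F + lambda * sum(H)
# with lambda = self._sparsity_param, i.e. the Frobenius reconstruction error
# plus an L1-style penalty on the (nonnegative) encoding H.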
def _init_W(self):
kmeans = KMeans(n_clusters=self._num_bases, random_state=0).fit(self.data)
labels = kmeans.labels_
W = np.zeros([self.ndims[0], self._num_bases])
for i in range(self.ndims[0]):
for k in range(self._num_bases):
if labels[i] == k:
W[i, k] = 1
else:
W[i, k] = 0
self.W = W + 0.2
def _update_H(self):
WtV = np.dot(self.W.T, self.data)
WtW = np.dot(self.W.T, self.W)
# try:
# self.H = np.dot(np.linalg.inv(WtW), WtV)
# except np.linalg.linalg.LinAlgError:
# self.H = np.dot(np.linalg.pinv(WtW), WtV)
self.H *= WtV / (np.dot(WtW, self.H) + self._sparsity_param)
# self.H[self.H == 0] = 1e-16
def _update_W(self):
def pos(A):
return (np.abs(A) + A) / 2
def neg(A):
return (np.abs(A) - A) / 2
VHt = np.dot(self.data, self.H.T)
HHt = np.dot(self.H, self.H.T)
W1 = pos(VHt) + np.dot(self.W, neg(HHt))
W2 = neg(VHt) + np.dot(self.W, pos(HHt))
W1[W1 == 0] = 1e-16
W2[W2 == 0] = 1e-16
self.W *= np.sqrt(W1/W2)
for i in range(self.W.shape[1]):
self.W[:, i] /= np.linalg.norm(self.W[:, i]) | mit |
dcherian/tools | ROMS/pmacc/tools/post_tools/rompy/tags/rompy-0.1.6/make_standard_images.py | 1 | 9070 | #!/usr/bin/env python
import os
import glob
from optparse import OptionParser
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from rompy import rompy, plot_utils, utils, extract_utils
def surface_map(file,img_file=None,varname='salt',clim=None):
(data, coords) = rompy.extract(file,varname=varname,extraction_type='surface')
# plot_utils.plot_surface(coords['xm'],coords['ym'],data)
title = '%s %s %s %s' % ( extract_utils.run_title(file), os.path.basename(file), var_title_map[var], extract_utils.file_time(file).strftime(title_time_fmt) )
plot_utils.plot_map(coords['xm'],coords['ym'],data,filename=img_file, clim=clim, title=title, caxis_label=clabel_map[varname])
def main_basin_curtain(file,img_file,varname,n=4,clim=None): # Main Basin
if varname == 'U':
main_basin_U_curtain(file,img_file,n,clim)
else:
x,y = utils.high_res_main_basin_xy(n=n)
(data, coords) = rompy.extract(file, varname=varname, extraction_type='profile', x=x, y=y)
title = '%s %s Main Basin %s %s' % (extract_utils.run_title(file), os.path.basename(file), var_title_map[var], extract_utils.file_time(file).strftime(title_time_fmt))
plot_utils.plot_parker(coords=coords, data=data, varname=varname, region='Main Basin', filename=img_file, n=n, x_axis_offset=utils.offset_region(coords), clim=clim,cmap='banas_hsv_cm',labeled_contour_gap=2, title=title, caxis_label=clabel_map[varname])
def hood_canal_curtain(file,img_file,varname,n=1,clim=None): # Hood Canal
if varname == 'U':
hood_canal_U_curtain(file,img_file,n,clim)
else:
x,y = utils.high_res_hood_canal_xy(n=n)
(data, coords) = rompy.extract(file, varname=varname, extraction_type='profile', x=x, y=y)
title = '%s %s Hood Canal %s %s' % (extract_utils.run_title(file), os.path.basename(file), var_title_map[var], extract_utils.file_time(file).strftime(title_time_fmt))
plot_utils.plot_parker(coords=coords, data=data, varname=varname, region='Hood Canal', filename=img_file, n=n, x_axis_offset=utils.offset_region(coords), clim=clim, cmap='banas_hsv_cm',labeled_contour_gap=2, title=title, caxis_label=clabel_map[varname])
def hood_canal_U_curtain(file,img_file,n=1,clim=None): # velocity in Hood Canal
x,y = utils.high_res_hood_canal_xy(n=n)
(u, coords) = rompy.extract(file,varname='u',extraction_type='profile',x=x,y=y)
(v, coords) = rompy.extract(file,varname='v',extraction_type='profile',x=x,y=y)
data = np.zeros(u.shape)
for i in range(u.shape[1]):
if i == u.shape[1]-1:
x_vec = np.array([x[i] - x[i-1], y[i] - y[i-1]])
else:
x_vec = np.array([x[i+1] - x[i], y[i+1] - y[i]])
for j in range(u.shape[0]):
u_vec = np.array([u[j,i], v[j,i]])
data[j,i] = np.dot(x_vec,u_vec)/(np.sqrt(np.dot(x_vec,x_vec)))
data = np.ma.array(data, mask=np.abs(data) > 100)
title = '%s %s Hood Canal %s %s' % (extract_utils.run_title(file), os.path.basename(file), var_title_map['U'], extract_utils.file_time(file).strftime(title_time_fmt))
hood_U_clim = (np.array(clim)/2.0).tolist()
plot_utils.plot_parker(coords=coords,data=data,varname='U', region='Hood Canal', filename=img_file, n=n, clim=clim, x_axis_offset=utils.offset_region(coords), cmap='red_blue', title=title, caxis_label=clabel_map['U'])
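# Reader's note on the projection used in hood_canal_U_curtain above (and in
# main_basin_U_curtain and daves_U_curtain below, which follow the same
# pattern): each data[j, i] is the along-track velocity component
#   U_along = (x_vec . u_vec) / |x_vec|
# i.e. the horizontal velocity (u, v) projected onto the local section
# direction x_vec, masked where the raw velocities are fill values (>100).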
def main_basin_U_curtain(file,img_file,n=1,clim=None): # velocity in Main Basin
x,y = utils.high_res_main_basin_xy(n=n)
(u, coords) = rompy.extract(file,varname='u',extraction_type='profile',x=x,y=y)
(v, coords) = rompy.extract(file,varname='v',extraction_type='profile',x=x,y=y)
data = np.zeros(u.shape)
for i in range(u.shape[1]):
if i == u.shape[1]-1:
x_vec = np.array([x[i] - x[i-1], y[i] - y[i-1]])
else:
x_vec = np.array([x[i+1] - x[i], y[i+1] - y[i]])
for j in range(u.shape[0]):
u_vec = np.array([u[j,i], v[j,i]])
data[j,i] = np.dot(x_vec,u_vec)/(np.sqrt(np.dot(x_vec,x_vec)))
data = np.ma.array(data, mask=np.abs(data) > 100)
title = '%s %s Main Basin %s %s' % (extract_utils.run_title(file), os.path.basename(file), var_title_map['U'], extract_utils.file_time(file).strftime(title_time_fmt))
plot_utils.plot_parker(coords=coords,data=data,varname='U', region=' Main Basin', filename=img_file, n=n, clim=clim, x_axis_offset=utils.offset_region(coords),cmap='red_blue', title=title, caxis_label=clabel_map['U'])
def daves_curtain(file,img_file,section,varname,clim=None):
if varname == 'U':
daves_U_curtain(file,img_file,section,varname,clim)
else:
x = utils.get_daves_section_var(section=section,var='lon')
y = utils.get_daves_section_var(section=section,var='lat')
(data, coords) = rompy.extract(file, varname=varname, extraction_type='profile', x=x, y=y)
title = '%s %s %s %s %s' % (extract_utils.run_title(file), os.path.basename(file), section, var_title_map[var], extract_utils.file_time(file).strftime(title_time_fmt))
plot_utils.plot_parker(
coords=coords,
data=data,
filename=img_file,
title=title,
x_axis_offset=utils.offset_region(coords),
clim=clim,
cmap='banas_hsv_cm',
labeled_contour_gap=2,
caxis_label=clabel_map[varname],
inset=inset_dict[section],
ctd_ind=ctd_ind_dict[section],
label=utils.get_daves_section_var(section=section,var='label'),
label_ind=utils.get_daves_section_var(section=section,var='label_ind')
)
return
def daves_U_curtain(file,img_file,section,varname,clim):
x = utils.get_daves_section_var(section=section,var='lon')
y = utils.get_daves_section_var(section=section,var='lat')
(u, coords) = rompy.extract(file,varname='u',extraction_type='profile',x=x,y=y)
(v, coords) = rompy.extract(file,varname='v',extraction_type='profile',x=x,y=y)
data = np.zeros(u.shape)
for i in range(u.shape[1]):
if i == u.shape[1]-1:
x_vec = np.array([x[i] - x[i-1], y[i] - y[i-1]])
else:
x_vec = np.array([x[i+1] - x[i], y[i+1] - y[i]])
for j in range(u.shape[0]):
u_vec = np.array([u[j,i], v[j,i]])
data[j,i] = np.dot(x_vec,u_vec)/(np.sqrt(np.dot(x_vec,x_vec)))
data = np.ma.array(data, mask=np.abs(data) > 100)
title = '%s %s %s %s %s' % (extract_utils.run_title(file), os.path.basename(file), section, var_title_map['U'], extract_utils.file_time(file).strftime(title_time_fmt))
plot_utils.plot_parker(
coords=coords,
data=data,
filename=img_file,
title=title,
x_axis_offset=utils.offset_region(coords),
clim=clim,
cmap='red_blue',
labeled_contour_gap=2,
caxis_label=clabel_map[varname],
inset=inset_dict[section],
ctd_ind=ctd_ind_dict[section],
label=utils.get_daves_section_var(section=section,var='label'),
label_ind=utils.get_daves_section_var(section=section,var='label_ind')
)
return
# begin actual code that runs.
parser = OptionParser()
parser.add_option('-i', '--img_dir',
dest='img_dir',
default='./image_sequence',
help='Location to save images. Default is ./image_sequnce')
(options, args) = parser.parse_args()
if args == []:
fl = glob.glob('ocean_his*.nc')
print(fl)
file_list = [fl[0]]
else:
file_list = args
img_dir = options.img_dir
var_list = ['salt','temp','U']
var_title_map = {'salt':'Salinity','temp':'Temperature','U':'Velocity'}
title_time_fmt = '%Y-%m-%d %H:%M UTC'
clims = {'salt':[0, 21,33, 33], 'temp': [8, 20], 'U':[-2,2]}
clabel_map = {'temp': u'\u00B0 C', 'salt': 'psu', 'U': 'm/s'}
inset_dict = {'AI_HC':'Puget Sound','AI_WB':'Puget Sound','JdF_SoG':'Strait of Georgia'}
ctd_ind_dict = {
'AI_HC':utils.get_daves_section_var(section='AI_HC',var='PRISM_ctd_ind'),
'AI_WB':utils.get_daves_section_var(section='AI_WB',var='PRISM_ctd_ind'),
'JdF_SoG':utils.get_daves_section_var(section='JdF_SoG',var='IOS_ctd_ind')
}
ctd_ind_dict['JdF_SoG'].extend(utils.get_daves_section_var(section='JdF_SoG',var='JEMS_ctd_ind'))
for file in file_list:
ncf_index = os.path.basename(file)[:-3]
print('%s' %ncf_index)
if not os.path.exists(img_dir):
os.makedirs(img_dir)
for var in var_list:
hood_img_file = '%s/%s_hood_%s.png' %(img_dir, ncf_index,var)
main_img_file = '%s/%s_main_%s.png' %(img_dir, ncf_index,var)
surface_img_file = '%s/%s_surface_%s.png' % (img_dir, ncf_index, var)
AI_HC_img_file = '%s/AI_HC_%s_%s.png' %(img_dir,var,ncf_index)
AI_WB_img_file = '%s/AI_WB_%s_%s.png' %(img_dir,var,ncf_index)
JdF_SoG_img_file = '%s/JdF_SoG_%s_%s.png' %(img_dir,var,ncf_index)
print('making hood canal %s' % var)
hood_canal_curtain(file, hood_img_file, var, n=8, clim=clims[var])
print('making main basin %s' % var)
main_basin_curtain(file, main_img_file, var, n=8, clim=clims[var])
print('making AI_HC %s' % var)
daves_curtain(file,AI_HC_img_file,section='AI_HC',varname=var,clim=clims[var])
print('making AI_WB %s' % var)
daves_curtain(file,AI_WB_img_file,section='AI_WB',varname=var,clim=clims[var])
print('making JdF_SoG %s' % var)
daves_curtain(file,JdF_SoG_img_file,section='JdF_SoG',varname=var,clim=clims[var])
if not var == 'U':
print('making surface %s' % var)
surface_map(file,surface_img_file,var,clim=clims[var])
| mit |
ycool/apollo | modules/tools/vehicle_calibration/plot_data.py | 3 | 4296 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
This module provide function to plot the speed control info from log csv file
"""
import sys
import matplotlib.pyplot as plt
import numpy as np
import tkFileDialog
from process import get_start_index
from process import preprocess
from process import process
class Plotter(object):
"""
Plot the speed info
"""
def __init__(self):
"""
Init the speed info
"""
np.set_printoptions(precision=3)
self.file = open('temp_result.csv', 'a')
def process_data(self, filename):
"""
Load the file and preprocess th data
"""
self.data = preprocess(filename)
self.tablecmd, self.tablespeed, self.tableacc, self.speedsection, self.accsection, self.timesection = process(
self.data)
def plot_result(self):
"""
Plot the desired data
"""
fig, axarr = plt.subplots(2, 1, sharex=True)
plt.tight_layout()
fig.subplots_adjust(hspace=0)
axarr[0].plot(
self.data['time'], self.data['ctlbrake'], label='Brake CMD')
axarr[0].plot(
self.data['time'],
self.data['brake_percentage'],
label='Brake Output')
axarr[0].plot(
self.data['time'], self.data['ctlthrottle'], label='Throttle CMD')
axarr[0].plot(
self.data['time'],
self.data['throttle_percentage'],
label='Throttle Output')
axarr[0].plot(
self.data['time'],
self.data['engine_rpm'] / 100,
label='Engine RPM')
axarr[0].legend(fontsize='medium')
axarr[0].grid(True)
axarr[0].set_title('Command')
axarr[1].plot(
self.data['time'],
self.data['vehicle_speed'],
label='Vehicle Speed')
for i in range(len(self.timesection)):
axarr[1].plot(
self.timesection[i],
self.speedsection[i],
label='Speed Segment')
axarr[1].plot(
self.timesection[i], self.accsection[i], label='IMU Segment')
axarr[1].legend(fontsize='medium')
axarr[1].grid(True)
axarr[1].set_title('Speed')
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
#plt.tight_layout(pad=0.20)
fig.canvas.mpl_connect('key_press_event', self.press)
plt.show()
def press(self, event):
"""
Keyboard events during plotting
"""
if event.key == 'q' or event.key == 'Q':
self.file.close()
plt.close()
if event.key == 'w' or event.key == 'W':
for i in range(len(self.tablecmd)):
for j in range(len(self.tablespeed[i])):
self.file.write("%s, %s, %s\n" %
(self.tablecmd[i], self.tablespeed[i][j],
self.tableacc[i][j]))
print("Finished writing results")
def main():
"""
demo
"""
if len(sys.argv) == 2:
# Get the latest file
file_path = sys.argv[1]
else:
file_path = tkFileDialog.askopenfilename(
initialdir="/home/caros/.ros",
filetypes=(("csv files", ".csv"), ("all files", "*.*")))
print('File path: %s' % file_path)
plotter = Plotter()
plotter.process_data(file_path)
print('Finished reading the file.')
plotter.plot_result()
if __name__ == '__main__':
main()
| apache-2.0 |
clarkfitzg/seaborn | seaborn/axisgrid.py | 20 | 66716 | from __future__ import division
from itertools import product
from distutils.version import LooseVersion
import warnings
from textwrap import dedent
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from six import string_types
from . import utils
from .palettes import color_palette
class Grid(object):
"""Base class for grids of subplots."""
_margin_titles = False
_legend_out = True
def set(self, **kwargs):
"""Set attributes on each subplot Axes."""
for ax in self.axes.flat:
ax.set(**kwargs)
return self
def savefig(self, *args, **kwargs):
"""Save the figure."""
kwargs = kwargs.copy()
kwargs.setdefault("bbox_inches", "tight")
self.fig.savefig(*args, **kwargs)
def add_legend(self, legend_data=None, title=None, label_order=None,
**kwargs):
"""Draw a legend, maybe placing it outside axes and resizing the figure.
Parameters
----------
legend_data : dict, optional
Dictionary mapping label names to matplotlib artist handles. The
default reads from ``self._legend_data``.
title : string, optional
Title for the legend. The default reads from ``self._hue_var``.
label_order : list of labels, optional
The order that the legend entries should appear in. The default
reads from ``self.hue_names`` or sorts the keys in ``legend_data``.
kwargs : key, value pairings
Other keyword arguments are passed to the underlying legend methods
on the Figure or Axes object.
Returns
-------
self : Grid instance
Returns self for easy chaining.
"""
# Find the data for the legend
legend_data = self._legend_data if legend_data is None else legend_data
if label_order is None:
if self.hue_names is None:
label_order = np.sort(list(legend_data.keys()))
else:
label_order = list(map(str, self.hue_names))
blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)
handles = [legend_data.get(l, blank_handle) for l in label_order]
title = self._hue_var if title is None else title
try:
title_size = mpl.rcParams["axes.labelsize"] * .85
except TypeError: # labelsize is something like "large"
title_size = mpl.rcParams["axes.labelsize"]
# Set default legend kwargs
kwargs.setdefault("scatterpoints", 1)
if self._legend_out:
# Draw a full-figure legend outside the grid
figlegend = self.fig.legend(handles, label_order, "center right",
**kwargs)
self._legend = figlegend
figlegend.set_title(title)
# Set the title size in a roundabout way to maintain
# compatibility with matplotlib 1.1
prop = mpl.font_manager.FontProperties(size=title_size)
figlegend._legend_title_box._text.set_font_properties(prop)
# Draw the plot to set the bounding boxes correctly
plt.draw()
# Calculate and set the new width of the figure so the legend fits
legend_width = figlegend.get_window_extent().width / self.fig.dpi
figure_width = self.fig.get_figwidth()
self.fig.set_figwidth(figure_width + legend_width)
# Draw the plot again to get the new transformations
plt.draw()
# Now calculate how much space we need on the right side
legend_width = figlegend.get_window_extent().width / self.fig.dpi
space_needed = legend_width / (figure_width + legend_width)
margin = .04 if self._margin_titles else .01
self._space_needed = margin + space_needed
right = 1 - self._space_needed
# Place the subplot axes to give space for the legend
self.fig.subplots_adjust(right=right)
else:
# Draw a legend in the first axis
ax = self.axes.flat[0]
leg = ax.legend(handles, label_order, loc="best", **kwargs)
leg.set_title(title)
# Set the title size in a roundabout way to maintain
# compatibility with matplotlib 1.1
prop = mpl.font_manager.FontProperties(size=title_size)
leg._legend_title_box._text.set_font_properties(prop)
return self
def _clean_axis(self, ax):
"""Turn off axis labels and legend."""
ax.set_xlabel("")
ax.set_ylabel("")
ax.legend_ = None
return self
def _update_legend_data(self, ax):
"""Extract the legend data from an axes object and save it."""
handles, labels = ax.get_legend_handles_labels()
data = {l: h for h, l in zip(handles, labels)}
self._legend_data.update(data)
def _get_palette(self, data, hue, hue_order, palette):
"""Get a list of colors for the hue variable."""
if hue is None:
palette = color_palette(n_colors=1)
else:
hue_names = utils.categorical_order(data[hue], hue_order)
n_colors = len(hue_names)
# By default use either the current color palette or HUSL
if palette is None:
current_palette = mpl.rcParams["axes.color_cycle"]
if n_colors > len(current_palette):
colors = color_palette("husl", n_colors)
else:
colors = color_palette(n_colors=n_colors)
# Allow for palette to map from hue variable names
elif isinstance(palette, dict):
color_names = [palette[h] for h in hue_names]
colors = color_palette(color_names, n_colors)
# Otherwise act as if we just got a list of colors
else:
colors = color_palette(palette, n_colors)
palette = color_palette(colors, n_colors)
return palette
_facet_docs = dict(
data=dedent("""\
data : DataFrame
Tidy ("long-form") dataframe where each column is a variable and each
row is an observation.\
"""),
col_wrap=dedent("""\
col_wrap : int, optional
"Wrap" the column variable at this width, so that the column facets
span multiple rows. Incompatible with a ``row`` facet.\
"""),
share_xy=dedent("""\
share_{x,y} : bool, optional
If true, the facets will share y axes across columns and/or x axes
across rows.\
"""),
size=dedent("""\
size : scalar, optional
Height (in inches) of each facet. See also: ``aspect``.\
"""),
aspect=dedent("""\
aspect : scalar, optional
Aspect ratio of each facet, so that ``aspect * size`` gives the width
of each facet in inches.\
"""),
palette=dedent("""\
palette : seaborn color palette or dict, optional
Colors to use for the different levels of the ``hue`` variable. Should
be something that can be interpreted by :func:`color_palette`, or a
dictionary mapping hue levels to matplotlib colors.\
"""),
legend_out=dedent("""\
legend_out : bool, optional
If ``True``, the figure size will be extended, and the legend will be
drawn outside the plot on the center right.\
"""),
margin_titles=dedent("""\
margin_titles : bool, optional
If ``True``, the titles for the row variable are drawn to the right of
the last column. This option is experimental and may not work in all
cases.\
"""),
)
class FacetGrid(Grid):
"""Subplot grid for plotting conditional relationships."""
def __init__(self, data, row=None, col=None, hue=None, col_wrap=None,
sharex=True, sharey=True, size=3, aspect=1, palette=None,
row_order=None, col_order=None, hue_order=None, hue_kws=None,
dropna=True, legend_out=True, despine=True,
margin_titles=False, xlim=None, ylim=None, subplot_kws=None,
gridspec_kws=None):
MPL_GRIDSPEC_VERSION = LooseVersion('1.4')
OLD_MPL = LooseVersion(mpl.__version__) < MPL_GRIDSPEC_VERSION
# Determine the hue facet layer information
hue_var = hue
if hue is None:
hue_names = None
else:
hue_names = utils.categorical_order(data[hue], hue_order)
colors = self._get_palette(data, hue, hue_order, palette)
# Set up the lists of names for the row and column facet variables
if row is None:
row_names = []
else:
row_names = utils.categorical_order(data[row], row_order)
if col is None:
col_names = []
else:
col_names = utils.categorical_order(data[col], col_order)
# Additional dict of kwarg -> list of values for mapping the hue var
hue_kws = hue_kws if hue_kws is not None else {}
# Make a boolean mask that is True anywhere there is an NA
# value in one of the faceting variables, but only if dropna is True
none_na = np.zeros(len(data), np.bool)
if dropna:
row_na = none_na if row is None else data[row].isnull()
col_na = none_na if col is None else data[col].isnull()
hue_na = none_na if hue is None else data[hue].isnull()
not_na = ~(row_na | col_na | hue_na)
else:
not_na = ~none_na
# Compute the grid shape
ncol = 1 if col is None else len(col_names)
nrow = 1 if row is None else len(row_names)
self._n_facets = ncol * nrow
self._col_wrap = col_wrap
if col_wrap is not None:
if row is not None:
err = "Cannot use `row` and `col_wrap` together."
raise ValueError(err)
ncol = col_wrap
nrow = int(np.ceil(len(data[col].unique()) / col_wrap))
self._ncol = ncol
self._nrow = nrow
# Calculate the base figure size
# This can get stretched later by a legend
figsize = (ncol * size * aspect, nrow * size)
# Validate some inputs
if col_wrap is not None:
margin_titles = False
# Build the subplot keyword dictionary
subplot_kws = {} if subplot_kws is None else subplot_kws.copy()
gridspec_kws = {} if gridspec_kws is None else gridspec_kws.copy()
if xlim is not None:
subplot_kws["xlim"] = xlim
if ylim is not None:
subplot_kws["ylim"] = ylim
# Initialize the subplot grid
if col_wrap is None:
kwargs = dict(figsize=figsize, squeeze=False,
sharex=sharex, sharey=sharey,
subplot_kw=subplot_kws,
gridspec_kw=gridspec_kws)
if OLD_MPL:
_ = kwargs.pop('gridspec_kw', None)
if gridspec_kws:
msg = "gridspec module only available in mpl >= {}"
warnings.warn(msg.format(MPL_GRIDSPEC_VERSION))
fig, axes = plt.subplots(nrow, ncol, **kwargs)
self.axes = axes
else:
# If wrapping the col variable we need to make the grid ourselves
if gridspec_kws:
warnings.warn("`gridspec_kws` ignored when using `col_wrap`")
n_axes = len(col_names)
fig = plt.figure(figsize=figsize)
axes = np.empty(n_axes, object)
axes[0] = fig.add_subplot(nrow, ncol, 1, **subplot_kws)
if sharex:
subplot_kws["sharex"] = axes[0]
if sharey:
subplot_kws["sharey"] = axes[0]
for i in range(1, n_axes):
axes[i] = fig.add_subplot(nrow, ncol, i + 1, **subplot_kws)
self.axes = axes
# Now we turn off labels on the inner axes
if sharex:
for ax in self._not_bottom_axes:
for label in ax.get_xticklabels():
label.set_visible(False)
ax.xaxis.offsetText.set_visible(False)
if sharey:
for ax in self._not_left_axes:
for label in ax.get_yticklabels():
label.set_visible(False)
ax.yaxis.offsetText.set_visible(False)
# Set up the class attributes
# ---------------------------
# First the public API
self.data = data
self.fig = fig
self.axes = axes
self.row_names = row_names
self.col_names = col_names
self.hue_names = hue_names
self.hue_kws = hue_kws
# Next the private variables
self._nrow = nrow
self._row_var = row
self._ncol = ncol
self._col_var = col
self._margin_titles = margin_titles
self._col_wrap = col_wrap
self._hue_var = hue_var
self._colors = colors
self._legend_out = legend_out
self._legend = None
self._legend_data = {}
self._x_var = None
self._y_var = None
self._dropna = dropna
self._not_na = not_na
# Make the axes look good
fig.tight_layout()
if despine:
self.despine()
__init__.__doc__ = dedent("""\
Initialize the matplotlib figure and FacetGrid object.
The :class:`FacetGrid` is an object that links a Pandas DataFrame to
a matplotlib figure with a particular structure.
In particular, :class:`FacetGrid` is used to draw plots with multiple
Axes where each Axes shows the same relationship conditioned on
different levels of some variable. It's possible to condition on up to
three variables by assigning variables to the rows and columns of the
grid and using different colors for the plot elements.
The general approach to plotting here is called "small multiples",
where the same kind of plot is repeated multiple times, and the
specific use of small multiples to display the same relationship
conditioned on one or more other variables is often called a "trellis
plot".
The basic workflow is to initialize the :class:`FacetGrid` object with
the dataset and the variables that are used to structure the grid. Then
one or more plotting functions can be applied to each subset by calling
:meth:`FacetGrid.map` or :meth:`FacetGrid.map_dataframe`. Finally, the
plot can be tweaked with other methods to do things like change the
axis labels, use different ticks, or add a legend. See the detailed
code examples below for more information.
Parameters
----------
{data}
row, col, hue : strings
Variables that define subsets of the data, which will be drawn on
separate facets in the grid. See the ``*_order`` parameters to
control the order of levels of this variable.
{col_wrap}
{share_xy}
{size}
{aspect}
{palette}
{{row,col,hue}}_order : lists, optional
Order for the levels of the faceting variables. By default, this
will be the order that the levels appear in ``data`` or, if the
variables are pandas categoricals, the category order.
hue_kws : dictionary of param -> list of values mapping
Other keyword arguments to insert into the plotting call to let
other plot attributes vary across levels of the hue variable (e.g.
the markers in a scatterplot).
{legend_out}
despine : boolean, optional
Remove the top and right spines from the plots.
{margin_titles}
{{x, y}}lim: tuples, optional
Limits for each of the axes on each facet (only relevant when
share{{x, y}} is True).
subplot_kws : dict, optional
Dictionary of keyword arguments passed to matplotlib subplot(s)
methods.
gridspec_kws : dict, optional
Dictionary of keyword arguments passed to matplotlib's ``gridspec``
module (via ``plt.subplots``). Requires matplotlib >= 1.4 and is
ignored if ``col_wrap`` is not ``None``.
See Also
--------
PairGrid : Subplot grid for plotting pairwise relationships.
lmplot : Combine a regression plot and a :class:`FacetGrid`.
factorplot : Combine a categorical plot and a :class:`FacetGrid`.
Examples
--------
Initialize a 2x2 grid of facets using the tips dataset:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set(style="ticks", color_codes=True)
>>> tips = sns.load_dataset("tips")
>>> g = sns.FacetGrid(tips, col="time", row="smoker")
Draw a univariate plot on each facet:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> g = sns.FacetGrid(tips, col="time", row="smoker")
>>> g = g.map(plt.hist, "total_bill")
(Note that it's not necessary to re-catch the returned variable; it's
the same object, but doing so in the examples makes dealing with the
doctests somewhat less annoying).
Pass additional keyword arguments to the mapped function:
.. plot::
:context: close-figs
>>> import numpy as np
>>> bins = np.arange(0, 65, 5)
>>> g = sns.FacetGrid(tips, col="time", row="smoker")
>>> g = g.map(plt.hist, "total_bill", bins=bins, color="r")
Plot a bivariate function on each facet:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", row="smoker")
>>> g = g.map(plt.scatter, "total_bill", "tip", edgecolor="w")
Assign one of the variables to the color of the plot elements:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", hue="smoker")
>>> g = (g.map(plt.scatter, "total_bill", "tip", edgecolor="w")
... .add_legend())
Change the size and aspect ratio of each facet:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="day", size=4, aspect=.5)
>>> g = g.map(sns.boxplot, "time", "total_bill")
Specify the order for plot elements:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="smoker", col_order=["Yes", "No"])
>>> g = g.map(plt.hist, "total_bill", bins=bins, color="m")
Use a different color palette:
.. plot::
:context: close-figs
>>> kws = dict(s=50, linewidth=.5, edgecolor="w")
>>> g = sns.FacetGrid(tips, col="sex", hue="time", palette="Set1",
... hue_order=["Dinner", "Lunch"])
>>> g = (g.map(plt.scatter, "total_bill", "tip", **kws)
... .add_legend())
Use a dictionary mapping hue levels to colors:
.. plot::
:context: close-figs
>>> pal = dict(Lunch="seagreen", Dinner="gray")
>>> g = sns.FacetGrid(tips, col="sex", hue="time", palette=pal,
... hue_order=["Dinner", "Lunch"])
>>> g = (g.map(plt.scatter, "total_bill", "tip", **kws)
... .add_legend())
Additionally use a different marker for the hue levels:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="sex", hue="time", palette=pal,
... hue_order=["Dinner", "Lunch"],
... hue_kws=dict(marker=["^", "v"]))
>>> g = (g.map(plt.scatter, "total_bill", "tip", **kws)
... .add_legend())
"Wrap" a column variable with many levels into the rows:
.. plot::
:context: close-figs
>>> attend = sns.load_dataset("attention")
>>> g = sns.FacetGrid(attend, col="subject", col_wrap=5,
... size=1.5, ylim=(0, 10))
>>> g = g.map(sns.pointplot, "solutions", "score", scale=.7)
Define a custom bivariate function to map onto the grid:
.. plot::
:context: close-figs
>>> from scipy import stats
>>> def qqplot(x, y, **kwargs):
... _, xr = stats.probplot(x, fit=False)
... _, yr = stats.probplot(y, fit=False)
... plt.scatter(xr, yr, **kwargs)
>>> g = sns.FacetGrid(tips, col="smoker", hue="sex")
>>> g = (g.map(qqplot, "total_bill", "tip", **kws)
... .add_legend())
Define a custom function that uses a ``DataFrame`` object and accepts
column names as positional variables:
.. plot::
:context: close-figs
>>> import pandas as pd
>>> df = pd.DataFrame(
... data=np.random.randn(90, 4),
... columns=pd.Series(list("ABCD"), name="walk"),
... index=pd.date_range("Jan 1", "March 31", name="date"))
>>> df = df.cumsum(axis=0).stack().reset_index(name="val")
>>> def dateplot(x, y, **kwargs):
... ax = plt.gca()
... data = kwargs.pop("data")
... data.plot(x=x, y=y, ax=ax, grid=False, **kwargs)
>>> g = sns.FacetGrid(df, col="walk", col_wrap=2, size=3.5)
>>> g = g.map_dataframe(dateplot, "date", "val")
Use different axes labels after plotting:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="smoker", row="sex")
>>> g = (g.map(plt.scatter, "total_bill", "tip", color="g", **kws)
... .set_axis_labels("Total bill (US Dollars)", "Tip"))
Set other attributes that are shared across the facets:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="smoker", row="sex")
>>> g = (g.map(plt.scatter, "total_bill", "tip", color="r", **kws)
... .set(xlim=(0, 60), ylim=(0, 12),
... xticks=[10, 30, 50], yticks=[2, 6, 10]))
Use a different template for the facet titles:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips.sort("size"), col="size", col_wrap=3)
>>> g = (g.map(plt.hist, "tip", bins=np.arange(0, 13), color="c")
... .set_titles("{{col_name}} diners"))
Tighten the facets:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="smoker", row="sex",
... margin_titles=True)
>>> g = (g.map(plt.scatter, "total_bill", "tip", color="m", **kws)
... .set(xlim=(0, 60), ylim=(0, 12),
... xticks=[10, 30, 50], yticks=[2, 6, 10])
... .fig.subplots_adjust(wspace=.05, hspace=.05))
""").format(**_facet_docs)
def facet_data(self):
"""Generator for name indices and data subsets for each facet.
Yields
------
(i, j, k), data_ijk : tuple of ints, DataFrame
The ints provide an index into the {row, col, hue}_names attribute,
and the dataframe contains a subset of the full data corresponding
to each facet. The generator yields subsets that correspond with
the self.axes.flat iterator, or self.axes[i, j] when `col_wrap`
is None.
"""
data = self.data
# Construct masks for the row variable
if self._nrow == 1 or self._col_wrap is not None:
row_masks = [np.repeat(True, len(self.data))]
else:
row_masks = [data[self._row_var] == n for n in self.row_names]
# Construct masks for the column variable
if self._ncol == 1:
col_masks = [np.repeat(True, len(self.data))]
else:
col_masks = [data[self._col_var] == n for n in self.col_names]
# Construct masks for the hue variable
if len(self._colors) == 1:
hue_masks = [np.repeat(True, len(self.data))]
else:
hue_masks = [data[self._hue_var] == n for n in self.hue_names]
# Here is the main generator loop
for (i, row), (j, col), (k, hue) in product(enumerate(row_masks),
enumerate(col_masks),
enumerate(hue_masks)):
data_ijk = data[row & col & hue & self._not_na]
yield (i, j, k), data_ijk
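# Reader's sketch (the names are the grid's own attributes): `map` and
# `map_dataframe` below consume this generator as
#   for (i, j, k), data_ijk in self.facet_data():
#       ax = self.facet_axis(i, j)
#       # ... plot the subset data_ijk on ax ...
# which is also the pattern a custom plotting loop over the grid would follow.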
def map(self, func, *args, **kwargs):
"""Apply a plotting function to each facet's subset of the data.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. It
must plot to the currently active matplotlib Axes and take a
`color` keyword argument. If faceting on the `hue` dimension,
it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : object
Returns self.
"""
# If color was a keyword argument, grab it here
kw_color = kwargs.pop("color", None)
# Iterate over the data subsets
for (row_i, col_j, hue_k), data_ijk in self.facet_data():
# If this subset is null, move on
if not data_ijk.values.tolist():
continue
# Get the current axis
ax = self.facet_axis(row_i, col_j)
# Decide what color to plot with
kwargs["color"] = self._facet_color(hue_k, kw_color)
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[hue_k]
# Insert a label in the keyword arguments for the legend
if self._hue_var is not None:
kwargs["label"] = str(self.hue_names[hue_k])
# Get the actual data we are going to plot with
plot_data = data_ijk[list(args)]
if self._dropna:
plot_data = plot_data.dropna()
plot_args = [v for k, v in plot_data.iteritems()]
# Some matplotlib functions don't handle pandas objects correctly
if func.__module__ is not None:
if func.__module__.startswith("matplotlib"):
plot_args = [v.values for v in plot_args]
# Draw the plot
self._facet_plot(func, ax, plot_args, kwargs)
# Finalize the annotations and layout
self._finalize_grid(args[:2])
return self
def map_dataframe(self, func, *args, **kwargs):
"""Like `map` but passes args as strings and inserts data in kwargs.
This method is suitable for plotting with functions that accept a
long-form DataFrame as a `data` keyword argument and access the
data in that DataFrame using string variable names.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. Unlike
the `map` method, a function used here must "understand" Pandas
objects. It also must plot to the currently active matplotlib Axes
and take a `color` keyword argument. If faceting on the `hue`
dimension, it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : object
Returns self.
"""
# If color was a keyword argument, grab it here
kw_color = kwargs.pop("color", None)
# Iterate over the data subsets
for (row_i, col_j, hue_k), data_ijk in self.facet_data():
# If this subset is null, move on
if not data_ijk.values.tolist():
continue
# Get the current axis
ax = self.facet_axis(row_i, col_j)
# Decide what color to plot with
kwargs["color"] = self._facet_color(hue_k, kw_color)
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[hue_k]
# Insert a label in the keyword arguments for the legend
if self._hue_var is not None:
kwargs["label"] = self.hue_names[hue_k]
# Stick the facet dataframe into the kwargs
if self._dropna:
data_ijk = data_ijk.dropna()
kwargs["data"] = data_ijk
# Draw the plot
self._facet_plot(func, ax, args, kwargs)
# Finalize the annotations and layout
self._finalize_grid(args[:2])
return self
def _facet_color(self, hue_index, kw_color):
color = self._colors[hue_index]
if kw_color is not None:
return kw_color
elif color is not None:
return color
def _facet_plot(self, func, ax, plot_args, plot_kwargs):
# Draw the plot
func(*plot_args, **plot_kwargs)
# Sort out the supporting information
self._update_legend_data(ax)
self._clean_axis(ax)
def _finalize_grid(self, axlabels):
"""Finalize the annotations and layout."""
self.set_axis_labels(*axlabels)
self.set_titles()
self.fig.tight_layout()
def facet_axis(self, row_i, col_j):
"""Make the axis identified by these indices active and return it."""
# Calculate the actual indices of the axes to plot on
if self._col_wrap is not None:
ax = self.axes.flat[col_j]
else:
ax = self.axes[row_i, col_j]
# Get a reference to the axes object we want, and make it active
plt.sca(ax)
return ax
def despine(self, **kwargs):
"""Remove axis spines from the facets."""
utils.despine(self.fig, **kwargs)
return self
def set_axis_labels(self, x_var=None, y_var=None):
"""Set axis labels on the left column and bottom row of the grid."""
if x_var is not None:
self._x_var = x_var
self.set_xlabels(x_var)
if y_var is not None:
self._y_var = y_var
self.set_ylabels(y_var)
return self
def set_xlabels(self, label=None, **kwargs):
"""Label the x axis on the bottom row of the grid."""
if label is None:
label = self._x_var
for ax in self._bottom_axes:
ax.set_xlabel(label, **kwargs)
return self
def set_ylabels(self, label=None, **kwargs):
"""Label the y axis on the left column of the grid."""
if label is None:
label = self._y_var
for ax in self._left_axes:
ax.set_ylabel(label, **kwargs)
return self
def set_xticklabels(self, labels=None, step=None, **kwargs):
"""Set x axis tick labels on the bottom row of the grid."""
for ax in self._bottom_axes:
if labels is None:
labels = [l.get_text() for l in ax.get_xticklabels()]
if step is not None:
xticks = ax.get_xticks()[::step]
labels = labels[::step]
ax.set_xticks(xticks)
ax.set_xticklabels(labels, **kwargs)
return self
def set_yticklabels(self, labels=None, **kwargs):
"""Set y axis tick labels on the left column of the grid."""
for ax in self._left_axes:
if labels is None:
labels = [l.get_text() for l in ax.get_yticklabels()]
ax.set_yticklabels(labels, **kwargs)
return self
def set_titles(self, template=None, row_template=None, col_template=None,
**kwargs):
"""Draw titles either above each facet or on the grid margins.
Parameters
----------
template : string
Template for all titles with the formatting keys {col_var} and
{col_name} (if using a `col` faceting variable) and/or {row_var}
and {row_name} (if using a `row` faceting variable).
row_template:
Template for the row variable when titles are drawn on the grid
margins. Must have {row_var} and {row_name} formatting keys.
col_template:
Template for the row variable when titles are drawn on the grid
margins. Must have {col_var} and {col_name} formatting keys.
Returns
-------
self: object
Returns self.
"""
args = dict(row_var=self._row_var, col_var=self._col_var)
kwargs["size"] = kwargs.pop("size", mpl.rcParams["axes.labelsize"])
# Establish default templates
if row_template is None:
row_template = "{row_var} = {row_name}"
if col_template is None:
col_template = "{col_var} = {col_name}"
if template is None:
if self._row_var is None:
template = col_template
elif self._col_var is None:
template = row_template
else:
template = " | ".join([row_template, col_template])
if self._margin_titles:
if self.row_names is not None:
# Draw the row titles on the right edge of the grid
for i, row_name in enumerate(self.row_names):
ax = self.axes[i, -1]
args.update(dict(row_name=row_name))
title = row_template.format(**args)
bgcolor = self.fig.get_facecolor()
ax.annotate(title, xy=(1.02, .5), xycoords="axes fraction",
rotation=270, ha="left", va="center",
backgroundcolor=bgcolor, **kwargs)
if self.col_names is not None:
# Draw the column titles as normal titles
for j, col_name in enumerate(self.col_names):
args.update(dict(col_name=col_name))
title = col_template.format(**args)
self.axes[0, j].set_title(title, **kwargs)
return self
# Otherwise title each facet with all the necessary information
if (self._row_var is not None) and (self._col_var is not None):
for i, row_name in enumerate(self.row_names):
for j, col_name in enumerate(self.col_names):
args.update(dict(row_name=row_name, col_name=col_name))
title = template.format(**args)
self.axes[i, j].set_title(title, **kwargs)
elif self.row_names is not None and len(self.row_names):
for i, row_name in enumerate(self.row_names):
args.update(dict(row_name=row_name))
title = template.format(**args)
self.axes[i, 0].set_title(title, **kwargs)
elif self.col_names is not None and len(self.col_names):
for i, col_name in enumerate(self.col_names):
args.update(dict(col_name=col_name))
title = template.format(**args)
# Index the flat array so col_wrap works
self.axes.flat[i].set_title(title, **kwargs)
return self
@property
def ax(self):
"""Easy access to single axes."""
if self.axes.shape == (1, 1):
return self.axes[0, 0]
else:
raise AttributeError
@property
def _inner_axes(self):
"""Return a flat array of the inner axes."""
if self._col_wrap is None:
return self.axes[:-1, 1:].flat
else:
axes = []
n_empty = self._nrow * self._ncol - self._n_facets
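# With col_wrap, a facet counts as "inner" only if it is not in the first
# column and not in the bottom row of populated facets (facets sitting
# above one of the n_empty unused slots count as bottom), mirroring the
# axes[:-1, 1:] slice used in the non-wrapped case above.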
for i, ax in enumerate(self.axes):
append = (i % self._ncol and
i < (self._ncol * (self._nrow - 1)) and
i < (self._ncol * (self._nrow - 1) - n_empty))
if append:
axes.append(ax)
return np.array(axes, object).flat
@property
def _left_axes(self):
"""Return a flat array of the left column of axes."""
if self._col_wrap is None:
return self.axes[:, 0].flat
else:
axes = []
for i, ax in enumerate(self.axes):
if not i % self._ncol:
axes.append(ax)
return np.array(axes, object).flat
@property
def _not_left_axes(self):
"""Return a flat array of axes that aren't on the left column."""
if self._col_wrap is None:
return self.axes[:, 1:].flat
else:
axes = []
for i, ax in enumerate(self.axes):
if i % self._ncol:
axes.append(ax)
return np.array(axes, object).flat
@property
def _bottom_axes(self):
"""Return a flat array of the bottom row of axes."""
if self._col_wrap is None:
return self.axes[-1, :].flat
else:
axes = []
n_empty = self._nrow * self._ncol - self._n_facets
for i, ax in enumerate(self.axes):
append = (i >= (self._ncol * (self._nrow - 1)) or
i >= (self._ncol * (self._nrow - 1) - n_empty))
if append:
axes.append(ax)
return np.array(axes, object).flat
@property
def _not_bottom_axes(self):
"""Return a flat array of axes that aren't on the bottom row."""
if self._col_wrap is None:
return self.axes[:-1, :].flat
else:
axes = []
n_empty = self._nrow * self._ncol - self._n_facets
for i, ax in enumerate(self.axes):
append = (i < (self._ncol * (self._nrow - 1)) and
i < (self._ncol * (self._nrow - 1) - n_empty))
if append:
axes.append(ax)
return np.array(axes, object).flat
class PairGrid(Grid):
"""Subplot grid for plotting pairwise relationships in a dataset."""
def __init__(self, data, hue=None, hue_order=None, palette=None,
hue_kws=None, vars=None, x_vars=None, y_vars=None,
diag_sharey=True, size=2.5, aspect=1,
despine=True, dropna=True):
"""Initialize the plot figure and PairGrid object.
Parameters
----------
data : DataFrame
Tidy (long-form) dataframe where each column is a variable and
each row is an observation.
hue : string (variable name), optional
Variable in ``data`` to map plot aspects to different colors.
hue_order : list of strings
Order for the levels of the hue variable in the palette
palette : dict or seaborn color palette
Set of colors for mapping the ``hue`` variable. If a dict, keys
should be values in the ``hue`` variable.
hue_kws : dictionary of param -> list of values mapping
Other keyword arguments to insert into the plotting call to let
other plot attributes vary across levels of the hue variable (e.g.
the markers in a scatterplot).
vars : list of variable names, optional
Variables within ``data`` to use, otherwise use every column with
a numeric datatype.
{x, y}_vars : lists of variable names, optional
Variables within ``data`` to use separately for the rows and
columns of the figure; i.e. to make a non-square plot.
size : scalar, optional
Height (in inches) of each facet.
aspect : scalar, optional
Aspect * size gives the width (in inches) of each facet.
despine : boolean, optional
Remove the top and right spines from the plots.
dropna : boolean, optional
Drop missing values from the data before plotting.
See Also
--------
pairplot : Easily drawing common uses of :class:`PairGrid`.
FacetGrid : Subplot grid for plotting conditional relationships.
Examples
--------
Draw a scatterplot for each pairwise relationship:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> import seaborn as sns; sns.set()
>>> iris = sns.load_dataset("iris")
>>> g = sns.PairGrid(iris)
>>> g = g.map(plt.scatter)
Show a univariate distribution on the diagonal:
.. plot::
:context: close-figs
>>> g = sns.PairGrid(iris)
>>> g = g.map_diag(plt.hist)
>>> g = g.map_offdiag(plt.scatter)
(It's not actually necessary to catch the return value every time,
as it is the same object, but it makes it easier to deal with the
doctests).
Color the points using a categorical variable:
.. plot::
:context: close-figs
>>> g = sns.PairGrid(iris, hue="species")
>>> g = g.map(plt.scatter)
>>> g = g.add_legend()
Plot a subset of variables
.. plot::
:context: close-figs
>>> g = sns.PairGrid(iris, vars=["sepal_length", "sepal_width"])
>>> g = g.map(plt.scatter)
Pass additional keyword arguments to the functions
.. plot::
:context: close-figs
>>> g = sns.PairGrid(iris)
>>> g = g.map_diag(plt.hist, edgecolor="w")
>>> g = g.map_offdiag(plt.scatter, edgecolor="w", s=40)
Use different variables for the rows and columns:
.. plot::
:context: close-figs
>>> g = sns.PairGrid(iris,
... x_vars=["sepal_length", "sepal_width"],
... y_vars=["petal_length", "petal_width"])
>>> g = g.map(plt.scatter)
Use different functions on the upper and lower triangles:
.. plot::
:context: close-figs
>>> g = sns.PairGrid(iris)
>>> g = g.map_upper(plt.scatter)
>>> g = g.map_lower(sns.kdeplot, cmap="Blues_d")
>>> g = g.map_diag(sns.kdeplot, lw=3, legend=False)
Use different colors and markers for each categorical level:
.. plot::
:context: close-figs
>>> g = sns.PairGrid(iris, hue="species", palette="Set2",
... hue_kws={"marker": ["o", "s", "D"]})
>>> g = g.map(plt.scatter, linewidths=1, edgecolor="w", s=40)
>>> g = g.add_legend()
"""
# Sort out the variables that define the grid
if vars is not None:
x_vars = list(vars)
y_vars = list(vars)
elif (x_vars is not None) or (y_vars is not None):
if (x_vars is None) or (y_vars is None):
raise ValueError("Must specify `x_vars` and `y_vars`")
else:
numeric_cols = self._find_numeric_cols(data)
x_vars = numeric_cols
y_vars = numeric_cols
if np.isscalar(x_vars):
x_vars = [x_vars]
if np.isscalar(y_vars):
y_vars = [y_vars]
self.x_vars = list(x_vars)
self.y_vars = list(y_vars)
self.square_grid = self.x_vars == self.y_vars
# Create the figure and the array of subplots
figsize = len(x_vars) * size * aspect, len(y_vars) * size
fig, axes = plt.subplots(len(y_vars), len(x_vars),
figsize=figsize,
sharex="col", sharey="row",
squeeze=False)
self.fig = fig
self.axes = axes
self.data = data
# Save what we are going to do with the diagonal
self.diag_sharey = diag_sharey
self.diag_axes = None
# Label the axes
self._add_axis_labels()
# Sort out the hue variable
self._hue_var = hue
if hue is None:
self.hue_names = ["_nolegend_"]
self.hue_vals = pd.Series(["_nolegend_"] * len(data),
index=data.index)
else:
hue_names = utils.categorical_order(data[hue], hue_order)
if dropna:
# Filter NA from the list of unique hue names
hue_names = list(filter(pd.notnull, hue_names))
self.hue_names = hue_names
self.hue_vals = data[hue]
# Additional dict of kwarg -> list of values for mapping the hue var
self.hue_kws = hue_kws if hue_kws is not None else {}
self.palette = self._get_palette(data, hue, hue_order, palette)
self._legend_data = {}
# Make the plot look nice
if despine:
utils.despine(fig=fig)
fig.tight_layout()
def map(self, func, **kwargs):
"""Plot with the same function in every subplot.
Parameters
----------
func : callable plotting function
Must take x, y arrays as positional arguments and draw onto the
"currently active" matplotlib Axes.
"""
kw_color = kwargs.pop("color", None)
for i, y_var in enumerate(self.y_vars):
for j, x_var in enumerate(self.x_vars):
hue_grouped = self.data.groupby(self.hue_vals)
for k, label_k in enumerate(self.hue_names):
# Attempt to get data for this level, allowing for empty
try:
data_k = hue_grouped.get_group(label_k)
except KeyError:
data_k = pd.DataFrame(columns=self.data.columns,
dtype=np.float)
ax = self.axes[i, j]
plt.sca(ax)
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[k]
color = self.palette[k] if kw_color is None else kw_color
func(data_k[x_var], data_k[y_var],
label=label_k, color=color, **kwargs)
self._clean_axis(ax)
self._update_legend_data(ax)
if kw_color is not None:
kwargs["color"] = kw_color
self._add_axis_labels()
return self
def map_diag(self, func, **kwargs):
"""Plot with a univariate function on each diagonal subplot.
Parameters
----------
func : callable plotting function
Must take an x array as a positional argument and draw onto the
"currently active" matplotlib Axes. There is a special case when
using a ``hue`` variable and ``plt.hist``; the histogram will be
plotted with stacked bars.
"""
# Add special diagonal axes for the univariate plot
if self.square_grid and self.diag_axes is None:
diag_axes = []
for i, (var, ax) in enumerate(zip(self.x_vars,
np.diag(self.axes))):
if i and self.diag_sharey:
diag_ax = ax._make_twin_axes(sharex=ax,
sharey=diag_axes[0],
frameon=False)
else:
diag_ax = ax._make_twin_axes(sharex=ax, frameon=False)
diag_ax.set_axis_off()
diag_axes.append(diag_ax)
self.diag_axes = np.array(diag_axes, np.object)
# Plot on each of the diagonal axes
for i, var in enumerate(self.x_vars):
ax = self.diag_axes[i]
hue_grouped = self.data[var].groupby(self.hue_vals)
# Special-case plt.hist with stacked bars
if func is plt.hist:
plt.sca(ax)
vals = []
for label in self.hue_names:
# Attempt to get data for this level, allowing for empty
try:
vals.append(np.asarray(hue_grouped.get_group(label)))
except KeyError:
vals.append(np.array([]))
func(vals, color=self.palette, histtype="barstacked",
**kwargs)
else:
for k, label_k in enumerate(self.hue_names):
# Attempt to get data for this level, allowing for empty
try:
data_k = hue_grouped.get_group(label_k)
except KeyError:
data_k = np.array([])
plt.sca(ax)
func(data_k, label=label_k,
color=self.palette[k], **kwargs)
self._clean_axis(ax)
self._add_axis_labels()
return self
def map_lower(self, func, **kwargs):
"""Plot with a bivariate function on the lower diagonal subplots.
Parameters
----------
func : callable plotting function
Must take x, y arrays as positional arguments and draw onto the
"currently active" matplotlib Axes.
"""
kw_color = kwargs.pop("color", None)
for i, j in zip(*np.tril_indices_from(self.axes, -1)):
hue_grouped = self.data.groupby(self.hue_vals)
for k, label_k in enumerate(self.hue_names):
# Attempt to get data for this level, allowing for empty
try:
data_k = hue_grouped.get_group(label_k)
except KeyError:
data_k = pd.DataFrame(columns=self.data.columns,
dtype=np.float)
ax = self.axes[i, j]
plt.sca(ax)
x_var = self.x_vars[j]
y_var = self.y_vars[i]
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[k]
color = self.palette[k] if kw_color is None else kw_color
func(data_k[x_var], data_k[y_var], label=label_k,
color=color, **kwargs)
self._clean_axis(ax)
self._update_legend_data(ax)
if kw_color is not None:
kwargs["color"] = kw_color
self._add_axis_labels()
return self
def map_upper(self, func, **kwargs):
"""Plot with a bivariate function on the upper diagonal subplots.
Parameters
----------
func : callable plotting function
Must take x, y arrays as positional arguments and draw onto the
"currently active" matplotlib Axes.
"""
kw_color = kwargs.pop("color", None)
for i, j in zip(*np.triu_indices_from(self.axes, 1)):
hue_grouped = self.data.groupby(self.hue_vals)
for k, label_k in enumerate(self.hue_names):
# Attempt to get data for this level, allowing for empty
try:
data_k = hue_grouped.get_group(label_k)
except KeyError:
data_k = pd.DataFrame(columns=self.data.columns,
dtype=np.float)
ax = self.axes[i, j]
plt.sca(ax)
x_var = self.x_vars[j]
y_var = self.y_vars[i]
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[k]
color = self.palette[k] if kw_color is None else kw_color
func(data_k[x_var], data_k[y_var], label=label_k,
color=color, **kwargs)
self._clean_axis(ax)
self._update_legend_data(ax)
if kw_color is not None:
kwargs["color"] = kw_color
return self
def map_offdiag(self, func, **kwargs):
"""Plot with a bivariate function on the off-diagonal subplots.
Parameters
----------
func : callable plotting function
Must take x, y arrays as positional arguments and draw onto the
"currently active" matplotlib Axes.
"""
self.map_lower(func, **kwargs)
self.map_upper(func, **kwargs)
return self
def _add_axis_labels(self):
"""Add labels to the left and bottom Axes."""
for ax, label in zip(self.axes[-1, :], self.x_vars):
ax.set_xlabel(label)
for ax, label in zip(self.axes[:, 0], self.y_vars):
ax.set_ylabel(label)
def _find_numeric_cols(self, data):
"""Find which variables in a DataFrame are numeric."""
# This can't be the best way to do this, but I do not
# know what the best way might be, so this seems ok
numeric_cols = []
for col in data:
try:
data[col].astype(np.float)
numeric_cols.append(col)
except (ValueError, TypeError):
pass
return numeric_cols
class JointGrid(object):
"""Grid for drawing a bivariate plot with marginal univariate plots."""
def __init__(self, x, y, data=None, size=6, ratio=5, space=.2,
dropna=True, xlim=None, ylim=None):
"""Set up the grid of subplots.
Parameters
----------
x, y : strings or vectors
Data or names of variables in ``data``.
data : DataFrame, optional
DataFrame when ``x`` and ``y`` are variable names.
size : numeric
Size of each side of the figure in inches (it will be square).
ratio : numeric
Ratio of joint axes size to marginal axes height.
space : numeric, optional
Space between the joint and marginal axes
dropna : bool, optional
If True, remove observations that are missing from `x` and `y`.
{x, y}lim : two-tuples, optional
Axis limits to set before plotting.
See Also
--------
jointplot : High-level interface for drawing bivariate plots with
several different default plot kinds.
Examples
--------
Initialize the figure but don't draw any plots onto it:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set(style="ticks", color_codes=True)
>>> tips = sns.load_dataset("tips")
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips)
Add plots using default parameters:
.. plot::
:context: close-figs
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips)
>>> g = g.plot(sns.regplot, sns.distplot)
Draw the joint and marginal plots separately, which allows finer-grained
control over other parameters:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips)
>>> g = g.plot_joint(plt.scatter, color=".5", edgecolor="white")
>>> g = g.plot_marginals(sns.distplot, kde=False, color=".5")
Draw the two marginal plots separately:
.. plot::
:context: close-figs
>>> import numpy as np
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips)
>>> g = g.plot_joint(plt.scatter, color="m", edgecolor="white")
>>> _ = g.ax_marg_x.hist(tips["total_bill"], color="b", alpha=.6,
... bins=np.arange(0, 60, 5))
>>> _ = g.ax_marg_y.hist(tips["tip"], color="r", alpha=.6,
... orientation="horizontal",
... bins=np.arange(0, 12, 1))
Add an annotation with a statistic summarizing the bivariate
relationship:
.. plot::
:context: close-figs
>>> from scipy import stats
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips)
>>> g = g.plot_joint(plt.scatter,
... color="g", s=40, edgecolor="white")
>>> g = g.plot_marginals(sns.distplot, kde=False, color="g")
>>> g = g.annotate(stats.pearsonr)
Use a custom function and formatting for the annotation
.. plot::
:context: close-figs
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips)
>>> g = g.plot_joint(plt.scatter,
... color="g", s=40, edgecolor="white")
>>> g = g.plot_marginals(sns.distplot, kde=False, color="g")
>>> rsquare = lambda a, b: stats.pearsonr(a, b)[0] ** 2
>>> g = g.annotate(rsquare, template="{stat}: {val:.2f}",
... stat="$R^2$", loc="upper left", fontsize=12)
Remove the space between the joint and marginal axes:
.. plot::
:context: close-figs
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips, space=0)
>>> g = g.plot_joint(sns.kdeplot, cmap="Blues_d")
>>> g = g.plot_marginals(sns.kdeplot, shade=True)
Draw a smaller plot with relatively larger marginal axes:
.. plot::
:context: close-figs
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips,
... size=5, ratio=2)
>>> g = g.plot_joint(sns.kdeplot, cmap="Reds_d")
>>> g = g.plot_marginals(sns.kdeplot, color="r", shade=True)
Set limits on the axes:
.. plot::
:context: close-figs
>>> g = sns.JointGrid(x="total_bill", y="tip", data=tips,
... xlim=(0, 50), ylim=(0, 8))
>>> g = g.plot_joint(sns.kdeplot, cmap="Purples_d")
>>> g = g.plot_marginals(sns.kdeplot, color="m", shade=True)
"""
# Set up the subplot grid
f = plt.figure(figsize=(size, size))
gs = plt.GridSpec(ratio + 1, ratio + 1)
ax_joint = f.add_subplot(gs[1:, :-1])
ax_marg_x = f.add_subplot(gs[0, :-1], sharex=ax_joint)
ax_marg_y = f.add_subplot(gs[1:, -1], sharey=ax_joint)
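# Layout note: the joint axes occupy the lower-left ratio x ratio block of
# the (ratio + 1) x (ratio + 1) grid; the marginal axes take the top row
# (x margin) and right column (y margin) and share the joint axes' scales.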
self.fig = f
self.ax_joint = ax_joint
self.ax_marg_x = ax_marg_x
self.ax_marg_y = ax_marg_y
# Turn off tick visibility for the measure axis on the marginal plots
plt.setp(ax_marg_x.get_xticklabels(), visible=False)
plt.setp(ax_marg_y.get_yticklabels(), visible=False)
# Turn off the ticks on the density axis for the marginal plots
plt.setp(ax_marg_x.yaxis.get_majorticklines(), visible=False)
plt.setp(ax_marg_x.yaxis.get_minorticklines(), visible=False)
plt.setp(ax_marg_y.xaxis.get_majorticklines(), visible=False)
plt.setp(ax_marg_y.xaxis.get_minorticklines(), visible=False)
plt.setp(ax_marg_x.get_yticklabels(), visible=False)
plt.setp(ax_marg_y.get_xticklabels(), visible=False)
ax_marg_x.yaxis.grid(False)
ax_marg_y.xaxis.grid(False)
# Possibly extract the variables from a DataFrame
if data is not None:
if x in data:
x = data[x]
if y in data:
y = data[y]
# Possibly drop NA
if dropna:
not_na = pd.notnull(x) & pd.notnull(y)
x = x[not_na]
y = y[not_na]
# Find the names of the variables
if hasattr(x, "name"):
xlabel = x.name
ax_joint.set_xlabel(xlabel)
if hasattr(y, "name"):
ylabel = y.name
ax_joint.set_ylabel(ylabel)
# Convert the x and y data to arrays for plotting
self.x = np.asarray(x)
self.y = np.asarray(y)
if xlim is not None:
ax_joint.set_xlim(xlim)
if ylim is not None:
ax_joint.set_ylim(ylim)
# Make the grid look nice
utils.despine(f)
utils.despine(ax=ax_marg_x, left=True)
utils.despine(ax=ax_marg_y, bottom=True)
f.tight_layout()
f.subplots_adjust(hspace=space, wspace=space)
def plot(self, joint_func, marginal_func, annot_func=None):
"""Shortcut to draw the full plot.
Use `plot_joint` and `plot_marginals` directly for more control.
Parameters
----------
joint_func, marginal_func: callables
Functions to draw the bivariate and univariate plots.
Returns
-------
self : JointGrid instance
Returns `self`.
"""
self.plot_marginals(marginal_func)
self.plot_joint(joint_func)
if annot_func is not None:
self.annotate(annot_func)
return self
def plot_joint(self, func, **kwargs):
"""Draw a bivariate plot of `x` and `y`.
Parameters
----------
func : plotting callable
This must take two 1d arrays of data as the first two
positional arguments, and it must plot on the "current" axes.
kwargs : key, value mappings
Keyword arguments are passed to the plotting function.
Returns
-------
self : JointGrid instance
Returns `self`.
"""
plt.sca(self.ax_joint)
func(self.x, self.y, **kwargs)
return self
def plot_marginals(self, func, **kwargs):
"""Draw univariate plots for `x` and `y` separately.
Parameters
----------
func : plotting callable
This must take a 1d array of data as the first positional
argument, it must plot on the "current" axes, and it must
accept a "vertical" keyword argument to orient the measure
dimension of the plot vertically.
kwargs : key, value mappings
Keyword arguments are passed to the plotting function.
Returns
-------
self : JointGrid instance
Returns `self`.
"""
kwargs["vertical"] = False
plt.sca(self.ax_marg_x)
func(self.x, **kwargs)
kwargs["vertical"] = True
plt.sca(self.ax_marg_y)
func(self.y, **kwargs)
return self
def annotate(self, func, template=None, stat=None, loc="best", **kwargs):
"""Annotate the plot with a statistic about the relationship.
Parameters
----------
func : callable
Statistical function that maps the x, y vectors either to (val, p)
or to val.
template : string format template, optional
The template must have the format keys "stat" and "val";
if `func` returns a p value, it should also have the key "p".
stat : string, optional
Name to use for the statistic in the annotation, by default it
uses the name of `func`.
loc : string or int, optional
Matplotlib legend location code; used to place the annotation.
kwargs : key, value mappings
Other keyword arguments are passed to `ax.legend`, which formats
the annotation.
Returns
-------
self : JointGrid instance.
Returns `self`.
"""
default_template = "{stat} = {val:.2g}; p = {p:.2g}"
# Call the function and determine the form of the return value(s)
out = func(self.x, self.y)
try:
val, p = out
except TypeError:
val, p = out, None
default_template, _ = default_template.split(";")
# Set the default template
if template is None:
template = default_template
# Default to name of the function
if stat is None:
stat = func.__name__
# Format the annotation
if p is None:
annotation = template.format(stat=stat, val=val)
else:
annotation = template.format(stat=stat, val=val, p=p)
# Draw an invisible plot and use the legend to draw the annotation
# This is a bit of a hack, but `loc=best` works nicely and is not
# easily abstracted.
phantom, = self.ax_joint.plot(self.x, self.y, linestyle="", alpha=0)
self.ax_joint.legend([phantom], [annotation], loc=loc, **kwargs)
phantom.remove()
return self
def set_axis_labels(self, xlabel="", ylabel="", **kwargs):
"""Set the axis labels on the bivariate axes.
Parameters
----------
xlabel, ylabel : strings
Label names for the x and y variables.
kwargs : key, value mappings
Other keyword arguments are passed to the set_xlabel or
set_ylabel.
Returns
-------
self : JointGrid instance
returns `self`
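Examples
--------
Illustrative only (assumes ``g`` built as in the class-level examples):
>>> g = g.set_axis_labels("Total bill ($)", "Tip ($)")  # doctest: +SKIP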
"""
self.ax_joint.set_xlabel(xlabel, **kwargs)
self.ax_joint.set_ylabel(ylabel, **kwargs)
return self
def savefig(self, *args, **kwargs):
"""Wrap figure.savefig defaulting to tight bounding box."""
kwargs.setdefault("bbox_inches", "tight")
self.fig.savefig(*args, **kwargs)
| bsd-3-clause |
Jimmy-Morzaria/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 221 | 5517 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
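# i.e. dev(y, pred) = mean(log(1 + exp(-2 * (2*y - 1) * pred))), written
# with np.logaddexp below for numerical stability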
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
# check the negative gradient against the alternative definition
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = sw_init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
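# the single huge value carries zero weight, so the weighted median is
# the middle observation y[50] == 1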
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float)
y.fill(0.0)
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float)
y.fill(1.0)
sw = np.ones(102, dtype=np.float)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
NunoEdgarGub1/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
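# each 3x3 kernel, applied with scipy.ndimage.convolve below, shifts the
# 8x8 image by exactly one pixel in one of the four directions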
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
quasiben/bokeh | bokeh/core/compat/mplexporter/exporter.py | 8 | 12406 | """
Matplotlib Exporter
===================
This submodule contains tools for crawling a matplotlib figure and exporting
relevant pieces to a renderer.
"""
import warnings
import io
from . import utils
import matplotlib
from matplotlib import transforms
from matplotlib.backends.backend_agg import FigureCanvasAgg
class Exporter(object):
"""Matplotlib Exporter
Parameters
----------
renderer : Renderer object
The renderer object called by the exporter to create a figure
visualization. See mplexporter.Renderer for information on the
methods which should be defined within the renderer.
close_mpl : bool
If True (default), close the matplotlib figure as it is rendered. This
is useful when the exporter is used within the notebook, or with
an interactive matplotlib backend.
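Examples
--------
Illustrative sketch only; ``MyRenderer`` stands for a hypothetical
Renderer subclass, not something defined in this module:
>>> exporter = Exporter(MyRenderer())  # doctest: +SKIP
>>> exporter.run(fig)  # doctest: +SKIP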
"""
def __init__(self, renderer, close_mpl=True):
self.close_mpl = close_mpl
self.renderer = renderer
def run(self, fig):
"""
Run the exporter on the given figure
Parameters
----------
fig : matplotlib.Figure instance
The figure to export
"""
# Calling savefig executes the draw() command, putting elements
# in the correct place.
if fig.canvas is None:
fig.canvas = FigureCanvasAgg(fig)
fig.savefig(io.BytesIO(), format='png', dpi=fig.dpi)
if self.close_mpl:
import matplotlib.pyplot as plt
plt.close(fig)
self.crawl_fig(fig)
@staticmethod
def process_transform(transform, ax=None, data=None, return_trans=False,
force_trans=None):
"""Process the transform and convert data to figure or data coordinates
Parameters
----------
transform : matplotlib Transform object
The transform applied to the data
ax : matplotlib Axes object (optional)
The axes the data is associated with
data : ndarray (optional)
The array of data to be transformed.
return_trans : bool (optional)
If true, return the final transform of the data
force_trans : matplotlib.transform instance (optional)
If supplied, first force the data to this transform
Returns
-------
code : string
Code is either "data", "axes", "figure", or "display", indicating
the type of coordinates output.
transform : matplotlib transform
the transform used to map input data to output data.
Returned only if return_trans is True
new_data : ndarray
Data transformed to match the given coordinate code.
Returned only if data is specified
"""
if isinstance(transform, transforms.BlendedGenericTransform):
warnings.warn("Blended transforms not yet supported. "
"Zoom behavior may not work as expected.")
if force_trans is not None:
if data is not None:
data = (transform - force_trans).transform(data)
transform = force_trans
code = "display"
if ax is not None:
for (c, trans) in [("data", ax.transData),
("axes", ax.transAxes),
("figure", ax.figure.transFigure),
("display", transforms.IdentityTransform())]:
if transform.contains_branch(trans):
code, transform = (c, transform - trans)
break
if data is not None:
if return_trans:
return code, transform.transform(data), transform
else:
return code, transform.transform(data)
else:
if return_trans:
return code, transform
else:
return code
def crawl_fig(self, fig):
"""Crawl the figure and process all axes"""
with self.renderer.draw_figure(fig=fig,
props=utils.get_figure_properties(fig)):
for ax in fig.axes:
self.crawl_ax(ax)
def crawl_ax(self, ax):
"""Crawl the axes and process all elements within"""
with self.renderer.draw_axes(ax=ax,
props=utils.get_axes_properties(ax)):
for line in ax.lines:
self.draw_line(ax, line)
for text in ax.texts:
self.draw_text(ax, text)
for (text, ttp) in zip([ax.xaxis.label, ax.yaxis.label, ax.title],
["xlabel", "ylabel", "title"]):
if(hasattr(text, 'get_text') and text.get_text()):
self.draw_text(ax, text, force_trans=ax.transAxes,
text_type=ttp)
for artist in ax.artists:
# TODO: process other artists
if isinstance(artist, matplotlib.text.Text):
self.draw_text(ax, artist)
for patch in ax.patches:
self.draw_patch(ax, patch)
for collection in ax.collections:
self.draw_collection(ax, collection)
for image in ax.images:
self.draw_image(ax, image)
legend = ax.get_legend()
if legend is not None:
props = utils.get_legend_properties(ax, legend)
with self.renderer.draw_legend(legend=legend, props=props):
if props['visible']:
self.crawl_legend(ax, legend)
def crawl_legend(self, ax, legend):
"""
Recursively look through objects in legend children
"""
legendElements = list(utils.iter_all_children(legend._legend_box,
skipContainers=True))
legendElements.append(legend.legendPatch)
for child in legendElements:
# force a large zorder so it appears on top
child.set_zorder(1E6 + child.get_zorder())
try:
# What kind of object...
if isinstance(child, matplotlib.patches.Patch):
self.draw_patch(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.text.Text):
if not (child is legend.get_children()[-1]
and child.get_text() == 'None'):
self.draw_text(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.lines.Line2D):
self.draw_line(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.collections.Collection):
self.draw_collection(ax, child,
force_pathtrans=ax.transAxes)
else:
warnings.warn("Legend element %s not implemented" % child)
except NotImplementedError:
warnings.warn("Legend element %s not implemented" % child)
def draw_line(self, ax, line, force_trans=None):
"""Process a matplotlib line and call renderer.draw_line"""
coordinates, data = self.process_transform(line.get_transform(),
ax, line.get_xydata(),
force_trans=force_trans)
linestyle = utils.get_line_style(line)
if linestyle['dasharray'] is None:
linestyle = None
markerstyle = utils.get_marker_style(line)
if (markerstyle['marker'] in ['None', 'none', None]
or markerstyle['markerpath'][0].size == 0):
markerstyle = None
label = line.get_label()
if markerstyle or linestyle:
self.renderer.draw_marked_line(data=data, coordinates=coordinates,
linestyle=linestyle,
markerstyle=markerstyle,
label=label,
mplobj=line)
def draw_text(self, ax, text, force_trans=None, text_type=None):
"""Process a matplotlib text object and call renderer.draw_text"""
content = text.get_text()
if content:
transform = text.get_transform()
position = text.get_position()
coords, position = self.process_transform(transform, ax,
position,
force_trans=force_trans)
style = utils.get_text_style(text)
self.renderer.draw_text(text=content, position=position,
coordinates=coords,
text_type=text_type,
style=style, mplobj=text)
def draw_patch(self, ax, patch, force_trans=None):
"""Process a matplotlib patch object and call renderer.draw_path"""
vertices, pathcodes = utils.SVG_path(patch.get_path())
transform = patch.get_transform()
coordinates, vertices = self.process_transform(transform,
ax, vertices,
force_trans=force_trans)
linestyle = utils.get_path_style(patch, fill=patch.get_fill())
self.renderer.draw_path(data=vertices,
coordinates=coordinates,
pathcodes=pathcodes,
style=linestyle,
mplobj=patch)
def draw_collection(self, ax, collection,
force_pathtrans=None,
force_offsettrans=None):
"""Process a matplotlib collection and call renderer.draw_collection"""
(transform, transOffset,
offsets, paths) = collection._prepare_points()
offset_coords, offsets = self.process_transform(
transOffset, ax, offsets, force_trans=force_offsettrans)
path_coords = self.process_transform(
transform, ax, force_trans=force_pathtrans)
processed_paths = [utils.SVG_path(path) for path in paths]
processed_paths = [(self.process_transform(
transform, ax, path[0], force_trans=force_pathtrans)[1], path[1])
for path in processed_paths]
path_transforms = collection.get_transforms()
try:
# matplotlib 1.3: path_transforms are transform objects.
# Convert them to numpy arrays.
path_transforms = [t.get_matrix() for t in path_transforms]
except AttributeError:
# matplotlib 1.4: path transforms are already numpy arrays.
pass
styles = {'linewidth': collection.get_linewidths(),
'facecolor': collection.get_facecolors(),
'edgecolor': collection.get_edgecolors(),
'alpha': collection._alpha,
'zorder': collection.get_zorder()}
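# map matplotlib's offset_position ("data"/"screen") onto the renderer's
# notion of applying offsets "before" or "after" the path transform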
offset_dict = {"data": "before",
"screen": "after"}
offset_order = offset_dict[collection.get_offset_position()]
self.renderer.draw_path_collection(paths=processed_paths,
path_coordinates=path_coords,
path_transforms=path_transforms,
offsets=offsets,
offset_coordinates=offset_coords,
offset_order=offset_order,
styles=styles,
mplobj=collection)
def draw_image(self, ax, image):
"""Process a matplotlib image object and call renderer.draw_image"""
self.renderer.draw_image(imdata=utils.image_to_base64(image),
extent=image.get_extent(),
coordinates="data",
style={"alpha": image.get_alpha(),
"zorder": image.get_zorder()},
mplobj=image)
| bsd-3-clause |
bootphon/abx_numpy | examples/2D_normal_example.py | 1 | 1680 | """
This example applies the ABX evaluation to 2D data sampled from Gaussian distributions (diagonal covariances).
"""
from __future__ import print_function
import abx_numpy
import numpy as np
def sample_data(parameters):
data = []
n_samples = []
for klass in parameters:
sample = np.empty((klass['N'], 2))
for i in range(2):
sample[:, i] = np.random.normal(klass['mean'][i],
klass['std'][i],
klass['N'])
data.append(sample)
n_samples.append(klass['N'])
classes = np.repeat(np.arange(len(parameters)), repeats=n_samples)
data = np.concatenate(data, axis=0)
return classes, data
def plot_data(parameters, data):
import matplotlib.pyplot as plt
assert len(parameters) <= 3, 'Cannot plot more than 3 classes'
i = 0
colors = ['r', 'g', 'b']
for n_klass, klass in enumerate(parameters):
plt.plot(*data[i:i+klass['N']].T, marker='o',
color=colors[n_klass], ls='',
label='class {}'.format(n_klass+1))
i += klass['N']
plt.legend(numpoints=1)
plt.title('Normally distributed data points (diagonal covariance)')
plt.show()
def evaluate():
parameters = [
{'mean': [1, 1], 'std': [0.5, 1], 'N': 100},
{'mean': [1, 3], 'std': [1, 1], 'N': 150},
{'mean': [3, 2], 'std': [0.5, 0.5], 'N': 200}
]
classes, data = sample_data(parameters)
plot_data(parameters, data)
results = abx_numpy.abx(classes, data, lambda x, y: np.linalg.norm(x - y))
print(results)
if __name__ == '__main__':
evaluate()
| gpl-3.0 |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/tests/indexes/timedeltas/test_indexing.py | 13 | 4622 | import pytest
from datetime import timedelta
import pandas.util.testing as tm
from pandas import TimedeltaIndex, timedelta_range, compat, Index, Timedelta
class TestTimedeltaIndex(object):
_multiprocess_can_split_ = True
def test_insert(self):
idx = TimedeltaIndex(['4day', '1day', '2day'], name='idx')
result = idx.insert(2, timedelta(days=5))
exp = TimedeltaIndex(['4day', '1day', '5day', '2day'], name='idx')
tm.assert_index_equal(result, exp)
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([Timedelta('4day'), 'inserted', Timedelta('1day'),
Timedelta('2day')], name='idx')
assert not isinstance(result, TimedeltaIndex)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
idx = timedelta_range('1day 00:00:01', periods=3, freq='s', name='idx')
# preserve freq
expected_0 = TimedeltaIndex(['1day', '1day 00:00:01', '1day 00:00:02',
'1day 00:00:03'],
name='idx', freq='s')
expected_3 = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',
'1day 00:00:03', '1day 00:00:04'],
name='idx', freq='s')
# reset freq to None
expected_1_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:01',
'1day 00:00:02', '1day 00:00:03'],
name='idx', freq=None)
expected_3_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',
'1day 00:00:03', '1day 00:00:05'],
name='idx', freq=None)
cases = [(0, Timedelta('1day'), expected_0),
(-3, Timedelta('1day'), expected_0),
(3, Timedelta('1day 00:00:04'), expected_3),
(1, Timedelta('1day 00:00:01'), expected_1_nofreq),
(3, Timedelta('1day 00:00:05'), expected_3_nofreq)]
for n, d, expected in cases:
result = idx.insert(n, d)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
def test_delete(self):
idx = timedelta_range(start='1 Days', periods=5, freq='D', name='idx')
# preserve freq
expected_0 = timedelta_range(start='2 Days', periods=4, freq='D',
name='idx')
expected_4 = timedelta_range(start='1 Days', periods=4, freq='D',
name='idx')
# reset freq to None
expected_1 = TimedeltaIndex(
['1 day', '3 day', '4 day', '5 day'], freq=None, name='idx')
cases = {0: expected_0,
-5: expected_0,
-1: expected_4,
4: expected_4,
1: expected_1}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
with pytest.raises((IndexError, ValueError)):
# either, depending on the numpy version
result = idx.delete(5)
def test_delete_slice(self):
idx = timedelta_range(start='1 days', periods=10, freq='D', name='idx')
# preserve freq
expected_0_2 = timedelta_range(start='4 days', periods=7, freq='D',
name='idx')
expected_7_9 = timedelta_range(start='1 days', periods=7, freq='D',
name='idx')
# reset freq to None
expected_3_5 = TimedeltaIndex(['1 d', '2 d', '3 d',
'7 d', '8 d', '9 d', '10d'],
freq=None, name='idx')
cases = {(0, 1, 2): expected_0_2,
(7, 8, 9): expected_7_9,
(3, 4, 5): expected_3_5}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
result = idx.delete(slice(n[0], n[-1] + 1))
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
| mit |
elijah513/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 244 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/tests/test_simplification.py | 3 | 7188 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
import matplotlib
from matplotlib.testing.decorators import image_comparison, knownfailureif, cleanup
import matplotlib.pyplot as plt
from pylab import *
import numpy as np
from matplotlib import patches, path, transforms
from nose.tools import raises
import io
nan = np.nan
Path = path.Path
# NOTE: All of these tests assume that path.simplify is set to True
# (the default)
@image_comparison(baseline_images=['clipping'], remove_text=True)
def test_clipping():
t = np.arange(0.0, 2.0, 0.01)
s = np.sin(2*pi*t)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(t, s, linewidth=1.0)
ax.set_ylim((-0.20, -0.28))
@image_comparison(baseline_images=['overflow'], remove_text=True)
def test_overflow():
x = np.array([1.0,2.0,3.0,2.0e5])
y = np.arange(len(x))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x,y)
ax.set_xlim(xmin=2,xmax=6)
@image_comparison(baseline_images=['clipping_diamond'], remove_text=True)
def test_diamond():
x = np.array([0.0, 1.0, 0.0, -1.0, 0.0])
y = np.array([1.0, 0.0, -1.0, 0.0, 1.0])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y)
ax.set_xlim(xmin=-0.6, xmax=0.6)
ax.set_ylim(ymin=-0.6, ymax=0.6)
@cleanup
def test_noise():
np.random.seed(0)
x = np.random.uniform(size=(5000,)) * 50
fig = plt.figure()
ax = fig.add_subplot(111)
p1 = ax.plot(x, solid_joinstyle='round', linewidth=2.0)
path = p1[0].get_path()
transform = p1[0].get_transform()
path = transform.transform_path(path)
simplified = list(path.iter_segments(simplify=(800, 600)))
assert len(simplified) == 3884
@cleanup
def test_sine_plus_noise():
np.random.seed(0)
x = np.sin(np.linspace(0, np.pi * 2.0, 1000)) + np.random.uniform(size=(1000,)) * 0.01
fig = plt.figure()
ax = fig.add_subplot(111)
p1 = ax.plot(x, solid_joinstyle='round', linewidth=2.0)
path = p1[0].get_path()
transform = p1[0].get_transform()
path = transform.transform_path(path)
simplified = list(path.iter_segments(simplify=(800, 600)))
assert len(simplified) == 876
@image_comparison(baseline_images=['simplify_curve'], remove_text=True)
def test_simplify_curve():
pp1 = patches.PathPatch(
Path([(0, 0), (1, 0), (1, 1), (nan, 1), (0, 0), (2, 0), (2, 2), (0, 0)],
[Path.MOVETO, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CLOSEPOLY]),
fc="none")
fig = plt.figure()
ax = fig.add_subplot(111)
ax.add_patch(pp1)
ax.set_xlim((0, 2))
ax.set_ylim((0, 2))
@image_comparison(baseline_images=['hatch_simplify'], remove_text=True)
def test_hatch():
fig = plt.figure()
ax = fig.add_subplot(111)
ax.add_patch(Rectangle((0, 0), 1, 1, fill=False, hatch="/"))
ax.set_xlim((0.45, 0.55))
ax.set_ylim((0.45, 0.55))
@image_comparison(baseline_images=['fft_peaks'], remove_text=True)
def test_fft_peaks():
fig = plt.figure()
t = arange(65536)
ax = fig.add_subplot(111)
p1 = ax.plot(abs(fft(sin(2*pi*.01*t)*blackman(len(t)))))
path = p1[0].get_path()
transform = p1[0].get_transform()
path = transform.transform_path(path)
simplified = list(path.iter_segments(simplify=(800, 600)))
assert len(simplified) == 20
@cleanup
def test_start_with_moveto():
# Should be entirely clipped away to a single MOVETO
data = b"""
ZwAAAAku+v9UAQAA+Tj6/z8CAADpQ/r/KAMAANlO+v8QBAAAyVn6//UEAAC6ZPr/2gUAAKpv+v+8
BgAAm3r6/50HAACLhfr/ewgAAHyQ+v9ZCQAAbZv6/zQKAABepvr/DgsAAE+x+v/lCwAAQLz6/7wM
AAAxx/r/kA0AACPS+v9jDgAAFN36/zQPAAAF6Pr/AxAAAPfy+v/QEAAA6f36/5wRAADbCPv/ZhIA
AMwT+/8uEwAAvh77//UTAACwKfv/uRQAAKM0+/98FQAAlT/7/z0WAACHSvv//RYAAHlV+/+7FwAA
bGD7/3cYAABea/v/MRkAAFF2+//pGQAARIH7/6AaAAA3jPv/VRsAACmX+/8JHAAAHKL7/7ocAAAP
rfv/ah0AAAO4+/8YHgAA9sL7/8QeAADpzfv/bx8AANzY+/8YIAAA0OP7/78gAADD7vv/ZCEAALf5
+/8IIgAAqwT8/6kiAACeD/z/SiMAAJIa/P/oIwAAhiX8/4QkAAB6MPz/HyUAAG47/P+4JQAAYkb8
/1AmAABWUfz/5SYAAEpc/P95JwAAPmf8/wsoAAAzcvz/nCgAACd9/P8qKQAAHIj8/7cpAAAQk/z/
QyoAAAWe/P/MKgAA+aj8/1QrAADus/z/2isAAOO+/P9eLAAA2Mn8/+AsAADM1Pz/YS0AAMHf/P/g
LQAAtur8/10uAACr9fz/2C4AAKEA/f9SLwAAlgv9/8ovAACLFv3/QDAAAIAh/f+1MAAAdSz9/ycx
AABrN/3/mDEAAGBC/f8IMgAAVk39/3UyAABLWP3/4TIAAEFj/f9LMwAANm79/7MzAAAsef3/GjQA
ACKE/f9+NAAAF4/9/+E0AAANmv3/QzUAAAOl/f+iNQAA+a/9/wA2AADvuv3/XDYAAOXF/f+2NgAA
29D9/w83AADR2/3/ZjcAAMfm/f+7NwAAvfH9/w44AACz/P3/XzgAAKkH/v+vOAAAnxL+//04AACW
Hf7/SjkAAIwo/v+UOQAAgjP+/905AAB5Pv7/JDoAAG9J/v9pOgAAZVT+/606AABcX/7/7zoAAFJq
/v8vOwAASXX+/207AAA/gP7/qjsAADaL/v/lOwAALZb+/x48AAAjof7/VTwAABqs/v+LPAAAELf+
/788AAAHwv7/8TwAAP7M/v8hPQAA9df+/1A9AADr4v7/fT0AAOLt/v+oPQAA2fj+/9E9AADQA///
+T0AAMYO//8fPgAAvRn//0M+AAC0JP//ZT4AAKsv//+GPgAAojr//6U+AACZRf//wj4AAJBQ///d
PgAAh1v///c+AAB+Zv//Dz8AAHRx//8lPwAAa3z//zk/AABih///TD8AAFmS//9dPwAAUJ3//2w/
AABHqP//ej8AAD6z//+FPwAANb7//48/AAAsyf//lz8AACPU//+ePwAAGt///6M/AAAR6v//pj8A
AAj1//+nPwAA/////w=="""
import base64
if hasattr(base64, 'encodebytes'):
# Python 3 case
decodebytes = base64.decodebytes
else:
# Python 2 case
decodebytes = base64.decodestring
verts = np.fromstring(decodebytes(data), dtype='<i4')
verts = verts.reshape((len(verts) // 2, 2))
path = Path(verts)
segs = path.iter_segments(transforms.IdentityTransform(), clip=(0.0, 0.0, 100.0, 100.0))
segs = list(segs)
assert len(segs) == 1
assert segs[0][1] == Path.MOVETO
@cleanup
@raises(OverflowError)
def test_throw_rendering_complexity_exceeded():
rcParams['path.simplify'] = False
xx = np.arange(200000)
yy = np.random.rand(200000)
yy[1000] = np.nan
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(xx, yy)
try:
fig.savefig(io.BytesIO())
finally:
rcParams['path.simplify'] = True
@image_comparison(baseline_images=['clipper_edge'], remove_text=True)
def test_clipper():
dat = (0, 1, 0, 2, 0, 3, 0, 4, 0, 5)
fig = plt.figure(figsize=(2, 1))
fig.subplots_adjust(left = 0, bottom = 0, wspace = 0, hspace = 0)
ax = fig.add_axes((0, 0, 1.0, 1.0), ylim = (0, 5), autoscale_on = False)
ax.plot(dat)
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.yaxis.set_major_locator(plt.MultipleLocator(1))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlim(5, 9)
@image_comparison(baseline_images=['para_equal_perp'], remove_text=True)
def test_para_equal_perp():
x = np.array([0, 1, 2, 1, 0, -1, 0, 1] + [1] * 128)
y = np.array([1, 1, 2, 1, 0, -1, 0, 0] + [0] * 128)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x + 1, y + 1)
ax.plot(x + 1, y + 1, 'ro')
@image_comparison(baseline_images=['clipping_with_nans'])
def test_clipping_with_nans():
x = np.linspace(0, 3.14 * 2, 3000)
y = np.sin(x)
x[::100] = np.nan
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y)
ax.set_ylim(-0.25, 0.25)
if __name__=='__main__':
import nose
nose.runmodule(argv=['-s','--with-doctest'], exit=False)
| mit |
dallascard/guac | core/old_extractors/extract_non-distributional_values.py | 1 | 4284 | import os
import sys
import gzip
from optparse import OptionParser
import numpy as np
import pandas as pd
from scipy import sparse
from ..old_extractors import features
from ..old_extractors.features import tokenizer
from ..util import file_handling as fh, defines
def main():
# Handle input options and arguments
usage = "%prog <vector-file.txt.gz>"
parser = OptionParser(usage=usage)
parser.add_option('-p', dest='prefix', default='_ndv_',
help='Token prefix; default=%default')
(options, args) = parser.parse_args()
prefix = options.prefix
input_dir = defines.data_raw_labels_dir
files = fh.ls(input_dir, '*.csv')
sys.exit("THIS IS NOT WORKING RIGHT NOW")
if len(args) < 1:
sys.exit('Please specify a set of vectors')
vector_filename = args[0]
# load the vectors from a file and determine their size
print "Loading vectors"
vector_filename = os.path.join(defines.non_distributional_vectors_dir, vector_filename)
if not os.path.exists(vector_filename):
print vector_filename
sys.exit("Cannot find vector file")
words = get_word_subset(files)
vectors = {}
vector_size = 0
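# the full vector file is huge, so keep only sparse int8 rows for words
# that actually occur in the responses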
with gzip.open(vector_filename, 'r') as input_file:
count = 0
for line in input_file:
parts = line.split()
word = parts[0]
vector_size = len(parts[1:])
count += 1
if word in words:
print word, count
vector = sparse.csr_matrix(parts[1:], dtype=np.int8)
vectors[word] = vector
print "Extracting vector values:"
data_matrices = []
for f in files:
print f
data_matrices.append(extract_vector_values(f, vectors, vector_size, prefix))
data = pd.concat(data_matrices, axis=0)
non_zero_cols = data.sum(axis=0) > 0
print data.shape
data = data.loc[:, non_zero_cols]
print data.shape
output_dir = fh.makedirs(defines.data_values_dir)
output_filename = fh.make_filename(output_dir, get_feature_name(vector_filename), 'csv')
data.to_csv(output_filename)
# write default function definition
features.make_feature_definition(get_feature_name(vector_filename), prefix,
filename=get_feature_name(vector_filename)+'_default',
binarize=False, min_doc_threshold=0,
feature_type='values')
def get_feature_name(vector_filename):
return 'non-distributional' + fh.get_basename_wo_ext(vector_filename)
def get_word_subset(f):
print "Building subset of words"
words = set()
responses = fh.read_json(defines.data_raw_text_file)
Y = fh.read_csv(f)
rids = Y.index
for rid in rids:
text = responses[rid].lower()
tokens = []
paragraphs = text.split('/')
paragraphs = [p for p in paragraphs if p != '']
for p in paragraphs:
sentences = tokenizer.split_sentences(p)
for s in sentences:
tokens = tokens + tokenizer.make_ngrams(s, n=1)
words.update(set(tokens))
print len(words), "words"
return words
def extract_vector_values(input_filename, vectors, vector_size, prefix):
Y = fh.read_csv(input_filename)
rids = Y.index
n_items = len(rids)
responses = fh.read_json(defines.data_raw_text_file)
keys = vectors.keys()
col_names = [prefix + str(v) for v in np.arange(vector_size)]
data = pd.DataFrame(np.zeros([n_items, vector_size]), index=rids, columns=col_names)
for rid in rids:
text = responses[rid].lower()
tokens = []
paragraphs = text.split('/')
paragraphs = [p for p in paragraphs if p != '']
for p in paragraphs:
sentences = tokenizer.split_sentences(p)
for s in sentences:
tokens = tokens + tokenizer.make_ngrams(s, n=1)
item_vector = sparse.csr_matrix(np.zeros([1, vector_size], dtype=np.int8))
for t in tokens:
if t in vectors:
vector = vectors[t]
item_vector += vector
data.loc[rid] = item_vector.todense()
return data
if __name__ == '__main__':
main()
| apache-2.0 |
wittawatj/kernel-gof | kgof/test/test_kernel.py | 1 | 2951 | """
Module for testing kernel module.
"""
__author__ = 'wittawat'
import autograd
import autograd.numpy as np
import matplotlib.pyplot as plt
import kgof.data as data
import kgof.density as density
import kgof.util as util
import kgof.kernel as kernel
import kgof.goftest as gof
import kgof.glo as glo
import scipy.stats as stats
import numpy.testing as testing
import unittest
class TestKGauss(unittest.TestCase):
def setUp(self):
pass
def test_basic(self):
"""
Nothing special. Just test basic things.
"""
# sample
n = 10
d = 3
with util.NumpySeedContext(seed=29):
X = np.random.randn(n, d)*3
k = kernel.KGauss(sigma2=1)
K = k.eval(X, X)
self.assertEqual(K.shape, (n, n))
self.assertTrue(np.all(K >= 0-1e-6))
self.assertTrue(np.all(K <= 1+1e-6), 'K not bounded by 1')
def test_pair_gradX_Y(self):
# sample
n = 11
d = 3
with util.NumpySeedContext(seed=20):
X = np.random.randn(n, d)*4
Y = np.random.randn(n, d)*2
k = kernel.KGauss(sigma2=2.1)
# n x d
pair_grad = k.pair_gradX_Y(X, Y)
loop_grad = np.zeros((n, d))
for i in range(n):
for j in range(d):
loop_grad[i, j] = k.gradX_Y(X[[i], :], Y[[i], :], j)
testing.assert_almost_equal(pair_grad, loop_grad)
def test_gradX_y(self):
n = 10
with util.NumpySeedContext(seed=10):
for d in [1, 3]:
y = np.random.randn(d)*2
X = np.random.rand(n, d)*3
sigma2 = 1.3
k = kernel.KGauss(sigma2=sigma2)
# n x d
G = k.gradX_y(X, y)
# check correctness
K = k.eval(X, y[np.newaxis, :])
myG = -K/sigma2*(X-y)
self.assertEqual(G.shape, myG.shape)
testing.assert_almost_equal(G, myG)
def test_gradXY_sum(self):
n = 11
with util.NumpySeedContext(seed=12):
for d in [3, 1]:
X = np.random.randn(n, d)
sigma2 = 1.4
k = kernel.KGauss(sigma2=sigma2)
# n x n
myG = np.zeros((n, n))
K = k.eval(X, X)
for i in range(n):
for j in range(n):
diffi2 = np.sum( (X[i, :] - X[j, :])**2 )
#myG[i, j] = -diffi2*K[i, j]/(sigma2**2)+ d*K[i, j]/sigma2
myG[i, j] = K[i, j]/sigma2*(d - diffi2/sigma2)
# check correctness
G = k.gradXY_sum(X, X)
self.assertEqual(G.shape, myG.shape)
testing.assert_almost_equal(G, myG)
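    # Background for the check above: for the Gaussian kernel
    # k(x, y) = exp(-||x - y||^2 / (2 * sigma2)), the sum of mixed second derivatives is
    #     sum_i d^2 k / (dx_i dy_i) = k(x, y) / sigma2 * (d - ||x - y||^2 / sigma2),
    # which is exactly the closed form that gradXY_sum() is compared against in the loop.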
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
| mit |
timqian/sms-tools | lectures/8-Sound-transformations/plots-code/stftMorph-orchestra.py | 3 | 2092 | import numpy as np
import time, os, sys
from scipy.signal import hamming, resample
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import dftModel as DFT
import utilFunctions as UF
import stftTransformations as STFTT
import stochasticModel as STOC
import math
import stft as STFT
(fs, x1) = UF.wavread('../../../sounds/orchestra.wav')
(fs, x2) = UF.wavread('../../../sounds/speech-male.wav')
w1 = np.hamming(1024)
N1 = 1024
H1 = 256
w2 = np.hamming(1024)
N2 = 1024
smoothf = .2
balancef = 0.5
y = STFTT.stftMorph(x1, x2, fs, w1, N1, w2, N2, H1, smoothf, balancef)
L = int(x1.size/H1)
H2 = int(x2.size/L)
mX2 = STOC.stochasticModelAnal(x2,H2,H2*2, smoothf)
mX,pX = STFT.stftAnal(x1, fs, w1, N1, H1)
mY,pY = STFT.stftAnal(y, fs, w1, N1, H1)
maxplotfreq = 10000.0
plt.figure(1, figsize=(12, 9))
plt.subplot(311)
numFrames = int(mX[:,0].size)
frmTime = H1*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(N1*maxplotfreq/fs)/N1
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:N1*maxplotfreq/fs+1]))
plt.title('mX (orchestra.wav)')
plt.autoscale(tight=True)
plt.subplot(312)
numFrames = int(mX2[:,0].size)
frmTime = H2*np.arange(numFrames)/float(fs)
N = 2*mX2[0,:].size
binFreq = fs*np.arange(N*maxplotfreq/fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX2[:,:N*maxplotfreq/fs+1]))
plt.title('mX2 (speech-male.wav)')
plt.autoscale(tight=True)
plt.subplot(313)
numFrames = int(mY[:,0].size)
frmTime = H1*np.arange(numFrames)/float(fs)
binFreq = fs*np.arange(N1*maxplotfreq/fs)/N1
plt.pcolormesh(frmTime, binFreq, np.transpose(mY[:,:N1*maxplotfreq/fs+1]))
plt.title('mY')
plt.autoscale(tight=True)
plt.tight_layout()
UF.wavwrite(y, fs, 'orchestra-speech-stftMorph.wav')
plt.savefig('stftMorph-orchestra.png')
plt.show() | agpl-3.0 |
schets/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 222 | 3055 | # Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
# Test FactorAnalysis ability to recover the data covariance structure
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
# latent variable of dim 3, 20 of it
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
assert_raises(ValueError, FactorAnalysis, svd_method='foo')
fa_fail = FactorAnalysis()
fa_fail.svd_method = 'foo'
assert_raises(ValueError, fa_fail.fit, X)
fas = []
for method in ['randomized', 'lapack']:
fa = FactorAnalysis(n_components=n_components, svd_method=method)
fa.fit(X)
fas.append(fa)
X_t = fa.transform(X)
assert_equal(X_t.shape, (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood did not increase')
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal
fa1, fa2 = fas
for attr in ['loglike_', 'components_', 'noise_variance_']:
assert_almost_equal(f(fa1, attr), f(fa2, attr))
fa1.max_iter = 1
fa1.verbose = True
assert_warns(ConvergenceWarning, fa1.fit, X)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
fa.n_components = n_components
fa.fit(X)
cov = fa.get_covariance()
precision = fa.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
| bsd-3-clause |
mardub1635/chiasmusDetector | Score.py | 1 | 2538 | # -*- coding: utf-8 -*-
from sklearn import linear_model
from sklearn.metrics import confusion_matrix
import numpy as np
import sys
import dill
import scipy.io.arff as scarff
from sklearn import cross_validation
from sklearn import metrics
from ArffData import ArffData
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import StratifiedShuffleSplit
from scipy import stats
class Score:
"""
    Loads the trained chiasmus-scoring model (model.pkl) and its feature weights,
    and scores new feature vectors with it.
"""
print dill.load( open( "model.pkl", "rb" ) )
clf=dill.load( open( "model.pkl", "rb" ) )['model']
#y_scores=runModel(X,X,y,y,clf,None)
print "Model Weights:"
weightfile=open("weights","r")
weightList=[float(weight[0:weight.index(":")]) for weight in weightfile.read().splitlines()[1:]]
weightfile=open("weights","r")
weightLines=weightfile.read().splitlines()
#print weightList
#print weightLines
featureList=[feat[feat.index(":")+1:feat.index("/")] for feat in weightLines[1:]]
#print featureList
weightfile.close()
def indexBadFeat(self,weightList,featureList):
"""
        (list, list) -> tuple(list, list)
        Takes the model weight list and the corresponding feature names (as read from the
        arff file of the current object) and returns the positions of the useless
        (zero-weight) features together with the list of useful feature names.
        The position list keeps track of the useless features for later elimination;
        elimination is done via pop(), so the list must be in decreasing order.
"@Arff file
Feat0
Feat1
Feat_TagAB
Feat_TagABCD
        >>>indexBadFeat([0.5, 0, 0, 0.3], ['Feat0', 'Feat1', 'Feat_TagAB', 'Feat_TagABCD'])
        ([2, 1], ['Feat0', 'Feat_TagABCD'])
"""
#data=scarff.loadarff(self.arffFile)
indexList=[]#The features are present in a certain order.
#Some are useless. Their index is listed for elimination later
usefulFeat=[]
i=0
#print "the weightList:",weightList
#print "the featureList:",featureList
for featureName in featureList:
if weightList[i]==0:
indexList.append(i)
else:
usefulFeat.append(featureName)
i+=1
indexList.reverse()
#print usefulFeat
return (indexList,usefulFeat)
def selFeat(self,indexFeat,row):
"""
        Returns the predictor instance (row) cleaned of its useless features, given the
        list of feature indices to remove (in decreasing order).
        >>>selFeat([2, 1], [0, 1, 0, 0.3333])
        [0, 0.3333]
"""
for i in indexFeat:
row.pop(i)
return row
def scoreFeat(self,featList):
indexList=self.indexBadFeat(self.weightList,self.featureList)[0]
score=0
row=self.selFeat(indexList,featList)
score=self.clf.predict_proba([row])[:,1][0]
#print score
#i=0
# while i<len(featList):
# score=score+featList[i]*self.weightList[i]
# i=i+1
return score#+10
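    # Illustrative usage sketch (an assumption, not part of the original file): it relies on
    # model.pkl and the "weights" file being present, and on featList following the same
    # feature order as the weights file.
    #
    #   scorer = Score()
    #   chiasmus_score = scorer.scoreFeat(featList)
    #
    # scoreFeat() drops the zero-weight features found by indexBadFeat() and returns the
    # classifier's predicted probability for class index 1 (presumably the chiasmus class).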
| gpl-3.0 |
Jimmy-Morzaria/scikit-learn | sklearn/metrics/regression.py | 27 | 9558 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
Returns
-------
type_true : one of {'continuous', continuous-multioutput'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array-like of shape = [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples, n_outputs]
Estimated target values.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
y_type = 'continuous' if y_true.shape[1] == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred
def _average_and_variance(values, sample_weight=None):
"""
Compute the (weighted) average and variance.
Parameters
----------
values : array-like of shape = [n_samples] or [n_samples, n_outputs]
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average : float
The weighted average
variance : float
The weighted variance
"""
values = np.asarray(values)
if values.ndim == 1:
values = values.reshape((-1, 1))
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
if sample_weight.ndim == 1:
sample_weight = sample_weight.reshape((-1, 1))
average = np.average(values, weights=sample_weight)
variance = np.average((values - average)**2, weights=sample_weight)
return average, variance
def mean_absolute_error(y_true, y_pred, sample_weight=None):
"""Mean absolute error regression loss
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
return np.average(np.abs(y_pred - y_true).mean(axis=1),
weights=sample_weight)
def mean_squared_error(y_true, y_pred, sample_weight=None):
"""Mean squared error regression loss
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
return np.average(((y_pred - y_true) ** 2).mean(axis=1),
weights=sample_weight)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred, sample_weight=None):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Parameters
----------
y_true : array-like
Ground truth (correct) target values.
y_pred : array-like
Estimated target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
The explained variance.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
if y_type != "continuous":
raise ValueError("{0} is not supported".format(y_type))
_, numerator = _average_and_variance(y_true - y_pred, sample_weight)
_, denominator = _average_and_variance(y_true, sample_weight)
if denominator == 0.0:
if numerator == 0.0:
return 1.0
else:
# arbitrary set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
return 0.0
return 1 - numerator / denominator
def r2_score(y_true, y_pred, sample_weight=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0, lower values are worse.
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
The R^2 score.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(dtype=np.float64)
if denominator == 0.0:
if numerator == 0.0:
return 1.0
else:
# arbitrary set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
return 0.0
return 1 - numerator / denominator
| bsd-3-clause |
fclesio/learning-space | Python/ims24/oldfolders/module/transform.py | 1 | 8392 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import psycopg2
import os
import logging
import time
from sqlalchemy import text
from sqlalchemy import create_engine
host = os.environ["IMS_HOSTNAME"]
user = os.environ["IMS_USERNAME"]
password = os.environ["IMS_PASSWORD"]
dbname = os.environ["IMS_DB_NAME"]
port = os.environ["IMS_PORT"]
engine_string = "postgresql://%s:%s@%s:%s/%s" % (user, password, host, port, dbname)
engine = create_engine(engine_string)
class Transform(object):
def __init__(self):
super(Transform, self).__init__()
@staticmethod
def load_csv():
global df
df = pd.read_csv("result.csv")
@staticmethod
def cleasing_1_addresses():
df[["street", "row"]] = pd.DataFrame(
df["Address"].str.split(",", 1).tolist(), columns=["street", "row"]
)
df[["space", "postal_code", "city", "District"]] = pd.DataFrame(
df["row"].str.split(" ", n=3, expand=True)
)
df[["street_clean", "house_number"]] = pd.DataFrame(
df["street"].str.split(" ", 1).tolist(), columns=["street", "row"]
)
@staticmethod
def cleasing_2_flat_size():
# Adjustments in Flat size
df["Wohnflaeche"] = df["Wohnflaeche"].str.replace("m²", "")
df["Wohnflaeche"] = df["Wohnflaeche"].str.replace(" ", "")
df["Wohnflaeche"] = df["Wohnflaeche"].str.replace(",", ".")
df["Wohnflaeche"] = pd.to_numeric(df["Wohnflaeche"])
@staticmethod
def cleasing_3_floor_number():
# Adjustments in Floor
df["Etage"] = df["Etage"].astype(str).str[0]
@staticmethod
def cleasing_4_set_columns():
df.columns = [
"title",
"address",
"apartment_type",
"floor",
"square_meters",
"availability",
"room",
"sleep_room",
"bathroom",
"district",
"animals_allowed",
"base_rent",
"aditional_costs",
"heater_tax",
"total_amount",
"initial_deposit",
"agency",
"url",
"street",
"raw_row",
"space",
"postal_code",
"city",
"street_clean",
"house_number",
]
@staticmethod
def cleasing_5_drop_unecessary_columns():
df.drop(["space"], axis=1)
@staticmethod
def cleasing_6_adjust_flat_size():
df["base_rent"] = df["base_rent"].str.replace("€", "")
df["base_rent"] = df["base_rent"].str.replace(" ", "")
df["base_rent"] = df["base_rent"].str.replace("+", "")
df["base_rent"] = df["base_rent"].str.replace(" ", "")
df["base_rent"] = df["base_rent"].str.replace(".", "")
df["base_rent"] = df["base_rent"].str.replace(",", ".")
df["base_rent"] = df["base_rent"].str.strip()
@staticmethod
def cleasing_7_adjust_aditional_costs():
df["aditional_costs"] = df["aditional_costs"].str.replace("€", "")
df["aditional_costs"] = df["aditional_costs"].str.replace(" ", "")
df["aditional_costs"] = df["aditional_costs"].str.replace("+", "")
df["aditional_costs"] = df["aditional_costs"].str.replace(" ", "")
df["aditional_costs"] = df["aditional_costs"].str.replace(".", "")
df["aditional_costs"] = df["aditional_costs"].str.replace(",", ".")
df["aditional_costs"] = df["aditional_costs"].str.replace("keineAngabe", "")
df["aditional_costs"] = df["aditional_costs"].str.strip()
@staticmethod
def cleasing_8_adjust_heater_tax():
df["heater_tax"] = df["heater_tax"].str.replace("€", "")
df["heater_tax"] = df["heater_tax"].str.replace(" ", "")
df["heater_tax"] = df["heater_tax"].str.replace("+", "")
df["heater_tax"] = df["heater_tax"].str.replace(" ", "")
df["heater_tax"] = df["heater_tax"].str.replace(".", "")
df["heater_tax"] = df["heater_tax"].str.replace(",", ".")
df["heater_tax"] = df["heater_tax"].str.replace("inNebenkostenenthalten", "")
df["heater_tax"] = df["heater_tax"].str.replace(
"nichtinNebenkostenenthalten", ""
)
df["heater_tax"] = df["heater_tax"].str.replace("keineAngabe", "")
df["heater_tax"] = df["heater_tax"].str.replace("inkl.", "")
df["heater_tax"] = df["heater_tax"].str.replace("nicht", "")
df["heater_tax"] = df["heater_tax"].str.strip()
@staticmethod
def cleasing_9_adjust_total_amount():
df["total_amount"] = df["total_amount"].str.replace("€", "")
df["total_amount"] = df["total_amount"].str.replace(" ", "")
df["total_amount"] = df["total_amount"].str.replace("+", "")
df["total_amount"] = df["total_amount"].str.replace(" ", "")
df["total_amount"] = df["total_amount"].str.strip()
df["total_amount"] = df["total_amount"].str.replace(".", "")
df["total_amount"] = df["total_amount"].str.replace(",", ".")
df["total_amount"] = df["total_amount"].str.replace("zzglHeizkosten", "")
df["total_amount"] = df["total_amount"].str.replace(
"zzglNebenkosten&Heizkosten", ""
)
df["total_amount"] = df["total_amount"].str.replace("(", "")
df["total_amount"] = df["total_amount"].str.replace(")", "")
df["total_amount"] = df["total_amount"].str.replace("zzglNebenkosten", "")
@staticmethod
def cleasing_10_adjust_availability():
df["availability"] = df["availability"].str.replace(r"[a-zA-Z]", "")
df["availability"] = df["availability"].str.strip()
df["availability"] = df["availability"].str.replace("!", "")
df["availability"] = df["availability"].str.replace("/", "")
df["availability"] = df["availability"].str.replace("ü", "")
df["availability"] = df["availability"].str.replace("ä", "")
df["availability"] = df["availability"].str.replace(" ", "")
@staticmethod
def cleasing_11_adjust_room():
df["room"] = df["room"].str.replace(" ", "")
df["room"] = df["room"].str.replace(",", ".")
@staticmethod
def cleasing_12_adjust_city():
df["city"] = df["city"].str.replace(",", ".")
df["city"] = df["city"].str.replace(" ", "")
df["city"] = df["city"].str.replace(".", "")
@staticmethod
def cleasing_13_adjust_animals_allowed():
df["animals_allowed"] = df["animals_allowed"].str.replace(
"Nach Vereinbarung", "Arrangement"
)
df["animals_allowed"] = df["animals_allowed"].str.replace("Ja", "Yes")
df["animals_allowed"] = df["animals_allowed"].str.replace("Nein", "No")
df["animals_allowed"] = df["animals_allowed"].str.replace(" ", "")
@staticmethod
def soft_delete_raw_data():
engine.execute(
text(
"update ods.extracted_raw_table set deleted = true where deleted = false;"
).execution_options(autocommit=True)
)
@staticmethod
def load_transformed_data():
df.to_sql(
"extracted_raw_table", engine, schema="ods", if_exists="append", index=False
)
@staticmethod
def remove_raw_extracted_data():
os.remove("result.csv")
def main():
transform = Transform()
transform.load_csv()
transform.cleasing_1_addresses()
transform.cleasing_2_flat_size()
transform.cleasing_3_floor_number()
transform.cleasing_4_set_columns()
transform.cleasing_5_drop_unecessary_columns()
transform.cleasing_6_adjust_flat_size()
transform.cleasing_7_adjust_aditional_costs()
transform.cleasing_8_adjust_heater_tax()
transform.cleasing_9_adjust_total_amount()
transform.cleasing_10_adjust_availability()
transform.cleasing_11_adjust_room()
transform.cleasing_12_adjust_city()
transform.cleasing_13_adjust_animals_allowed()
transform.soft_delete_raw_data()
transform.load_transformed_data()
transform.remove_raw_extracted_data()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
logging.info("Transformations Started ...")
start_time = time.time()
main()
logging.info("Transformations finished ...")
elapsed_time = time.time() - start_time
logging.info("Elapsed Time: %s", elapsed_time)
| gpl-2.0 |
kazuar/ThinkStats2 | code/hinc.py | 67 | 1494 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import thinkplot
import thinkstats2
def Clean(s):
"""Converts dollar amounts to integers."""
try:
return int(s.lstrip('$').replace(',', ''))
except ValueError:
if s == 'Under':
return 0
elif s == 'over':
return np.inf
return None
def ReadData(filename='hinc06.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
data = pandas.read_csv(filename, header=None, skiprows=9)
cols = data[[0, 1]]
res = []
for _, row in cols.iterrows():
label, freq = row.values
freq = int(freq.replace(',', ''))
t = label.split()
low, high = Clean(t[0]), Clean(t[-1])
res.append((high, freq))
df = pandas.DataFrame(res)
# correct the first range
df[0][0] -= 1
# compute the cumulative sum of the freqs
df[2] = df[1].cumsum()
# normalize the cumulative freqs
total = df[2][41]
df[3] = df[2] / total
# add column names
df.columns = ['income', 'freq', 'cumsum', 'ps']
return df
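# Note on ReadData's output: one row per income bracket, with
#   income - upper bound of the bracket (the first bracket's bound is reduced by 1)
#   freq   - number of households in the bracket
#   cumsum - running total of freq
#   ps     - cumulative fraction of households (cumsum / total)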
def main():
df = ReadData()
print(df)
if __name__ == "__main__":
main()
| gpl-3.0 |
MartinDelzant/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms are compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
tedoreve/tools | maxwell.py | 1 | 1264 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 12:11:24 2018
@author: tedoreve
"""
import matplotlib.pyplot as plt
import numpy as np
import astropy.constants as con
import astropy.units as un
def maxwell(T,m,v):
    a = np.sqrt(con.k_B*T/m)  # thermal speed scale; use the mass argument instead of hard-coded m_e
print('a=',a.to('km/s'))
return np.sqrt(2/np.pi)*v**2/a**3 * np.exp(-v**2/2/a**2)
def ptotal(v):
dv = v[1]-v[0]
p = maxwell(T,con.m_e,v).to('s/km')*dv
return np.sum(p)
def accelerator(p,n,v):
fin = np.zeros(len(v))
for i in range(len(v)):
fin[i] = maxwell(T,con.m_e,v[i])*(1-p) + fin[i-1]*p
return fin
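# Note: accelerator() is an unused sketch. Each velocity bin mixes the thermal Maxwell pdf
# (weight 1 - p) with the previous bin of the accelerated distribution (weight p), i.e. a
# rough one-bin-per-cycle, Fermi-like re-acceleration step; it is not called in __main__.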
if __name__ == '__main__':
T = 50*un.K #ISM temperature before shock wave arrival
    p = 0.5 #probability that the particle remains within the accelerator after each cycle
    n = 1   #number of acceleration cycles
    dv = 1  #velocity grid step, i.e. speed gained per cycle (km/s)
fig, ax = plt.subplots(1, 1)
v = np.linspace(0,200, (201-0)/dv)*un.km/un.s
ax.plot(v, maxwell(T,con.m_e,v).to('s/km'),'r-', lw=5, alpha=0.6, label='maxwell pdf')
ax.set_xlabel('v (km/s)')
    ax.set_ylabel('pdf (probability per km/s)')
ax.set_title('Maxwell Speed Distribution')
plt.legend() | mit |
rishikksh20/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 42 | 4843 | """
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - PCA using randomized SVD',
decomposition.PCA(n_components=n_components, svd_solver='randomized',
whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
# Plot an image representing the pixelwise variance provided by the
# estimator e.g its noise_variance_ attribute. The Eigenfaces estimator,
# via the PCA decomposition, also provides a scalar noise_variance_
# (the mean of pixelwise variance) that cannot be displayed as an image
# so we skip it.
if (hasattr(estimator, 'noise_variance_') and
estimator.noise_variance_.ndim > 0): # Skip the Eigenfaces case
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
ideaplat/Tback | machinelearning/randomforestclassifier/model.py | 1 | 4250 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from data.dbaccess import normalize
from data.db import get_db_session, Pinkunhu2015
class RandomForestModel(object):
""" 使用随机森林模型预测是否脱贫 """
# 提取的属性
features = [
'tv', 'washing_machine', 'fridge',
'reason', 'is_danger_house', 'is_back_poor', 'is_debt', 'standard',
'arable_land', 'debt_total', 'living_space', 'member_count',
# 'person_year_total_income', 'year_total_income',
'subsidy_total', 'wood_land', 'xin_nong_he_total', 'xin_yang_lao_total',
'call_number', 'bank_number', 'help_plan'
]
    # Target to validate (label column)
target = 'poor_status'
def run(self):
""" 运行 """
# 获取数据
X, Y = self._fetch_data()
clf = self.get_classifier(X, Y)
        # Evaluate on the test data
X, Y = self._fetch_test_data()
self.predict(clf, X, Y)
        # Plot the feature importances
self.plot(clf, self.features)
def get_classifier(self, X, Y):
""" 构建随机森林模型
:param X: 训练数据
:param Y: 训练数据结果
:return: 模型
"""
clf = RandomForestClassifier(n_estimators=10)
clf.fit(X, Y)
return clf
def predict(self, clf, X, Y):
""" 用当前的模型预测
:param clf: 模型
:param X: 测试数据
:param Y: 测试数据结果
:return: 命中率
"""
Y2 = clf.predict(X)
total, hit = 0, 0
for idx, v in enumerate(Y2):
if v == 1:
total += 1
if Y[idx] == v:
hit += 1
print 'Total: %d, Hit: %d, Precision: %.2f%%' % (total, hit, 100.0*hit/total)
        # Using the model trained on county 'A县' to predict county 'B县':
# Total: 6769, Hit: 5295, Precision: 78.22%
return hit * 1.0 / total
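    # Note on predict(): the loop counts only the samples predicted as 1, so the returned
    # value is the precision of the positive class, roughly what
    # sklearn.metrics.precision_score(Y, Y2, pos_label=1) would report for 0/1 labels.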
def plot(self, clf, features):
"""
        Plot the feature importances
        :param clf: fitted classifier
        :param features: list of all feature names
:return:
"""
features_len = len(features)
importances = clf.feature_importances_
std = np.std([tree.feature_importances_ for tree in clf.estimators_], axis=0)
indices = np.argsort(importances)[::-1]
        # Print the feature ranking
print("\nFeature ranking:")
for f in range(features_len):
print("%d. %s (%f)" % (f + 1, features[indices[f]], importances[indices[f]]))
        # Plot the feature importances as a bar chart
plt.figure()
plt.title("Feature importances")
plt.bar(range(features_len), importances[indices], color="r", yerr=std[indices], align="center")
plt.xticks(range(features_len), np.array(features)[indices], rotation=-90)
plt.xlim([-1, features_len])
plt.show()
def _fetch_data(self):
""" 获取建模数据 """
session = get_db_session()
objs = session.query(Pinkunhu2015).filter(Pinkunhu2015.county == 'A县').all()
X, Y = [], []
for item in objs:
col_list = []
for col in self.features:
normalized_value = normalize(col, getattr(item, col))
col_list.append(normalized_value)
X.append(col_list)
normalized_value = normalize(self.target, getattr(item, self.target))
Y.append(normalized_value)
return X, Y
def _fetch_test_data(self):
""" 获取测试数据 """
session = get_db_session()
objs = session.query(Pinkunhu2015).filter(Pinkunhu2015.county == 'B县').all()
X, Y = [], []
for item in objs:
col_list = []
for col in self.features:
normalized_value = normalize(col, getattr(item, col))
col_list.append(normalized_value)
X.append(col_list)
normalized_value = normalize(self.target, getattr(item, self.target))
Y.append(normalized_value)
return X, Y
if __name__ == '__main__':
m = RandomForestModel()
m.run()
| mit |
Vimos/scikit-learn | benchmarks/bench_saga.py | 45 | 8474 | """Author: Arthur Mensch
Benchmarks of sklearn SAGA vs lightning SAGA vs Liblinear. Shows the gain
in using multinomial logistic regression in terms of learning time.
"""
import json
import time
from os.path import expanduser
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_rcv1, load_iris, load_digits, \
fetch_20newsgroups_vectorized
from sklearn.externals.joblib import delayed, Parallel, Memory
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.utils.extmath import safe_sparse_dot, softmax
def fit_single(solver, X, y, penalty='l2', single_target=True, C=1,
max_iter=10, skip_slow=False):
if skip_slow and solver == 'lightning' and penalty == 'l1':
        print('skipping l1 logistic regression with solver lightning.')
return
print('Solving %s logistic regression with penalty %s, solver %s.'
% ('binary' if single_target else 'multinomial',
penalty, solver))
if solver == 'lightning':
from lightning.classification import SAGAClassifier
if single_target or solver not in ['sag', 'saga']:
multi_class = 'ovr'
else:
multi_class = 'multinomial'
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42,
stratify=y)
n_samples = X_train.shape[0]
n_classes = np.unique(y_train).shape[0]
test_scores = [1]
train_scores = [1]
accuracies = [1 / n_classes]
times = [0]
if penalty == 'l2':
alpha = 1. / (C * n_samples)
beta = 0
lightning_penalty = None
else:
alpha = 0.
beta = 1. / (C * n_samples)
lightning_penalty = 'l1'
for this_max_iter in range(1, max_iter + 1, 2):
print('[%s, %s, %s] Max iter: %s' %
('binary' if single_target else 'multinomial',
penalty, solver, this_max_iter))
if solver == 'lightning':
lr = SAGAClassifier(loss='log', alpha=alpha, beta=beta,
penalty=lightning_penalty,
tol=-1, max_iter=this_max_iter)
else:
lr = LogisticRegression(solver=solver,
multi_class=multi_class,
C=C,
penalty=penalty,
fit_intercept=False, tol=1e-24,
max_iter=this_max_iter,
random_state=42,
)
t0 = time.clock()
lr.fit(X_train, y_train)
train_time = time.clock() - t0
scores = []
for (X, y) in [(X_train, y_train), (X_test, y_test)]:
try:
y_pred = lr.predict_proba(X)
except NotImplementedError:
# Lightning predict_proba is not implemented for n_classes > 2
y_pred = _predict_proba(lr, X)
score = log_loss(y, y_pred, normalize=False) / n_samples
score += (0.5 * alpha * np.sum(lr.coef_ ** 2) +
beta * np.sum(np.abs(lr.coef_)))
scores.append(score)
train_score, test_score = tuple(scores)
y_pred = lr.predict(X_test)
accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
test_scores.append(test_score)
train_scores.append(train_score)
accuracies.append(accuracy)
times.append(train_time)
return lr, times, train_scores, test_scores, accuracies
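# Note: the train/test "scores" recorded by fit_single() are penalized objectives (the summed
# log-loss divided by the training-set size, plus the l2 or l1 term matching the chosen
# penalty), not plain log-loss; the accuracies list holds the raw test-set accuracy after
# each max_iter setting.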
def _predict_proba(lr, X):
pred = safe_sparse_dot(X, lr.coef_.T)
if hasattr(lr, "intercept_"):
pred += lr.intercept_
return softmax(pred)
def exp(solvers, penalties, single_target, n_samples=30000, max_iter=20,
dataset='rcv1', n_jobs=1, skip_slow=False):
mem = Memory(cachedir=expanduser('~/cache'), verbose=0)
if dataset == 'rcv1':
rcv1 = fetch_rcv1()
lbin = LabelBinarizer()
lbin.fit(rcv1.target_names)
X = rcv1.data
y = rcv1.target
y = lbin.inverse_transform(y)
le = LabelEncoder()
y = le.fit_transform(y)
if single_target:
y_n = y.copy()
y_n[y > 16] = 1
y_n[y <= 16] = 0
y = y_n
elif dataset == 'digits':
digits = load_digits()
X, y = digits.data, digits.target
if single_target:
y_n = y.copy()
y_n[y < 5] = 1
y_n[y >= 5] = 0
y = y_n
elif dataset == 'iris':
iris = load_iris()
X, y = iris.data, iris.target
elif dataset == '20newspaper':
ng = fetch_20newsgroups_vectorized()
X = ng.data
y = ng.target
if single_target:
y_n = y.copy()
y_n[y > 4] = 1
y_n[y <= 16] = 0
y = y_n
X = X[:n_samples]
y = y[:n_samples]
cached_fit = mem.cache(fit_single)
out = Parallel(n_jobs=n_jobs, mmap_mode=None)(
delayed(cached_fit)(solver, X, y,
penalty=penalty, single_target=single_target,
C=1, max_iter=max_iter, skip_slow=skip_slow)
for solver in solvers
for penalty in penalties)
res = []
idx = 0
for solver in solvers:
for penalty in penalties:
if not (skip_slow and solver == 'lightning' and penalty == 'l1'):
lr, times, train_scores, test_scores, accuracies = out[idx]
this_res = dict(solver=solver, penalty=penalty,
single_target=single_target,
times=times, train_scores=train_scores,
test_scores=test_scores,
accuracies=accuracies)
res.append(this_res)
idx += 1
with open('bench_saga.json', 'w+') as f:
json.dump(res, f)
def plot():
import pandas as pd
with open('bench_saga.json', 'r') as f:
f = json.load(f)
res = pd.DataFrame(f)
res.set_index(['single_target', 'penalty'], inplace=True)
grouped = res.groupby(level=['single_target', 'penalty'])
colors = {'saga': 'blue', 'liblinear': 'orange', 'lightning': 'green'}
for idx, group in grouped:
single_target, penalty = idx
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(131)
train_scores = group['train_scores'].values
ref = np.min(np.concatenate(train_scores)) * 0.999
for scores, times, solver in zip(group['train_scores'], group['times'],
group['solver']):
scores = scores / ref - 1
ax.plot(times, scores, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Training objective (relative to min)')
ax.set_yscale('log')
ax = fig.add_subplot(132)
test_scores = group['test_scores'].values
ref = np.min(np.concatenate(test_scores)) * 0.999
for scores, times, solver in zip(group['test_scores'], group['times'],
group['solver']):
scores = scores / ref - 1
ax.plot(times, scores, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Test objective (relative to min)')
ax.set_yscale('log')
ax = fig.add_subplot(133)
for accuracy, times, solver in zip(group['accuracies'], group['times'],
group['solver']):
ax.plot(times, accuracy, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Test accuracy')
ax.legend()
name = 'single_target' if single_target else 'multi_target'
name += '_%s' % penalty
plt.suptitle(name)
name += '.png'
fig.tight_layout()
fig.subplots_adjust(top=0.9)
plt.savefig(name)
plt.close(fig)
if __name__ == '__main__':
solvers = ['saga', 'liblinear', 'lightning']
penalties = ['l1', 'l2']
single_target = True
exp(solvers, penalties, single_target, n_samples=None, n_jobs=1,
dataset='20newspaper', max_iter=20)
plot()
| bsd-3-clause |
lidore/trading-with-python | lib/extra.py | 77 | 2540 | '''
Created on Apr 28, 2013
Copyright: Jev Kuznetsov
License: BSD
'''
from __future__ import print_function
import sys
import urllib
import os
import xlrd # module for excel file reading
import pandas as pd
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 50
self.__update_amount(0)
def animate(self, iteration):
print('\r', self, end='')
sys.stdout.flush()
self.update_iteration(iteration + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
def getSpyHoldings(dataDir):
''' get SPY holdings from the net, uses temp data storage to save xls file '''
dest = os.path.join(dataDir,"spy_holdings.xls")
if os.path.exists(dest):
print('File found, skipping download')
else:
print('saving to', dest)
urllib.urlretrieve ("https://www.spdrs.com/site-content/xls/SPY_All_Holdings.xls?fund=SPY&docname=All+Holdings&onyx_code1=1286&onyx_code2=1700",
dest) # download xls file and save it to data directory
# parse
wb = xlrd.open_workbook(dest) # open xls file, create a workbook
sh = wb.sheet_by_index(0) # select first sheet
data = {'name':[], 'symbol':[], 'weight':[],'sector':[]}
for rowNr in range(5,505): # cycle through the rows
v = sh.row_values(rowNr) # get all row values
data['name'].append(v[0])
data['symbol'].append(v[1]) # symbol is in the second column, append it to the list
data['weight'].append(float(v[2]))
data['sector'].append(v[3])
return pd.DataFrame(data)
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# univariate F-scores, used below as the reference ranking for the informative features
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
Sklearn-HMM/scikit-learn-HMM | sklean-hmm/linear_model/coordinate_descent.py | 2 | 53437 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
import itertools
import operator
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data
from ..utils import array2d, atleast2d_or_csc
from ..cross_validation import _check_cv as check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape = (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : bool
Fit or not an intercept
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
if Xy is None:
X = atleast2d_or_csc(X, copy=(copy_X and fit_intercept and not
sparse.isspmatrix(X)))
if not sparse.isspmatrix(X):
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
n_samples = X.shape[0]
else:
n_samples = len(y)
alpha_max = np.abs(Xy).max() / (n_samples * l1_ratio)
alphas = np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
return alphas
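# Illustrative sketch of the grid (not part of the public API): for a small centered
# problem with the default l1_ratio=1.0,
#
#   X = np.array([[1.], [2.], [3.]])
#   y = np.array([1., 2., 3.])
#   alphas = _alpha_grid(X, y, n_alphas=5, fit_intercept=False)
#
# the largest value is alphas[0] == np.abs(np.dot(X.T, y)).max() / X.shape[0], and the
# remaining values decrease geometrically down to eps * alphas[0].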
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, fit_intercept=None,
normalize=None, copy_X=True, coef_init=None,
verbose=False, return_models=False,
**params):
"""Compute Lasso path with coordinate descent
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape = (n_samples,)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
fit_intercept : bool
Fit or not an intercept.
WARNING : will be deprecated in 0.16
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
WARNING : will be deprecated in 0.16
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity
return_models : boolean, optional, default True
If ``True``, the function will return list of models. Setting it
to ``False`` will change the function output returning the values
of the alphas and the coefficients along the path. Returning the
model list will be removed in version 0.16.
params : kwargs
keyword arguments passed to the coordinate descent solver.
Returns
-------
models : a list of models along the regularization path
(Returned only if ``return_models`` is set to ``True``.)
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
(Returned, along with ``coefs``, when ``return_models`` is set
to ``False``.)
coefs : array, shape (n_features, n_alphas)
Coefficients along the path.
(Returned, along with ``alphas``, when ``return_models`` is set
to ``False``.)
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
(Returned, along with ``alphas``, when ``return_models`` is set
to ``False``.)
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases the Lars solver may be significantly
faster at computing this path. In particular, linear interpolation
can be used to retrieve model coefficients between the values output
by lars_path.
Deprecation Notice: Setting ``return_models`` to ``False`` will make
the Lasso Path return an output in the style used by :func:`lars_path`.
This will become the norm as of version 0.16. Leaving ``return_models``
set to ``True`` will let the function return a list of models as before.
Examples
--------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5],
... fit_intercept=False)
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
return_models=return_models, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, fit_intercept=True,
normalize=False, copy_X=True, coef_init=None,
verbose=False, return_models=False,
**params):
"""Compute Elastic-Net path with coordinate descent
The Elastic Net optimization function is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape = (n_samples,)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
fit_intercept : bool
Fit or not an intercept.
WARNING : will be deprecated in 0.16
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
WARNING : will be deprecated in 0.16
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity
return_models : boolean, optional, default False
If ``True``, the function will return list of models. Setting it
to ``False`` will change the function output returning the values
of the alphas and the coefficients along the path. Returning the
model list will be removed in version 0.16.
params : kwargs
keyword arguments passed to the coordinate descent solver.
Returns
-------
models : a list of models along the regularization path
(Returned only if ``return_models`` is set to ``True``.)
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
(Returned, along with ``coefs``, when ``return_models`` is set
to ``False``.)
coefs : array, shape (n_features, n_alphas)
Coefficients along the path.
(Returned, along with ``alphas``, when ``return_models`` is set
to ``False``.)
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
(Returned, along with ``alphas``, when ``return_models`` is set
to ``False``.)
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
Deprecation Notice: Setting ``return_models`` to ``False`` will make
enet_path return an output in the style used by :func:`lars_path`.
This will become the norm as of version 0.16. Leaving ``return_models``
set to ``True`` will let the function return a list of models as before.
See also
--------
ElasticNet
ElasticNetCV
"""
if return_models:
warnings.warn("Use enet_path(return_models=False), as it returns the"
" coefficients and alphas instead of just a list of"
" models as previously `lasso_path`/`enet_path` did."
" `return_models` will eventually be removed in 0.16,"
" after which, returning alphas and coefs"
" will become the norm.",
DeprecationWarning, stacklevel=2)
if normalize is True:
warnings.warn("normalize param will be removed in 0.16."
" Intercept fitting and feature normalization will be"
" done in estimators.",
DeprecationWarning, stacklevel=2)
else:
normalize = False
if fit_intercept is True or fit_intercept is None:
warnings.warn("fit_intercept param will be removed in 0.16."
" Intercept fitting and feature normalization will be"
" done in estimators.",
DeprecationWarning, stacklevel=2)
if fit_intercept is None:
fit_intercept = True
X = atleast2d_or_csc(X, dtype=np.float64, order='F',
copy=copy_X and fit_intercept)
n_samples, n_features = X.shape
if sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.ones(n_features)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy=False)
n_samples = X.shape[0]
if alphas is None:
# No need to normalize or fit an intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
if coef_init is None:
coef_ = np.zeros(n_features, dtype=np.float64)
else:
coef_ = coef_init
models = []
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
dual_gaps = np.empty(n_alphas)
tol = params.get('tol', 1e-4)
positive = params.get('positive', False)
max_iter = params.get('max_iter', 1000)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if sparse.isspmatrix(X):
coef_, dual_gap_, eps_ = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, positive)
else:
coef_, dual_gap_, eps_ = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, positive)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations')
coefs[:, i] = coef_
dual_gaps[i] = dual_gap_
if return_models:
model = ElasticNet(
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept if sparse.isspmatrix(X) else False,
precompute=precompute)
model.coef_ = coefs[:, i]
model.dual_gap_ = dual_gaps[i]  # gap for this alpha, set just above
if fit_intercept and not sparse.isspmatrix(X):
model.fit_intercept = True
model._set_intercept(X_mean, y_mean, X_std)
models.append(model)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_models:
return models
else:
return alphas, coefs, dual_gaps
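# A minimal, illustrative call of enet_path on synthetic data (not part of the
# original module; only output shapes are asserted):
#
#   >>> import numpy as np
#   >>> X = np.array([[1., 2.], [3., 4.], [5., 6.], [7., 8.]])
#   >>> y = np.array([1., 2., 3., 4.])
#   >>> alphas, coefs, gaps = enet_path(X, y, l1_ratio=0.8, n_alphas=5,
#   ...                                 fit_intercept=False)
#   >>> coefs.shape          # (n_features, n_alphas)
#   (2, 5)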
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear Model trained with L1 and L2 prior as regularizer
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept: bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
max_iter: int, optional
The maximum number of iterations
copy_X : boolean, optional, default False
If ``True``, X will be copied; else, it may be overwritten.
tol: float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive: bool, optional
When set to ``True``, forces the coefficients to be positive.
Attributes
----------
``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
``sparse_coef_`` : scipy.sparse matrix, shape = (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
``intercept_`` : float | array, shape = (n_targets,)
independent term in decision function.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
def fit(self, X, y):
"""Fit model with coordinate descent.
Parameters
----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape = (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
X = atleast2d_or_csc(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
# From now on X can be touched inplace
y = np.asarray(y, dtype=np.float64)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std,
coef_init=coef_[k], max_iter=self.max_iter)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape = (n_samples,)
The predicted decision function
"""
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self).decision_function(X)
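# Typical ElasticNet usage, sketched on synthetic data (illustrative only;
# exact coefficient values are omitted):
#
#   >>> import numpy as np
#   >>> X = np.array([[0., 0.], [1., 1.], [2., 2.]])
#   >>> y = np.array([0., 1., 2.])
#   >>> enet = ElasticNet(alpha=0.1, l1_ratio=0.7)
#   >>> enet = enet.fit(X, y)          # fit returns self
#   >>> enet.coef_.shape
#   (2,)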
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
max_iter: int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
Attributes
----------
``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
``sparse_coef_`` : scipy.sparse matrix, shape = (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
``intercept_`` : float | array, shape = (n_targets,)
independent term in decision function.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute='auto', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute='auto', copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, l1_ratio=1,
X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype: a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
precompute = path_params['precompute']
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
# del path_params['precompute']
path_params = path_params.copy()
path_params['fit_intercept'] = False
path_params['normalize'] = False
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = atleast2d_or_csc(X_train, dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y[train], **path_params)
del X_train
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean - np.dot(X_mean, coefs)
residues = safe_sparse_dot(X_test, coefs) - y_test[:, np.newaxis]
residues += intercepts[np.newaxis, :]
this_mses = (residues ** 2).mean(axis=0)
return this_mses, l1_ratio
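# In words: _path_residuals fits the path on the training fold and then, for
# each alpha, evaluates mean((X_test . coef + intercept - y_test) ** 2) on the
# held-out fold.  A hand-rolled version of that last step, with made-up
# numbers for illustration only:
#
#   >>> import numpy as np
#   >>> X_test = np.array([[1., 0.], [0., 1.]])
#   >>> y_test = np.array([1., 2.])
#   >>> coefs = np.array([[1., 0.5], [0., 0.5]])   # (n_features, n_alphas)
#   >>> intercepts = np.array([0., 1.])            # one intercept per alpha
#   >>> residues = X_test.dot(coefs) - y_test[:, np.newaxis] + intercepts
#   >>> list((residues ** 2).mean(axis=0))
#   [2.0, 0.25]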
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
# Dealing right with copy_X is important in the following:
# multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = atleast2d_or_csc(X, copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = atleast2d_or_csc(X, dtype=np.float64, order='F',
copy=copy_X)
copy_X = False
y = np.asarray(y, dtype=np.float64)
if y.ndim > 1:
raise ValueError("For multi-task outputs, fit the linear model "
"per output/task")
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
if alphas is None:
mean_l1_ratio = 1.
if hasattr(self, 'l1_ratio'):
mean_l1_ratio = np.mean(self.l1_ratio)
alphas = _alpha_grid(X, y, l1_ratio=mean_l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X)
n_alphas = len(alphas)
path_params.update({'alphas': alphas, 'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
all_mse_paths = list()
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
for l1_ratio, mse_alphas in itertools.groupby(
Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_path_residuals)(
X, y, train, test, self.path, path_params,
l1_ratio=l1_ratio, X_order='F',
dtype=np.float64)
for l1_ratio in l1_ratios for train, test in folds
), operator.itemgetter(1)):
mse_alphas = [m[0] for m in mse_alphas]
mse_alphas = np.array(mse_alphas)
mse = np.mean(mse_alphas, axis=0)
i_best_alpha = np.argmin(mse)
this_best_mse = mse[i_best_alpha]
all_mse_paths.append(mse_alphas.T)
if this_best_mse < best_mse:
best_alpha = alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
self.alphas_ = np.asarray(alphas)
self.mse_path_ = np.squeeze(all_mse_paths)
# Refit the model with the parameters selected
model = ElasticNet()
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.fit(X, y)
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
return self
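# The "double for loop folded in one" above leans on itertools.groupby: the
# Parallel call yields (mse_path, l1_ratio) tuples ordered by l1_ratio, and
# grouping on item 1 splits them back per l1_ratio.  A toy version of that
# grouping (synthetic tuples, for illustration only):
#
#   >>> import itertools, operator
#   >>> results = [('mse_a1', 0.5), ('mse_a2', 0.5), ('mse_b1', 0.9)]
#   >>> [(k, [r[0] for r in rs])
#   ...  for k, rs in itertools.groupby(results, operator.itemgetter(1))]
#   [(0.5, ['mse_a1', 'mse_a2']), (0.9, ['mse_b1'])]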
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter: int, optional
The maximum number of iterations
tol: float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
amount of verbosity
Attributes
----------
``alpha_`` : float
The amount of penalization chosen by cross validation
``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
``intercept_`` : float | array, shape = (n_targets,)
independent term in decision function.
``mse_path_`` : array, shape = (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
``alphas_`` : numpy array
The grid of alphas used for fitting
``l1_ratio_`` : int
An artifact of the super class LinearModelCV. In this case,
``l1_ratio_ = 1`` because the Lasso estimator uses an L1 penalty
by definition.
Typically it is a float between 0 and 1 passed to an estimator such as
ElasticNet (scaling between l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
``dual_gap_`` : numpy array
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
n_jobs = 1
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose)
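# A short LassoCV sketch on synthetic data (illustrative; the selected alpha_
# depends on the folds, so only the coefficient shape is asserted):
#
#   >>> import numpy as np
#   >>> X = np.array([[0., 0.], [1., 1.], [2., 2.],
#   ...               [3., 3.], [4., 4.], [5., 5.]])
#   >>> y = np.array([0., 1., 2., 3., 4., 5.])
#   >>> clf = LassoCV(n_alphas=10).fit(X, y)
#   >>> clf.coef_.shape
#   (2,)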
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
amount of verbosity
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
Attributes
----------
``alpha_`` : float
The amount of penalization chosen by cross validation
``l1_ratio_`` : float
The compromise between l1 and l2 penalization chosen by
cross validation
``coef_`` : array, shape = (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
``intercept_`` : float | array, shape = (n_targets,)
Independent term in the decision function.
``mse_path_`` : array, shape = (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
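# ElasticNetCV cross-validates over both alpha and, optionally, a list of
# l1_ratio values.  A sketch on synthetic data (illustrative; only the fact
# that the selected l1_ratio_ comes from the supplied grid is asserted):
#
#   >>> import numpy as np
#   >>> X = np.array([[0., 0.], [1., 1.], [2., 2.],
#   ...               [3., 3.], [4., 4.], [5., 5.]])
#   >>> y = np.array([0., 1., 2., 3., 4., 5.])
#   >>> cv_model = ElasticNetCV(l1_ratio=[.1, .5, .9], n_alphas=5).fit(X, y)
#   >>> cv_model.l1_ratio_ in [.1, .5, .9]
#   True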
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norms of each row.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with ``0 < l1_ratio <= 1``.
For ``l1_ratio = 1`` the penalty is an L1/L2 penalty, i.e. the
``||W||_21`` term alone. For ``l1_ratio = 0`` it reduces to an L2
(squared Frobenius) penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
``intercept_`` : array, shape = (n_tasks,)
Independent term in decision function.
``coef_`` : array, shape = (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
----------
X: ndarray, shape = (n_samples, n_features)
Data
y: ndarray, shape = (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = array2d(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
squeeze_me = False
if y.ndim == 1:
squeeze_me = True
y = y[:, np.newaxis]
n_samples, n_features = X.shape
_, n_tasks = y.shape
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
self.coef_, self.dual_gap_, self.eps_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol)
self._set_intercept(X_mean, y_mean, X_std)
# Make sure that the coef_ have the same shape as the given 'y',
# to predict with the same shape
if squeeze_me:
self.coef_ = self.coef_.squeeze()
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norms of each row.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
``coef_`` : array, shape = (n_tasks, n_features)
parameter vector (W in the cost function formula)
``intercept_`` : array, shape = (n_tasks,)
independent term in decision function.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
| bsd-3-clause |
wazeerzulfikar/scikit-learn | benchmarks/bench_plot_nmf.py | 27 | 15580 | """
Benchmarks of Non-Negative Matrix Factorization
"""
# Authors: Tom Dupre la Tour (benchmark)
# Chih-Jen Linn (original projected gradient NMF implementation)
# Anthony Di Franco (projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import warnings
import numbers
import numpy as np
import matplotlib.pyplot as plt
import pandas
from sklearn.utils.testing import ignore_warnings
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition.nmf import NMF
from sklearn.decomposition.nmf import _initialize_nmf
from sklearn.decomposition.nmf import _beta_divergence
from sklearn.decomposition.nmf import INTEGER_TYPES, _check_init
from sklearn.externals.joblib import Memory
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot, squared_norm
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted, check_non_negative
mem = Memory(cachedir='.', verbose=0)
###################
# Start of _PGNMF #
###################
# This class implements a projected gradient solver for the NMF.
# The projected gradient solver was removed from scikit-learn in version 0.19,
# and a simplified copy is used here for comparison purpose only.
# It is not tested, and it may change or disappear without notice.
def _norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return np.sqrt(squared_norm(x))
def _nls_subproblem(X, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the projected
gradient descent algorithm.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Constant matrix.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtX = safe_sparse_dot(W.T, X)
WtW = np.dot(W.T, W)
# values justified in the paper (alpha is renamed gamma)
gamma = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtX
if alpha > 0 and l1_ratio == 1.:
grad += alpha
elif alpha > 0:
grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if _norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(20):
# Gradient step.
Hn = H - gamma * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_gamma = not suff_decr
if decr_gamma:
if suff_decr:
H = Hn
break
else:
gamma *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
gamma /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.",
ConvergenceWarning)
return H, grad, n_iter
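# A quick illustration of the subproblem interface (synthetic non-negative
# matrices, not from the original benchmark): given X ~ (n_samples,
# n_features) and a fixed W ~ (n_samples, n_components), it refines
# H ~ (n_components, n_features) under the constraint H >= 0.
#
#   >>> import numpy as np
#   >>> rng = np.random.RandomState(0)
#   >>> X = np.abs(rng.randn(6, 4))
#   >>> W = np.abs(rng.randn(6, 2))
#   >>> H0 = np.abs(rng.randn(2, 4))
#   >>> H, grad, n_iter = _nls_subproblem(X, W, H0, tol=1e-4, max_iter=200)
#   >>> H.shape
#   (2, 4)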
def _fit_projected_gradient(X, W, H, tol, max_iter, nls_max_iter, alpha,
l1_ratio):
gradW = (np.dot(W, np.dot(H, H.T)) -
safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H) -
safe_sparse_dot(W.T, X, dense_output=True))
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
# max(0.001, tol) to force alternating minimizations of W and H
tolW = max(0.001, tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, max_iter + 1):
# stopping condition as discussed in paper
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
break
# update W
Wt, gradWt, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
W, gradW = Wt.T, gradWt.T
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0 # fix up negative zeros
if n_iter == max_iter:
Wt, _, _ = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
W = Wt.T
return W, H, n_iter
class _PGNMF(NMF):
"""Non-Negative Matrix Factorization (NMF) with projected gradient solver.
This class is private and for comparison purpose only.
It may change or disappear without notice.
"""
def __init__(self, n_components=None, solver='pg', init=None,
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., nls_max_iter=10):
super(_PGNMF, self).__init__(
n_components=n_components, init=init, solver=solver, tol=tol,
max_iter=max_iter, random_state=random_state, alpha=alpha,
l1_ratio=l1_ratio)
self.nls_max_iter = nls_max_iter
def fit(self, X, y=None, **params):
self.fit_transform(X, **params)
return self
def transform(self, X):
check_is_fitted(self, 'components_')
H = self.components_
W, _, self.n_iter_ = self._fit_transform(X, H=H, update_H=False)
return W
def inverse_transform(self, W):
check_is_fitted(self, 'components_')
return np.dot(W, self.components_)
def fit_transform(self, X, y=None, W=None, H=None):
W, H, self.n_iter = self._fit_transform(X, W=W, H=H, update_H=True)
self.components_ = H
return W
def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, "NMF (input X)")
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
if (not isinstance(n_components, INTEGER_TYPES) or
n_components <= 0):
raise ValueError("Number of components must be a positive integer;"
" got (n_components=%r)" % n_components)
if not isinstance(self.max_iter, INTEGER_TYPES) or self.max_iter < 0:
raise ValueError("Maximum number of iterations must be a positive "
"integer; got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
# check W and H, or initialize them
if self.init == 'custom' and update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=self.init,
random_state=self.random_state)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(
X, W, H, self.tol, self.max_iter, self.nls_max_iter,
self.alpha, self.l1_ratio)
else: # transform
Wt, _, n_iter = _nls_subproblem(X.T, H.T, W.T, self.tol,
self.nls_max_iter,
alpha=self.alpha,
l1_ratio=self.l1_ratio)
W = Wt.T
if n_iter == self.max_iter and self.tol > 0:
warnings.warn("Maximum number of iteration %d reached. Increase it"
" to improve convergence." % self.max_iter,
ConvergenceWarning)
return W, H, n_iter
#################
# End of _PGNMF #
#################
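# Example use of the private _PGNMF wrapper above (illustrative only; it
# mirrors the public NMF API, so only output shapes are asserted):
#
#   >>> import numpy as np
#   >>> rng = np.random.RandomState(0)
#   >>> X = np.abs(rng.randn(20, 6))
#   >>> pg = _PGNMF(n_components=3, init='random', random_state=0)
#   >>> W = pg.fit_transform(X)
#   >>> W.shape, pg.components_.shape
#   ((20, 3), (3, 6))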
def plot_results(results_df, plot_name):
if results_df is None:
return None
plt.figure(figsize=(16, 6))
colors = 'bgr'
markers = 'ovs'
ax = plt.subplot(1, 3, 1)
for i, init in enumerate(np.unique(results_df['init'])):
plt.subplot(1, 3, i + 1, sharex=ax, sharey=ax)
for j, method in enumerate(np.unique(results_df['method'])):
mask = np.logical_and(results_df['init'] == init,
results_df['method'] == method)
selected_items = results_df[mask]
plt.plot(selected_items['time'], selected_items['loss'],
color=colors[j % len(colors)], ls='-',
marker=markers[j % len(markers)],
label=method)
plt.legend(loc=0, fontsize='x-small')
plt.xlabel("Time (s)")
plt.ylabel("loss")
plt.title("%s" % init)
plt.suptitle(plot_name, fontsize=16)
@ignore_warnings(category=ConvergenceWarning)
# use joblib to cache the results.
# X_shape is specified in the arguments to avoid hashing X
@mem.cache(ignore=['X', 'W0', 'H0'])
def bench_one(name, X, W0, H0, X_shape, clf_type, clf_params, init,
n_components, random_state):
W = W0.copy()
H = H0.copy()
clf = clf_type(**clf_params)
st = time()
W = clf.fit_transform(X, W=W, H=H)
end = time()
H = clf.components_
this_loss = _beta_divergence(X, W, H, 2.0, True)
duration = end - st
return this_loss, duration
def run_bench(X, clfs, plot_name, n_components, tol, alpha, l1_ratio):
start = time()
results = []
for name, clf_type, iter_range, clf_params in clfs:
print("Training %s:" % name)
for rs, init in enumerate(('nndsvd', 'nndsvdar', 'random')):
print(" %s %s: " % (init, " " * (8 - len(init))), end="")
W, H = _initialize_nmf(X, n_components, init, 1e-6, rs)
for max_iter in iter_range:
clf_params['alpha'] = alpha
clf_params['l1_ratio'] = l1_ratio
clf_params['max_iter'] = max_iter
clf_params['tol'] = tol
clf_params['random_state'] = rs
clf_params['init'] = 'custom'
clf_params['n_components'] = n_components
this_loss, duration = bench_one(name, X, W, H, X.shape,
clf_type, clf_params,
init, n_components, rs)
init_name = "init='%s'" % init
results.append((name, this_loss, duration, init_name))
# print("loss: %.6f, time: %.3f sec" % (this_loss, duration))
print(".", end="")
sys.stdout.flush()
print(" ")
# Use a pandas DataFrame to organize the results
results_df = pandas.DataFrame(results,
columns="method loss time init".split())
print("Total time = %0.3f sec\n" % (time() - start))
# plot the results
plot_results(results_df, plot_name)
return results_df
def load_20news():
print("Loading 20 newsgroups dataset")
print("-----------------------------")
from sklearn.datasets import fetch_20newsgroups
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data)
return tfidf
def load_faces():
print("Loading Olivetti face dataset")
print("-----------------------------")
from sklearn.datasets import fetch_olivetti_faces
faces = fetch_olivetti_faces(shuffle=True)
return faces.data
def build_clfs(cd_iters, pg_iters, mu_iters):
clfs = [("Coordinate Descent", NMF, cd_iters, {'solver': 'cd'}),
("Projected Gradient", _PGNMF, pg_iters, {'solver': 'pg'}),
("Multiplicative Update", NMF, mu_iters, {'solver': 'mu'}),
]
return clfs
if __name__ == '__main__':
alpha = 0.
l1_ratio = 0.5
n_components = 10
tol = 1e-15
# first benchmark on 20 newsgroup dataset: sparse, shape(11314, 39116)
plot_name = "20 Newsgroups sparse dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 6)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_20news = load_20news()
run_bench(X_20news, clfs, plot_name, n_components, tol, alpha, l1_ratio)
# second benchmark on Olivetti faces dataset: dense, shape(400, 4096)
plot_name = "Olivetti Faces dense dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 12)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_faces = load_faces()
run_bench(X_faces, clfs, plot_name, n_components, tol, alpha, l1_ratio,)
plt.show()
| bsd-3-clause |
baxen/fishbowl | fishbowl/color.py | 1 | 4322 | """
color - Definitions and controls for color palettes
Provides a series of custom qualitative palettes and an interface for saving
(and developing) additional palettes. All functions which accept a palette
support the `palettable` package.
This module is used by core to set colors with the simple tools, but can also
be accessed directly to retrieve and customize palettes.
"""
import itertools
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from cycler import cycler
from fishbowl.base import loads_from_json, saves_to_json
def next_color():
""" Next color from the mpl color cycle
Note that this cycle is independent of the one used within mpl
"""
if not hasattr(next_color, '_cycle'):
next_color._cycle = matplotlib.rcParams['axes.prop_cycle']
next_color._cycle = itertools.cycle(next_color._cycle)
return next(next_color._cycle)['color']
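# Illustrative use (the colors returned depend on the active matplotlib
# prop_cycle, so no output is shown):
#
#   >>> c1 = next_color()   # e.g. '#1f77b4' with the default cycle
#   >>> c2 = next_color()   # the following entry in the cycle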
def cmap(arg):
""" Configuration for cmap specified by arg
Parameters
----------
arg
any cmap known by matplotlib or a palettable.palette instance.
"""
if hasattr(arg, 'mpl_colormap'):
matplotlib.cm.register_cmap(arg.name, arg.mpl_colormap)
return {'image.cmap': arg.name}
elif isinstance(arg, str):
return {'image.cmap': arg}
else:
raise ValueError('Could not interpret the argument provided for cmap: '
+ str(arg))
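# For example, a plain colormap name known to matplotlib is passed through
# unchanged (illustrative):
#
#   >>> cmap('viridis')
#   {'image.cmap': 'viridis'}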
@loads_from_json('fishbowl.palettes.json')
def palette(arg):
""" Configuration for palette specified by arg
Parameters
----------
arg
any name known by fishbowl or a palettable.palette instance.
"""
if hasattr(arg, 'hex_colors'):
return {'color.palette': arg.hex_colors}
@saves_to_json('fishbowl.palettes.json')
def save_palette(name, config):
""" Save a new color palette as name.
Parameters
----------
name : str
save name for the palette
config
list of hex colors, palettable.palette instance, or
name known by fishbowl
"""
return palette(config)
def draw_box_palette(colors, save_name=None, block=10,
sep=1, background='white'):
""" Draw a series of boxes of color in a grid to show a palette.
Parameters
----------
colors
a list or list of lists of colors for the boxes
a list of lists is drawn as a 2D grid
each entry must be a color understood by matplotlib.
save_name : str
if provided, saves plot to save_name
block : int
size of the side of each box (pixels)
sep : int
separation between each box (pixels)
background
color of the background between palettes
"""
if not hasattr(colors[0], "__iter__"):
colors = [colors]
# Calculate pixel sizes
rows = len(colors)
cols = len(colors[0])
side = block + 2 * sep
image = np.zeros((side * rows, side * cols))
# Assign a unique, consecutive value to each color block
for col in range(cols):
for row in range(rows):
image[sep + side * row: sep + block + side * row, sep +
side * col:sep + block + side * col] = rows * col + row + 1
# Figure without border or axis, sized to just contain the blocks
fig = plt.figure(frameon=False, figsize=(cols, rows))
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# Show it as an image, using one color per box value in a cmap
ax.imshow(image,
cmap=matplotlib.colors.ListedColormap(
[background] + list(itertools.chain(*colors))),
interpolation="nearest")
if save_name:
fig.savefig(save_name, dpi=120)
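# A small usage sketch (the hex values and the save name are arbitrary):
#
#   >>> draw_box_palette([['#004358', '#1F8A70', '#BEDB39'],
#   ...                   ['#FD7400', '#FFE11A', '#004358']],
#   ...                  save_name='palette_preview.png')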
def draw_sin_palette(colors, save_name=None):
""" Draw a series of sin waves in each color to show a palette.
Parameters
----------
colors : array_like
list of colors, each must be understood by matplotlib
save_name : str
if provided, saves plot to save_name
"""
fig, (ax) = plt.subplots()
ax.set_prop_cycle(cycler('color', colors))
n = len(colors)
x = np.linspace(-5, 5, 100)
for i in range(1, n + 1):
ax.plot(x, np.sin(x + i * .5) * (n + 1 - i))
ax.set_axis_off()
if save_name:
fig.savefig(save_name, dpi=300)
| mit |
necromuralist/student_intervention | student_intervention/model_probabilities.py | 1 | 1298 | from sklearn.preprocessing import MinMaxScaler
from bokeh.palettes import Spectral7
# Imports added so this notebook-style fragment is closer to self-contained;
# it still assumes `X_test` (a pandas DataFrame of features), a fitted `grid`
# (e.g. a GridSearchCV), and `non_zero_variables` are defined earlier.
import warnings
import numpy
from bokeh.plotting import figure as b_figure
from bokeh.models import ColumnDataSource, HoverTool
tools = "pan,wheel_zoom,box_zoom,reset,resize,hover"
#tooltips = [('Variable', '@{0}'.format(variable)) for variable in non_zero_variables]
fig = b_figure(tools=tools, title='Variable Probabilities')
scaler = MinMaxScaler()
#x_normed = normalize(X_test)
for v_index, variable in enumerate(non_zero_variables):
index = numpy.where(X_test.columns==variable)
x_input = numpy.zeros((1, len(X_test.columns)))
x = numpy.array([value for value in sorted(X_test[variable].unique())])
with warnings.catch_warnings():
warnings.simplefilter('ignore')
x = scaler.fit_transform(x)
y = []
x = [x[0], x[-1]]
for value in x:
x_input[0][index] = value
y.append(grid.best_estimator_.predict_proba(x_input)[0][1])
#lines = plot.plot(x, y, label=variable)
source = ColumnDataSource({variable: x, 'passed': y, 'name': [variable for item in x]})
#fig.circle(variable, 'passed', name=variable, source=source)
hover = fig.select(dict(type=HoverTool))
hover.tooltips = [('Variable', '@name'), ("(x, y)", '($x, $y)')]
hover.mode = 'mouse'
#hover.line_policy='none'
fig.line(x, y, source=source, line_color=Spectral7[v_index], legend=variable)
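# Each plotted line runs from the scaled minimum to the scaled maximum of one
# variable (presumably those with non-zero model coefficients, given the name
# non_zero_variables) against the predicted probability of the positive
# ("passed") class; the hover tool reports the variable name and coordinates.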
| mit |
wanglei828/apollo | modules/tools/mapshow/libs/plot_planning.py | 3 | 3641 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import argparse
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from cyber_py import cyber
from modules.planning.proto import planning_pb2
from subplot_st_main import StMainSubplot
from subplot_path import PathSubplot
from subplot_sl_main import SlMainSubplot
from subplot_st_speed import StSpeedSubplot
from subplot_speed import SpeedSubplot
from localization import Localization
from planning import Planning
planning = Planning()
localization = Localization()
def update(frame_number):
# st_main_subplot.show(planning)
# st_speed_subplot.show(planning)
map_path_subplot.show(planning, localization)
dp_st_main_subplot.show(planning)
qp_st_main_subplot.show(planning)
speed_subplot.show(planning)
sl_main_subplot.show(planning)
st_speed_subplot.show(planning)
def planning_callback(planning_pb):
planning.update_planning_pb(planning_pb)
localization.update_localization_pb(
planning_pb.debug.planning_data.adc_position)
planning.compute_st_data()
planning.compute_sl_data()
planning.compute_path_data()
planning.compute_speed_data()
planning.compute_init_point()
def add_listener():
planning_sub = cyber.Node("st_plot")
planning_sub.create_reader('/apollo/planning', planning_pb2.ADCTrajectory,
planning_callback)
def press_key(event):
if event.key == '+' or event.key == '=':
map_path_subplot.zoom_in()
if event.key == '-' or event.key == '_':
map_path_subplot.zoom_out()
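# Example invocation (hypothetical map path; the -m flag is optional):
#
#   python plot_planning.py -m /path/to/base_map.txt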
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="plot_planning is a tool to display "
"planning trajs on a map.",
prog="plot_planning_old.py")
parser.add_argument(
"-m",
"--map",
action="store",
type=str,
required=False,
default=None,
help="Specify the map file in txt or binary format")
args = parser.parse_args()
cyber.init()
add_listener()
fig = plt.figure()
fig.canvas.mpl_connect('key_press_event', press_key)
ax = plt.subplot2grid((3, 3), (0, 0), rowspan=2, colspan=2)
map_path_subplot = PathSubplot(ax, args.map)
ax1 = plt.subplot2grid((3, 3), (0, 2))
speed_subplot = SpeedSubplot(ax1)
ax2 = plt.subplot2grid((3, 3), (2, 2))
dp_st_main_subplot = StMainSubplot(ax2, 'QpSplineStSpeedOptimizer')
ax3 = plt.subplot2grid((3, 3), (1, 2))
qp_st_main_subplot = StMainSubplot(ax3, 'DpStSpeedOptimizer')
ax4 = plt.subplot2grid((3, 3), (2, 0), colspan=1)
sl_main_subplot = SlMainSubplot(ax4)
ax5 = plt.subplot2grid((3, 3), (2, 1), colspan=1)
st_speed_subplot = StSpeedSubplot(ax5, 'QpSplineStSpeedOptimizer')
ani = animation.FuncAnimation(fig, update, interval=100)
ax.axis('equal')
plt.show()
cyber.shutdown()
| apache-2.0 |
abulak/TDA-Cause-Effect-Pairs | PlottingUtilities.py | 1 | 5894 | import os
import json
import re
import numpy as np
import numpy.ma as ma
# import matplotlib
# matplotlib.use('Agg')
import GeometricComplex as GC
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.collections import LineCollection, PolyCollection
class TopologyPlotter:
"""
    Outlier Plotter; requires an outliers model and assumes points.std,
    outliers.$model, diagrams.$model and extrema.$model are in the given path.
"""
def __init__(self, path, model="knn", name=None):
if name is None:
            # default name: trailing path component, assuming paths like ".../pair0001/"
            self.name = path[-9:-1]
else:
self.name = name
self.suffix = str(model)
self.points = standardise(np.loadtxt(os.path.join(path, 'points.std')))
outliers_path = os.path.join(path, 'outliers.' + self.suffix)
self.outliers = np.loadtxt(outliers_path).astype(np.int)
assert self.outliers.shape[0] == len(set(list(self.outliers))), \
"Duplicates in outliers!"
diagrams_path = os.path.join(path, 'diagrams.' + self.suffix)
with open(diagrams_path, 'r') as file:
self.diagrams = json.load(file)
extrema_path = os.path.join(path, 'extrema.' + self.suffix)
with open(extrema_path, 'r') as file:
self.extrema = json.load(file)
def __mask_points__(self, i):
masked_points = ma.masked_array(self.points)
for outlier in self.outliers[:i]:
masked_points[outlier] = ma.masked
cleaned_points = masked_points.compressed().reshape(
self.points.shape[0] - i, 2)
return cleaned_points
def plot_diagram(self, i, mpl_axis, filtration, axis,
inverted=False):
minimal = self.extrema[i]['minima'][axis]
maximal = self.extrema[i]['maxima'][axis]
mpl_axis.set_aspect('equal')
if inverted:
minimal *= -1
maximal *= -1
mpl_axis.plot([minimal, maximal], [minimal, maximal],
color='black', alpha=0.5)
points = np.array(self.diagrams[i][filtration][0])
if points.size: # if the array is not empty...
mpl_axis.scatter(points[:, 0], points[:, 1],
marker='x', s=100, color='r',
linewidth=2)
for pt in points:
mpl_axis.plot(np.array([pt[0], pt[0]]),
np.array([pt[0], pt[1]]),
color='black', alpha=0.3)
mpl_axis.text(0.5, 0.1, "Filtration: " + filtration,
transform=mpl_axis.transAxes)
def plot_delaunay(self, i):
if i < 1:
cleaned_points = self.points
else:
cleaned_points = standardise(self.__mask_points__(i-1))
edges, triangles = delaunay_triangulation(cleaned_points, dimension=2)
lines = LineCollection(edges, linewidths=1, colors='b', alpha=0.3)
triangles = PolyCollection(triangles, facecolors='b', alpha=0.1)
ax = plt.gca()
plt.scatter(cleaned_points[:, 0], cleaned_points[:, 1],
color='red', alpha=1, s=5)
ax.add_collection(lines)
ax.add_collection(triangles)
return ax
def plot_all_diagrams(self, i):
if i > len(self.outliers):
print("argument must be less than", len(self.outliers),
"for the pair!")
else:
fig = plt.gcf()
cmplx = fig.add_subplot(2,3,1)
cmplx.set_aspect('equal')
self.plot_delaunay(i)
cmplx.tick_params(labeltop='on', labelleft='on',
labelright='off', labelbottom='off')
inv_cmplx = fig.add_subplot(2,3,6)
inv_cmplx.set_aspect('equal')
inv_cmplx.tick_params(labeltop='on', labelright='on')
inv_cmplx.tick_params(labeltop='off', labelleft='off',
labelright='on', labelbottom='on')
self.points *= -1
self.plot_delaunay(i)
with sns.axes_style("whitegrid"):
y_inc = fig.add_subplot(232, sharey=cmplx)
x_inc = fig.add_subplot(234, sharex=cmplx)
self.plot_diagram(i, mpl_axis=x_inc, filtration="X", axis=0)
self.plot_diagram(i, mpl_axis=y_inc, filtration="Y", axis=1)
x_dec = fig.add_subplot(233, sharex=inv_cmplx)
y_dec = fig.add_subplot(235, sharey=inv_cmplx)
self.plot_diagram(i, mpl_axis=x_dec, filtration="X_inverted", axis=0, inverted=True)
self.plot_diagram(i, mpl_axis=y_dec, filtration="Y_inverted", axis=1, inverted=True)
for ax in [x_inc, y_inc, x_dec, y_dec]:
for label in ax.get_xticklabels():
label.set_visible(False)
for label in ax.get_yticklabels():
label.set_visible(False)
plt.tight_layout()
fig.subplots_adjust(wspace=0, hspace=0)
for ax in [x_dec, y_dec, inv_cmplx]:
pos1 = ax.get_position()
pos2 = [pos1.x0 + 0.02, pos1.y0 - 0.03, pos1.width, pos1.height]
ax.set_position(pos2)
return fig
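# A minimal usage sketch (hypothetical pair directory; assumes points.std,
# outliers.knn, diagrams.knn and extrema.knn exist there, as loaded by
# TopologyPlotter.__init__):
#
#   plotter = TopologyPlotter('pairs/pair0001/', model='knn')
#   fig = plotter.plot_all_diagrams(0)
#   fig.savefig('pair0001_diagrams.png')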
def standardise(points):
"""
Standardise points
:param points: np.array
:return: np.array of points after standardisation (mean=0, st deviation=1)
"""
    # standardise each column in place (each p below is a view into points)
    for i in range(points.shape[1]):
        p = points[:, i]
        mean = np.mean(p)
        std = np.std(p)
        p -= mean
        p /= std
return points
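# A quick check of the behaviour (the returned array is the same object as the
# argument):
#
#   pts = np.array([[0.0, 10.0], [2.0, 20.0], [4.0, 30.0]])
#   out = standardise(pts)   # out is pts; each column now has mean 0, std 1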
def delaunay_triangulation(points, dimension=2):
cmplx = GC.AlphaGeometricComplex(points, dimension=dimension)
real_edges = cmplx.get_real_edges(cmplx.limited_simplices)
real_triangles = cmplx.get_real_triangles(cmplx.limited_simplices)
return real_edges, real_triangles
| gpl-2.0 |
BryanCutler/spark | python/pyspark/pandas/tests/test_groupby.py | 1 | 116592 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import inspect
from distutils.version import LooseVersion
from itertools import product
import numpy as np
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError, DataError
from pyspark.pandas.missing.groupby import (
MissingPandasLikeDataFrameGroupBy,
MissingPandasLikeSeriesGroupBy,
)
from pyspark.pandas.testing.utils import ReusedSQLTestCase, TestUtils
from pyspark.pandas.groupby import is_multi_agg_with_relabel
class GroupByTest(ReusedSQLTestCase, TestUtils):
def test_groupby_simple(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 6, 4, 4, 6, 4, 3, 7],
"b": [4, 2, 7, 3, 3, 1, 1, 1, 2],
"c": [4, 2, 7, 3, None, 1, 1, 1, 2],
"d": list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
kdf = ps.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("a").reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index).sum()),
sort(pdf.groupby("a", as_index=as_index).sum()),
)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index).b.sum()),
sort(pdf.groupby("a", as_index=as_index).b.sum()),
)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index)["b"].sum()),
sort(pdf.groupby("a", as_index=as_index)["b"].sum()),
)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index)[["b", "c"]].sum()),
sort(pdf.groupby("a", as_index=as_index)[["b", "c"]].sum()),
)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index)[[]].sum()),
sort(pdf.groupby("a", as_index=as_index)[[]].sum()),
)
self.assert_eq(
sort(kdf.groupby("a", as_index=as_index)["c"].sum()),
sort(pdf.groupby("a", as_index=as_index)["c"].sum()),
)
self.assert_eq(kdf.groupby("a").a.sum().sort_index(), pdf.groupby("a").a.sum().sort_index())
self.assert_eq(
kdf.groupby("a")["a"].sum().sort_index(), pdf.groupby("a")["a"].sum().sort_index()
)
self.assert_eq(
kdf.groupby("a")[["a"]].sum().sort_index(), pdf.groupby("a")[["a"]].sum().sort_index()
)
self.assert_eq(
kdf.groupby("a")[["a", "c"]].sum().sort_index(),
pdf.groupby("a")[["a", "c"]].sum().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b).sum().sort_index(), pdf.a.groupby(pdf.b).sum().sort_index()
)
for axis in [0, "index"]:
self.assert_eq(
kdf.groupby("a", axis=axis).a.sum().sort_index(),
pdf.groupby("a", axis=axis).a.sum().sort_index(),
)
self.assert_eq(
kdf.groupby("a", axis=axis)["a"].sum().sort_index(),
pdf.groupby("a", axis=axis)["a"].sum().sort_index(),
)
self.assert_eq(
kdf.groupby("a", axis=axis)[["a"]].sum().sort_index(),
pdf.groupby("a", axis=axis)[["a"]].sum().sort_index(),
)
self.assert_eq(
kdf.groupby("a", axis=axis)[["a", "c"]].sum().sort_index(),
pdf.groupby("a", axis=axis)[["a", "c"]].sum().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b, axis=axis).sum().sort_index(),
pdf.a.groupby(pdf.b, axis=axis).sum().sort_index(),
)
self.assertRaises(ValueError, lambda: kdf.groupby("a", as_index=False).a)
self.assertRaises(ValueError, lambda: kdf.groupby("a", as_index=False)["a"])
self.assertRaises(ValueError, lambda: kdf.groupby("a", as_index=False)[["a"]])
self.assertRaises(ValueError, lambda: kdf.groupby("a", as_index=False)[["a", "c"]])
self.assertRaises(KeyError, lambda: kdf.groupby("z", as_index=False)[["a", "c"]])
self.assertRaises(KeyError, lambda: kdf.groupby(["z"], as_index=False)[["a", "c"]])
self.assertRaises(TypeError, lambda: kdf.a.groupby(kdf.b, as_index=False))
self.assertRaises(NotImplementedError, lambda: kdf.groupby("a", axis=1))
self.assertRaises(NotImplementedError, lambda: kdf.groupby("a", axis="columns"))
self.assertRaises(ValueError, lambda: kdf.groupby("a", "b"))
self.assertRaises(TypeError, lambda: kdf.a.groupby(kdf.a, kdf.b))
# we can't use column name/names as a parameter `by` for `SeriesGroupBy`.
self.assertRaises(KeyError, lambda: kdf.a.groupby(by="a"))
self.assertRaises(KeyError, lambda: kdf.a.groupby(by=["a", "b"]))
self.assertRaises(KeyError, lambda: kdf.a.groupby(by=("a", "b")))
# we can't use DataFrame as a parameter `by` for `DataFrameGroupBy`/`SeriesGroupBy`.
self.assertRaises(ValueError, lambda: kdf.groupby(kdf))
self.assertRaises(ValueError, lambda: kdf.a.groupby(kdf))
self.assertRaises(ValueError, lambda: kdf.a.groupby((kdf,)))
# non-string names
pdf = pd.DataFrame(
{
10: [1, 2, 6, 4, 4, 6, 4, 3, 7],
20: [4, 2, 7, 3, 3, 1, 1, 1, 2],
30: [4, 2, 7, 3, None, 1, 1, 1, 2],
40: list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
kdf = ps.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(10).reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby(10, as_index=as_index).sum()),
sort(pdf.groupby(10, as_index=as_index).sum()),
)
self.assert_eq(
sort(kdf.groupby(10, as_index=as_index)[20].sum()),
sort(pdf.groupby(10, as_index=as_index)[20].sum()),
)
self.assert_eq(
sort(kdf.groupby(10, as_index=as_index)[[20, 30]].sum()),
sort(pdf.groupby(10, as_index=as_index)[[20, 30]].sum()),
)
def test_groupby_multiindex_columns(self):
pdf = pd.DataFrame(
{
(10, "a"): [1, 2, 6, 4, 4, 6, 4, 3, 7],
(10, "b"): [4, 2, 7, 3, 3, 1, 1, 1, 2],
(20, "c"): [4, 2, 7, 3, None, 1, 1, 1, 2],
(30, "d"): list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby((10, "a")).sum().sort_index(), pdf.groupby((10, "a")).sum().sort_index()
)
self.assert_eq(
kdf.groupby((10, "a"), as_index=False)
.sum()
.sort_values((10, "a"))
.reset_index(drop=True),
pdf.groupby((10, "a"), as_index=False)
.sum()
.sort_values((10, "a"))
.reset_index(drop=True),
)
self.assert_eq(
kdf.groupby((10, "a"))[[(20, "c")]].sum().sort_index(),
pdf.groupby((10, "a"))[[(20, "c")]].sum().sort_index(),
)
# TODO: a pandas bug?
# expected = pdf.groupby((10, "a"))[(20, "c")].sum().sort_index()
expected = pd.Series(
[4.0, 2.0, 1.0, 4.0, 8.0, 2.0],
name=(20, "c"),
index=pd.Index([1, 2, 3, 4, 6, 7], name=(10, "a")),
)
self.assert_eq(kdf.groupby((10, "a"))[(20, "c")].sum().sort_index(), expected)
if LooseVersion(pd.__version__) < LooseVersion("1.1.3"):
self.assert_eq(
kdf[(20, "c")].groupby(kdf[(10, "a")]).sum().sort_index(),
pdf[(20, "c")].groupby(pdf[(10, "a")]).sum().sort_index(),
)
else:
# seems like a pandas bug introduced in pandas 1.1.3.
self.assert_eq(kdf[(20, "c")].groupby(kdf[(10, "a")]).sum().sort_index(), expected)
def test_split_apply_combine_on_series(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 6, 4, 4, 6, 4, 3, 7],
"b": [4, 2, 7, 3, 3, 1, 1, 1, 2],
"c": [4, 2, 7, 3, None, 1, 1, 1, 2],
"d": list("abcdefght"),
},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
kdf = ps.from_pandas(pdf)
funcs = [
((True, False), ["sum", "min", "max", "count", "first", "last"]),
((True, True), ["mean"]),
((False, False), ["var", "std"]),
]
funcs = [(check_exact, almost, f) for (check_exact, almost), fs in funcs for f in fs]
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True)
for check_exact, almost, func in funcs:
for kkey, pkey in [("b", "b"), (kdf.b, pdf.b)]:
with self.subTest(as_index=as_index, func=func, key=pkey):
if as_index is True or func != "std":
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index).a, func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index).a, func)()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index), func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index), func)()),
check_exact=check_exact,
almost=almost,
)
else:
# seems like a pandas' bug for as_index=False and func == "std"?
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index).a, func)()),
sort(pdf.groupby(pkey, as_index=True).a.std().reset_index()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index), func)()),
sort(pdf.groupby(pkey, as_index=True).std().reset_index()),
check_exact=check_exact,
almost=almost,
)
for kkey, pkey in [(kdf.b + 1, pdf.b + 1), (kdf.copy().b, pdf.copy().b)]:
with self.subTest(as_index=as_index, func=func, key=pkey):
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index).a, func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index).a, func)()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(kdf.groupby(kkey, as_index=as_index), func)()),
sort(getattr(pdf.groupby(pkey, as_index=as_index), func)()),
check_exact=check_exact,
almost=almost,
)
for check_exact, almost, func in funcs:
for i in [0, 4, 7]:
with self.subTest(as_index=as_index, func=func, i=i):
self.assert_eq(
sort(getattr(kdf.groupby(kdf.b > i, as_index=as_index).a, func)()),
sort(getattr(pdf.groupby(pdf.b > i, as_index=as_index).a, func)()),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
sort(getattr(kdf.groupby(kdf.b > i, as_index=as_index), func)()),
sort(getattr(pdf.groupby(pdf.b > i, as_index=as_index), func)()),
check_exact=check_exact,
almost=almost,
)
for check_exact, almost, func in funcs:
for kkey, pkey in [
(kdf.b, pdf.b),
(kdf.b + 1, pdf.b + 1),
(kdf.copy().b, pdf.copy().b),
(kdf.b.rename(), pdf.b.rename()),
]:
with self.subTest(func=func, key=pkey):
self.assert_eq(
getattr(kdf.a.groupby(kkey), func)().sort_index(),
getattr(pdf.a.groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
getattr((kdf.a + 1).groupby(kkey), func)().sort_index(),
getattr((pdf.a + 1).groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
getattr((kdf.b + 1).groupby(kkey), func)().sort_index(),
getattr((pdf.b + 1).groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
self.assert_eq(
getattr(kdf.a.rename().groupby(kkey), func)().sort_index(),
getattr(pdf.a.rename().groupby(pkey), func)().sort_index(),
check_exact=check_exact,
almost=almost,
)
def test_aggregate(self):
pdf = pd.DataFrame(
{"A": [1, 1, 2, 2], "B": [1, 2, 3, 4], "C": [0.362, 0.227, 1.267, -0.562]}
)
kdf = ps.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(list(df.columns)).reset_index(drop=True)
for kkey, pkey in [("A", "A"), (kdf.A, pdf.A)]:
with self.subTest(as_index=as_index, key=pkey):
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg("sum")),
sort(pdf.groupby(pkey, as_index=as_index).agg("sum")),
)
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
sort(pdf.groupby(pkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
)
self.assert_eq(
sort(
kdf.groupby(kkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
sort(
pdf.groupby(pkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
)
if as_index:
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg(["sum"])),
sort(pdf.groupby(pkey, as_index=as_index).agg(["sum"])),
)
else:
# seems like a pandas' bug for as_index=False and func_or_funcs is list?
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg(["sum"])),
sort(pdf.groupby(pkey, as_index=True).agg(["sum"]).reset_index()),
)
for kkey, pkey in [(kdf.A + 1, pdf.A + 1), (kdf.copy().A, pdf.copy().A)]:
with self.subTest(as_index=as_index, key=pkey):
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg("sum")),
sort(pdf.groupby(pkey, as_index=as_index).agg("sum")),
)
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
sort(pdf.groupby(pkey, as_index=as_index).agg({"B": "min", "C": "sum"})),
)
self.assert_eq(
sort(
kdf.groupby(kkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
sort(
pdf.groupby(pkey, as_index=as_index).agg(
{"B": ["min", "max"], "C": "sum"}
)
),
)
self.assert_eq(
sort(kdf.groupby(kkey, as_index=as_index).agg(["sum"])),
sort(pdf.groupby(pkey, as_index=as_index).agg(["sum"])),
)
expected_error_message = (
r"aggs must be a dict mapping from column name to aggregate functions "
r"\(string or list of strings\)."
)
with self.assertRaisesRegex(ValueError, expected_error_message):
kdf.groupby("A", as_index=as_index).agg(0)
# multi-index columns
columns = pd.MultiIndex.from_tuples([(10, "A"), (10, "B"), (20, "C")])
pdf.columns = columns
kdf.columns = columns
for as_index in [True, False]:
stats_kdf = kdf.groupby((10, "A"), as_index=as_index).agg(
{(10, "B"): "min", (20, "C"): "sum"}
)
stats_pdf = pdf.groupby((10, "A"), as_index=as_index).agg(
{(10, "B"): "min", (20, "C"): "sum"}
)
self.assert_eq(
stats_kdf.sort_values(by=[(10, "B"), (20, "C")]).reset_index(drop=True),
stats_pdf.sort_values(by=[(10, "B"), (20, "C")]).reset_index(drop=True),
)
stats_kdf = kdf.groupby((10, "A")).agg({(10, "B"): ["min", "max"], (20, "C"): "sum"})
stats_pdf = pdf.groupby((10, "A")).agg({(10, "B"): ["min", "max"], (20, "C"): "sum"})
self.assert_eq(
stats_kdf.sort_values(
by=[(10, "B", "min"), (10, "B", "max"), (20, "C", "sum")]
).reset_index(drop=True),
stats_pdf.sort_values(
by=[(10, "B", "min"), (10, "B", "max"), (20, "C", "sum")]
).reset_index(drop=True),
)
# non-string names
pdf.columns = [10, 20, 30]
kdf.columns = [10, 20, 30]
for as_index in [True, False]:
stats_kdf = kdf.groupby(10, as_index=as_index).agg({20: "min", 30: "sum"})
stats_pdf = pdf.groupby(10, as_index=as_index).agg({20: "min", 30: "sum"})
self.assert_eq(
stats_kdf.sort_values(by=[20, 30]).reset_index(drop=True),
stats_pdf.sort_values(by=[20, 30]).reset_index(drop=True),
)
stats_kdf = kdf.groupby(10).agg({20: ["min", "max"], 30: "sum"})
stats_pdf = pdf.groupby(10).agg({20: ["min", "max"], 30: "sum"})
self.assert_eq(
stats_kdf.sort_values(by=[(20, "min"), (20, "max"), (30, "sum")]).reset_index(
drop=True
),
stats_pdf.sort_values(by=[(20, "min"), (20, "max"), (30, "sum")]).reset_index(
drop=True
),
)
def test_aggregate_func_str_list(self):
        # this tests cases where only a string or a list is assigned
pdf = pd.DataFrame(
{
"kind": ["cat", "dog", "cat", "dog"],
"height": [9.1, 6.0, 9.5, 34.0],
"weight": [7.9, 7.5, 9.9, 198.0],
}
)
kdf = ps.from_pandas(pdf)
agg_funcs = ["max", "min", ["min", "max"]]
for aggfunc in agg_funcs:
            # Since the row order of a Koalas groupby may differ from pandas,
            # sort by index to ensure both have the same output
sorted_agg_kdf = kdf.groupby("kind").agg(aggfunc).sort_index()
sorted_agg_pdf = pdf.groupby("kind").agg(aggfunc).sort_index()
self.assert_eq(sorted_agg_kdf, sorted_agg_pdf)
# test on multi index column case
pdf = pd.DataFrame(
{"A": [1, 1, 2, 2], "B": [1, 2, 3, 4], "C": [0.362, 0.227, 1.267, -0.562]}
)
kdf = ps.from_pandas(pdf)
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
kdf.columns = columns
for aggfunc in agg_funcs:
sorted_agg_kdf = kdf.groupby(("X", "A")).agg(aggfunc).sort_index()
sorted_agg_pdf = pdf.groupby(("X", "A")).agg(aggfunc).sort_index()
self.assert_eq(sorted_agg_kdf, sorted_agg_pdf)
@unittest.skipIf(pd.__version__ < "0.25.0", "not supported before pandas 0.25.0")
def test_aggregate_relabel(self):
# this is to test named aggregation in groupby
pdf = pd.DataFrame({"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]})
kdf = ps.from_pandas(pdf)
# different agg column, same function
agg_pdf = pdf.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max")).sort_index()
agg_kdf = kdf.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max")).sort_index()
self.assert_eq(agg_pdf, agg_kdf)
# same agg column, different functions
agg_pdf = pdf.groupby("group").agg(b_max=("B", "max"), b_min=("B", "min")).sort_index()
agg_kdf = kdf.groupby("group").agg(b_max=("B", "max"), b_min=("B", "min")).sort_index()
self.assert_eq(agg_pdf, agg_kdf)
# test on NamedAgg
agg_pdf = (
pdf.groupby("group").agg(b_max=pd.NamedAgg(column="B", aggfunc="max")).sort_index()
)
agg_kdf = (
kdf.groupby("group").agg(b_max=ps.NamedAgg(column="B", aggfunc="max")).sort_index()
)
self.assert_eq(agg_kdf, agg_pdf)
# test on NamedAgg multi columns aggregation
agg_pdf = (
pdf.groupby("group")
.agg(
b_max=pd.NamedAgg(column="B", aggfunc="max"),
b_min=pd.NamedAgg(column="B", aggfunc="min"),
)
.sort_index()
)
agg_kdf = (
kdf.groupby("group")
.agg(
b_max=ps.NamedAgg(column="B", aggfunc="max"),
b_min=ps.NamedAgg(column="B", aggfunc="min"),
)
.sort_index()
)
self.assert_eq(agg_kdf, agg_pdf)
def test_dropna(self):
pdf = pd.DataFrame(
{"A": [None, 1, None, 1, 2], "B": [1, 2, 3, None, None], "C": [4, 5, 6, 7, None]}
)
kdf = ps.from_pandas(pdf)
# pd.DataFrame.groupby with dropna parameter is implemented since pandas 1.1.0
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
for dropna in [True, False]:
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("A").reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index, dropna=dropna).std()),
sort(pdf.groupby("A", as_index=as_index, dropna=dropna).std()),
)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index, dropna=dropna).B.std()),
sort(pdf.groupby("A", as_index=as_index, dropna=dropna).B.std()),
)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index, dropna=dropna)["B"].std()),
sort(pdf.groupby("A", as_index=as_index, dropna=dropna)["B"].std()),
)
self.assert_eq(
sort(
kdf.groupby("A", as_index=as_index, dropna=dropna).agg(
{"B": "min", "C": "std"}
)
),
sort(
pdf.groupby("A", as_index=as_index, dropna=dropna).agg(
{"B": "min", "C": "std"}
)
),
)
for dropna in [True, False]:
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(["A", "B"]).reset_index(drop=True)
self.assert_eq(
sort(
kdf.groupby(["A", "B"], as_index=as_index, dropna=dropna).agg(
{"C": ["min", "std"]}
)
),
sort(
pdf.groupby(["A", "B"], as_index=as_index, dropna=dropna).agg(
{"C": ["min", "std"]}
)
),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
kdf.columns = columns
for dropna in [True, False]:
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(("X", "A")).reset_index(drop=True)
sorted_stats_kdf = sort(
kdf.groupby(("X", "A"), as_index=as_index, dropna=dropna).agg(
{("X", "B"): "min", ("Y", "C"): "std"}
)
)
sorted_stats_pdf = sort(
pdf.groupby(("X", "A"), as_index=as_index, dropna=dropna).agg(
{("X", "B"): "min", ("Y", "C"): "std"}
)
)
self.assert_eq(sorted_stats_kdf, sorted_stats_pdf)
else:
# Testing dropna=True (pandas default behavior)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("A").reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index, dropna=True)["B"].min()),
sort(pdf.groupby("A", as_index=as_index)["B"].min()),
)
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(["A", "B"]).reset_index(drop=True)
self.assert_eq(
sort(
kdf.groupby(["A", "B"], as_index=as_index, dropna=True).agg(
{"C": ["min", "std"]}
)
),
sort(pdf.groupby(["A", "B"], as_index=as_index).agg({"C": ["min", "std"]})),
almost=True,
)
# Testing dropna=False
index = pd.Index([1.0, 2.0, np.nan], name="A")
expected = pd.Series([2.0, np.nan, 1.0], index=index, name="B")
result = kdf.groupby("A", as_index=True, dropna=False)["B"].min().sort_index()
self.assert_eq(expected, result)
expected = pd.DataFrame({"A": [1.0, 2.0, np.nan], "B": [2.0, np.nan, 1.0]})
result = (
kdf.groupby("A", as_index=False, dropna=False)["B"]
.min()
.sort_values("A")
.reset_index(drop=True)
)
self.assert_eq(expected, result)
index = pd.MultiIndex.from_tuples(
[(1.0, 2.0), (1.0, None), (2.0, None), (None, 1.0), (None, 3.0)], names=["A", "B"]
)
expected = pd.DataFrame(
{
("C", "min"): [5.0, 7.0, np.nan, 4.0, 6.0],
("C", "std"): [np.nan, np.nan, np.nan, np.nan, np.nan],
},
index=index,
)
result = (
kdf.groupby(["A", "B"], as_index=True, dropna=False)
.agg({"C": ["min", "std"]})
.sort_index()
)
self.assert_eq(expected, result)
expected = pd.DataFrame(
{
("A", ""): [1.0, 1.0, 2.0, np.nan, np.nan],
("B", ""): [2.0, np.nan, np.nan, 1.0, 3.0],
("C", "min"): [5.0, 7.0, np.nan, 4.0, 6.0],
("C", "std"): [np.nan, np.nan, np.nan, np.nan, np.nan],
}
)
result = (
kdf.groupby(["A", "B"], as_index=False, dropna=False)
.agg({"C": ["min", "std"]})
.sort_values(["A", "B"])
.reset_index(drop=True)
)
self.assert_eq(expected, result)
def test_describe(self):
        # numeric types are supported; string types are not supported yet
datas = []
datas.append({"a": [1, 1, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
datas.append({"a": [-1, -1, -3], "b": [-4, -5, -6], "c": [-7, -8, -9]})
datas.append({"a": [0, 0, 0], "b": [0, 0, 0], "c": [0, 8, 0]})
        # it is okay to use a string-typed column as a group key
datas.append({"a": ["a", "a", "c"], "b": [4, 5, 6], "c": [7, 8, 9]})
percentiles = [0.25, 0.5, 0.75]
formatted_percentiles = ["25%", "50%", "75%"]
non_percentile_stats = ["count", "mean", "std", "min", "max"]
for data in datas:
pdf = pd.DataFrame(data)
kdf = ps.from_pandas(pdf)
describe_pdf = pdf.groupby("a").describe().sort_index()
describe_kdf = kdf.groupby("a").describe().sort_index()
            # since the results of the percentile columns are slightly different from pandas,
            # we should check them separately: non-percentile columns & percentile columns
# 1. Check that non-percentile columns are equal.
agg_cols = [col.name for col in kdf.groupby("a")._agg_columns]
self.assert_eq(
describe_kdf.drop(list(product(agg_cols, formatted_percentiles))),
describe_pdf.drop(columns=formatted_percentiles, level=1),
check_exact=False,
)
# 2. Check that percentile columns are equal.
# The interpolation argument is yet to be implemented in Koalas.
quantile_pdf = pdf.groupby("a").quantile(percentiles, interpolation="nearest")
quantile_pdf = quantile_pdf.unstack(level=1).astype(float)
self.assert_eq(
describe_kdf.drop(list(product(agg_cols, non_percentile_stats))),
quantile_pdf.rename(columns="{:.0%}".format, level=1),
)
        # string types are not supported yet
datas = []
datas.append({"a": ["a", "a", "c"], "b": ["d", "e", "f"], "c": ["g", "h", "i"]})
datas.append({"a": ["a", "a", "c"], "b": [4, 0, 1], "c": ["g", "h", "i"]})
for data in datas:
pdf = pd.DataFrame(data)
kdf = ps.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: kdf.groupby("a").describe().sort_index())
# multi-index columns
pdf = pd.DataFrame({("x", "a"): [1, 1, 3], ("x", "b"): [4, 5, 6], ("y", "c"): [7, 8, 9]})
kdf = ps.from_pandas(pdf)
describe_pdf = pdf.groupby(("x", "a")).describe().sort_index()
describe_kdf = kdf.groupby(("x", "a")).describe().sort_index()
# 1. Check that non-percentile columns are equal.
agg_column_labels = [col._column_label for col in kdf.groupby(("x", "a"))._agg_columns]
self.assert_eq(
describe_kdf.drop(
[
tuple(list(label) + [s])
for label, s in product(agg_column_labels, formatted_percentiles)
]
),
describe_pdf.drop(columns=formatted_percentiles, level=2),
check_exact=False,
)
# 2. Check that percentile columns are equal.
# The interpolation argument is yet to be implemented in Koalas.
quantile_pdf = pdf.groupby(("x", "a")).quantile(percentiles, interpolation="nearest")
quantile_pdf = quantile_pdf.unstack(level=1).astype(float)
self.assert_eq(
describe_kdf.drop(
[
tuple(list(label) + [s])
for label, s in product(agg_column_labels, non_percentile_stats)
]
),
quantile_pdf.rename(columns="{:.0%}".format, level=2),
)
def test_aggregate_relabel_multiindex(self):
pdf = pd.DataFrame({"A": [0, 1, 2, 3], "B": [5, 6, 7, 8], "group": ["a", "a", "b", "b"]})
pdf.columns = pd.MultiIndex.from_tuples([("y", "A"), ("y", "B"), ("x", "group")])
kdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
agg_pdf = pd.DataFrame(
{"a_max": [1, 3]}, index=pd.Index(["a", "b"], name=("x", "group"))
)
elif LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
agg_pdf = pdf.groupby(("x", "group")).agg(a_max=(("y", "A"), "max")).sort_index()
agg_kdf = kdf.groupby(("x", "group")).agg(a_max=(("y", "A"), "max")).sort_index()
self.assert_eq(agg_pdf, agg_kdf)
# same column, different methods
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
agg_pdf = pd.DataFrame(
{"a_max": [1, 3], "a_min": [0, 2]}, index=pd.Index(["a", "b"], name=("x", "group"))
)
elif LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
agg_pdf = (
pdf.groupby(("x", "group"))
.agg(a_max=(("y", "A"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
agg_kdf = (
kdf.groupby(("x", "group"))
.agg(a_max=(("y", "A"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
self.assert_eq(agg_pdf, agg_kdf)
# different column, different methods
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
agg_pdf = pd.DataFrame(
{"a_max": [6, 8], "a_min": [0, 2]}, index=pd.Index(["a", "b"], name=("x", "group"))
)
elif LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
agg_pdf = (
pdf.groupby(("x", "group"))
.agg(a_max=(("y", "B"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
agg_kdf = (
kdf.groupby(("x", "group"))
.agg(a_max=(("y", "B"), "max"), a_min=(("y", "A"), "min"))
.sort_index()
)
self.assert_eq(agg_pdf, agg_kdf)
def test_all_any(self):
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],
"B": [True, True, True, False, False, False, None, True, None, False],
}
)
kdf = ps.from_pandas(pdf)
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values("A").reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index).all()),
sort(pdf.groupby("A", as_index=as_index).all()),
)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index).any()),
sort(pdf.groupby("A", as_index=as_index).any()),
)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index).all()).B,
sort(pdf.groupby("A", as_index=as_index).all()).B,
)
self.assert_eq(
sort(kdf.groupby("A", as_index=as_index).any()).B,
sort(pdf.groupby("A", as_index=as_index).any()).B,
)
self.assert_eq(
kdf.B.groupby(kdf.A).all().sort_index(), pdf.B.groupby(pdf.A).all().sort_index()
)
self.assert_eq(
kdf.B.groupby(kdf.A).any().sort_index(), pdf.B.groupby(pdf.A).any().sort_index()
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")])
pdf.columns = columns
kdf.columns = columns
for as_index in [True, False]:
if as_index:
sort = lambda df: df.sort_index()
else:
sort = lambda df: df.sort_values(("X", "A")).reset_index(drop=True)
self.assert_eq(
sort(kdf.groupby(("X", "A"), as_index=as_index).all()),
sort(pdf.groupby(("X", "A"), as_index=as_index).all()),
)
self.assert_eq(
sort(kdf.groupby(("X", "A"), as_index=as_index).any()),
sort(pdf.groupby(("X", "A"), as_index=as_index).any()),
)
def test_raises(self):
kdf = ps.DataFrame(
{"a": [1, 2, 6, 4, 4, 6, 4, 3, 7], "b": [4, 2, 7, 3, 3, 1, 1, 1, 2]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
# test raises with incorrect key
self.assertRaises(ValueError, lambda: kdf.groupby([]))
self.assertRaises(KeyError, lambda: kdf.groupby("x"))
self.assertRaises(KeyError, lambda: kdf.groupby(["a", "x"]))
self.assertRaises(KeyError, lambda: kdf.groupby("a")["x"])
self.assertRaises(KeyError, lambda: kdf.groupby("a")["b", "x"])
self.assertRaises(KeyError, lambda: kdf.groupby("a")[["b", "x"]])
def test_nunique(self):
pdf = pd.DataFrame(
{"a": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], "b": [2, 2, 2, 3, 3, 4, 4, 5, 5, 5]}
)
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby("a").agg({"b": "nunique"}).sort_index(),
pdf.groupby("a").agg({"b": "nunique"}).sort_index(),
)
if LooseVersion(pd.__version__) < LooseVersion("1.1.0"):
expected = ps.DataFrame({"b": [2, 2]}, index=pd.Index([0, 1], name="a"))
self.assert_eq(kdf.groupby("a").nunique().sort_index(), expected)
self.assert_eq(
kdf.groupby("a").nunique(dropna=False).sort_index(), expected,
)
else:
self.assert_eq(
kdf.groupby("a").nunique().sort_index(), pdf.groupby("a").nunique().sort_index()
)
self.assert_eq(
kdf.groupby("a").nunique(dropna=False).sort_index(),
pdf.groupby("a").nunique(dropna=False).sort_index(),
)
self.assert_eq(
kdf.groupby("a")["b"].nunique().sort_index(),
pdf.groupby("a")["b"].nunique().sort_index(),
)
self.assert_eq(
kdf.groupby("a")["b"].nunique(dropna=False).sort_index(),
pdf.groupby("a")["b"].nunique(dropna=False).sort_index(),
)
nunique_kdf = kdf.groupby("a", as_index=False).agg({"b": "nunique"})
nunique_pdf = pdf.groupby("a", as_index=False).agg({"b": "nunique"})
self.assert_eq(
nunique_kdf.sort_values(["a", "b"]).reset_index(drop=True),
nunique_pdf.sort_values(["a", "b"]).reset_index(drop=True),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
kdf.columns = columns
if LooseVersion(pd.__version__) < LooseVersion("1.1.0"):
expected = ps.DataFrame({("y", "b"): [2, 2]}, index=pd.Index([0, 1], name=("x", "a")))
self.assert_eq(
kdf.groupby(("x", "a")).nunique().sort_index(), expected,
)
self.assert_eq(
kdf.groupby(("x", "a")).nunique(dropna=False).sort_index(), expected,
)
else:
self.assert_eq(
kdf.groupby(("x", "a")).nunique().sort_index(),
pdf.groupby(("x", "a")).nunique().sort_index(),
)
self.assert_eq(
kdf.groupby(("x", "a")).nunique(dropna=False).sort_index(),
pdf.groupby(("x", "a")).nunique(dropna=False).sort_index(),
)
def test_unique(self):
for pdf in [
pd.DataFrame(
{"a": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], "b": [2, 2, 2, 3, 3, 4, 4, 5, 5, 5]}
),
pd.DataFrame(
{
"a": [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
"b": ["w", "w", "w", "x", "x", "y", "y", "z", "z", "z"],
}
),
]:
with self.subTest(pdf=pdf):
kdf = ps.from_pandas(pdf)
actual = kdf.groupby("a")["b"].unique().sort_index().to_pandas()
expect = pdf.groupby("a")["b"].unique().sort_index()
self.assert_eq(len(actual), len(expect))
for act, exp in zip(actual, expect):
self.assertTrue(sorted(act) == sorted(exp))
def test_value_counts(self):
pdf = pd.DataFrame({"A": [1, 2, 2, 3, 3, 3], "B": [1, 1, 2, 3, 3, 3]}, columns=["A", "B"])
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby("A")["B"].value_counts().sort_index(),
pdf.groupby("A")["B"].value_counts().sort_index(),
)
self.assert_eq(
kdf.groupby("A")["B"].value_counts(sort=True, ascending=False).sort_index(),
pdf.groupby("A")["B"].value_counts(sort=True, ascending=False).sort_index(),
)
self.assert_eq(
kdf.groupby("A")["B"].value_counts(sort=True, ascending=True).sort_index(),
pdf.groupby("A")["B"].value_counts(sort=True, ascending=True).sort_index(),
)
self.assert_eq(
kdf.B.rename().groupby(kdf.A).value_counts().sort_index(),
pdf.B.rename().groupby(pdf.A).value_counts().sort_index(),
)
self.assert_eq(
kdf.B.groupby(kdf.A.rename()).value_counts().sort_index(),
pdf.B.groupby(pdf.A.rename()).value_counts().sort_index(),
)
self.assert_eq(
kdf.B.rename().groupby(kdf.A.rename()).value_counts().sort_index(),
pdf.B.rename().groupby(pdf.A.rename()).value_counts().sort_index(),
)
def test_size(self):
pdf = pd.DataFrame({"A": [1, 2, 2, 3, 3, 3], "B": [1, 1, 2, 3, 3, 3]})
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.groupby("A").size().sort_index(), pdf.groupby("A").size().sort_index())
self.assert_eq(
kdf.groupby("A")["B"].size().sort_index(), pdf.groupby("A")["B"].size().sort_index()
)
self.assert_eq(
kdf.groupby("A")[["B"]].size().sort_index(), pdf.groupby("A")[["B"]].size().sort_index()
)
self.assert_eq(
kdf.groupby(["A", "B"]).size().sort_index(), pdf.groupby(["A", "B"]).size().sort_index()
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("X", "A")).size().sort_index(), pdf.groupby(("X", "A")).size().sort_index()
)
self.assert_eq(
kdf.groupby([("X", "A"), ("Y", "B")]).size().sort_index(),
pdf.groupby([("X", "A"), ("Y", "B")]).size().sort_index(),
)
def test_diff(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
}
)
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.groupby("b").diff().sort_index(), pdf.groupby("b").diff().sort_index())
self.assert_eq(
kdf.groupby(["a", "b"]).diff().sort_index(), pdf.groupby(["a", "b"]).diff().sort_index()
)
self.assert_eq(
kdf.groupby(["b"])["a"].diff().sort_index(), pdf.groupby(["b"])["a"].diff().sort_index()
)
self.assert_eq(
kdf.groupby(["b"])[["a", "b"]].diff().sort_index(),
pdf.groupby(["b"])[["a", "b"]].diff().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).diff().sort_index(), pdf.groupby(pdf.b // 5).diff().sort_index()
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].diff().sort_index(),
pdf.groupby(pdf.b // 5)["a"].diff().sort_index(),
)
self.assert_eq(kdf.groupby("b").diff().sum(), pdf.groupby("b").diff().sum().astype(int))
self.assert_eq(kdf.groupby(["b"])["a"].diff().sum(), pdf.groupby(["b"])["a"].diff().sum())
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).diff().sort_index(), pdf.groupby(("x", "b")).diff().sort_index()
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).diff().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).diff().sort_index(),
)
def test_rank(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.groupby("b").rank().sort_index(), pdf.groupby("b").rank().sort_index())
self.assert_eq(
kdf.groupby(["a", "b"]).rank().sort_index(), pdf.groupby(["a", "b"]).rank().sort_index()
)
self.assert_eq(
kdf.groupby(["b"])["a"].rank().sort_index(), pdf.groupby(["b"])["a"].rank().sort_index()
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].rank().sort_index(),
pdf.groupby(["b"])[["a", "c"]].rank().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).rank().sort_index(), pdf.groupby(pdf.b // 5).rank().sort_index()
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].rank().sort_index(),
pdf.groupby(pdf.b // 5)["a"].rank().sort_index(),
)
self.assert_eq(kdf.groupby("b").rank().sum(), pdf.groupby("b").rank().sum())
self.assert_eq(kdf.groupby(["b"])["a"].rank().sum(), pdf.groupby(["b"])["a"].rank().sum())
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).rank().sort_index(), pdf.groupby(("x", "b")).rank().sort_index()
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).rank().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).rank().sort_index(),
)
def test_cumcount(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ps.from_pandas(pdf)
for ascending in [True, False]:
self.assert_eq(
kdf.groupby("b").cumcount(ascending=ascending).sort_index(),
pdf.groupby("b").cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby(["a", "b"]).cumcount(ascending=ascending).sort_index(),
pdf.groupby(["a", "b"]).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["a"].cumcount(ascending=ascending).sort_index(),
pdf.groupby(["b"])["a"].cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].cumcount(ascending=ascending).sort_index(),
pdf.groupby(["b"])[["a", "c"]].cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).cumcount(ascending=ascending).sort_index(),
pdf.groupby(pdf.b // 5).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].cumcount(ascending=ascending).sort_index(),
pdf.groupby(pdf.b // 5)["a"].cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby("b").cumcount(ascending=ascending).sum(),
pdf.groupby("b").cumcount(ascending=ascending).sum(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).cumcount(ascending=ascending).sort_index(),
pdf.a.rename().groupby(pdf.b).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).cumcount(ascending=ascending).sort_index(),
pdf.a.groupby(pdf.b.rename()).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).cumcount(ascending=ascending).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cumcount(ascending=ascending).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
for ascending in [True, False]:
self.assert_eq(
kdf.groupby(("x", "b")).cumcount(ascending=ascending).sort_index(),
pdf.groupby(("x", "b")).cumcount(ascending=ascending).sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).cumcount(ascending=ascending).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cumcount(ascending=ascending).sort_index(),
)
def test_cummin(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").cummin().sort_index(), pdf.groupby("b").cummin().sort_index()
)
self.assert_eq(
kdf.groupby(["a", "b"]).cummin().sort_index(),
pdf.groupby(["a", "b"]).cummin().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["a"].cummin().sort_index(),
pdf.groupby(["b"])["a"].cummin().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].cummin().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cummin().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).cummin().sort_index(),
pdf.groupby(pdf.b // 5).cummin().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].cummin().sort_index(),
pdf.groupby(pdf.b // 5)["a"].cummin().sort_index(),
)
self.assert_eq(
kdf.groupby("b").cummin().sum().sort_index(),
pdf.groupby("b").cummin().sum().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).cummin().sort_index(),
pdf.a.rename().groupby(pdf.b).cummin().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).cummin().sort_index(),
pdf.a.groupby(pdf.b.rename()).cummin().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).cummin().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cummin().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).cummin().sort_index(),
pdf.groupby(("x", "b")).cummin().sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).cummin().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cummin().sort_index(),
)
kdf = ps.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"]).cummin())
kdf = ps.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"])["B"].cummin())
def test_cummax(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").cummax().sort_index(), pdf.groupby("b").cummax().sort_index()
)
self.assert_eq(
kdf.groupby(["a", "b"]).cummax().sort_index(),
pdf.groupby(["a", "b"]).cummax().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["a"].cummax().sort_index(),
pdf.groupby(["b"])["a"].cummax().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].cummax().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cummax().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).cummax().sort_index(),
pdf.groupby(pdf.b // 5).cummax().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].cummax().sort_index(),
pdf.groupby(pdf.b // 5)["a"].cummax().sort_index(),
)
self.assert_eq(
kdf.groupby("b").cummax().sum().sort_index(),
pdf.groupby("b").cummax().sum().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).cummax().sort_index(),
pdf.a.rename().groupby(pdf.b).cummax().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).cummax().sort_index(),
pdf.a.groupby(pdf.b.rename()).cummax().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).cummax().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cummax().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).cummax().sort_index(),
pdf.groupby(("x", "b")).cummax().sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).cummax().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cummax().sort_index(),
)
kdf = ps.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"]).cummax())
kdf = ps.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"])["B"].cummax())
def test_cumsum(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").cumsum().sort_index(), pdf.groupby("b").cumsum().sort_index()
)
self.assert_eq(
kdf.groupby(["a", "b"]).cumsum().sort_index(),
pdf.groupby(["a", "b"]).cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["a"].cumsum().sort_index(),
pdf.groupby(["b"])["a"].cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].cumsum().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).cumsum().sort_index(),
pdf.groupby(pdf.b // 5).cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].cumsum().sort_index(),
pdf.groupby(pdf.b // 5)["a"].cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby("b").cumsum().sum().sort_index(),
pdf.groupby("b").cumsum().sum().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).cumsum().sort_index(),
pdf.a.rename().groupby(pdf.b).cumsum().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).cumsum().sort_index(),
pdf.a.groupby(pdf.b.rename()).cumsum().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).cumsum().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cumsum().sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).cumsum().sort_index(),
pdf.groupby(("x", "b")).cumsum().sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).cumsum().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cumsum().sort_index(),
)
kdf = ps.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"]).cumsum())
kdf = ps.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"])["B"].cumsum())
def test_cumprod(self):
pdf = pd.DataFrame(
{
"a": [1, 2, -3, 4, -5, 6] * 3,
"b": [1, 1, 2, 3, 5, 8] * 3,
"c": [1, 0, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").cumprod().sort_index(),
pdf.groupby("b").cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
kdf.groupby(["a", "b"]).cumprod().sort_index(),
pdf.groupby(["a", "b"]).cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
kdf.groupby(["b"])["a"].cumprod().sort_index(),
pdf.groupby(["b"])["a"].cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].cumprod().sort_index(),
pdf.groupby(["b"])[["a", "c"]].cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
kdf.groupby(kdf.b // 3).cumprod().sort_index(),
pdf.groupby(pdf.b // 3).cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
kdf.groupby(kdf.b // 3)["a"].cumprod().sort_index(),
pdf.groupby(pdf.b // 3)["a"].cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
kdf.groupby("b").cumprod().sum().sort_index(),
pdf.groupby("b").cumprod().sum().sort_index(),
check_exact=False,
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).cumprod().sort_index(),
pdf.a.rename().groupby(pdf.b).cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).cumprod().sort_index(),
pdf.a.groupby(pdf.b.rename()).cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).cumprod().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).cumprod().sort_index(),
check_exact=False,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).cumprod().sort_index(),
pdf.groupby(("x", "b")).cumprod().sort_index(),
check_exact=False,
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).cumprod().sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).cumprod().sort_index(),
check_exact=False,
)
kdf = ps.DataFrame([["a"], ["b"], ["c"]], columns=["A"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"]).cumprod())
kdf = ps.DataFrame([[1, "a"], [2, "b"], [3, "c"]], columns=["A", "B"])
self.assertRaises(DataError, lambda: kdf.groupby(["A"])["B"].cumprod())
def test_nsmallest(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"c": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"d": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
},
index=np.random.rand(9 * 3),
)
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby(["a"])["b"].nsmallest(1).sort_values(),
pdf.groupby(["a"])["b"].nsmallest(1).sort_values(),
)
self.assert_eq(
kdf.groupby(["a"])["b"].nsmallest(2).sort_index(),
pdf.groupby(["a"])["b"].nsmallest(2).sort_index(),
)
self.assert_eq(
(kdf.b * 10).groupby(kdf.a).nsmallest(2).sort_index(),
(pdf.b * 10).groupby(pdf.a).nsmallest(2).sort_index(),
)
self.assert_eq(
kdf.b.rename().groupby(kdf.a).nsmallest(2).sort_index(),
pdf.b.rename().groupby(pdf.a).nsmallest(2).sort_index(),
)
self.assert_eq(
kdf.b.groupby(kdf.a.rename()).nsmallest(2).sort_index(),
pdf.b.groupby(pdf.a.rename()).nsmallest(2).sort_index(),
)
self.assert_eq(
kdf.b.rename().groupby(kdf.a.rename()).nsmallest(2).sort_index(),
pdf.b.rename().groupby(pdf.a.rename()).nsmallest(2).sort_index(),
)
with self.assertRaisesRegex(ValueError, "nsmallest do not support multi-index now"):
kdf.set_index(["a", "b"]).groupby(["c"])["d"].nsmallest(1)
def test_nlargest(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"c": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
"d": [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3,
},
index=np.random.rand(9 * 3),
)
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby(["a"])["b"].nlargest(1).sort_values(),
pdf.groupby(["a"])["b"].nlargest(1).sort_values(),
)
self.assert_eq(
kdf.groupby(["a"])["b"].nlargest(2).sort_index(),
pdf.groupby(["a"])["b"].nlargest(2).sort_index(),
)
self.assert_eq(
(kdf.b * 10).groupby(kdf.a).nlargest(2).sort_index(),
(pdf.b * 10).groupby(pdf.a).nlargest(2).sort_index(),
)
self.assert_eq(
kdf.b.rename().groupby(kdf.a).nlargest(2).sort_index(),
pdf.b.rename().groupby(pdf.a).nlargest(2).sort_index(),
)
self.assert_eq(
kdf.b.groupby(kdf.a.rename()).nlargest(2).sort_index(),
pdf.b.groupby(pdf.a.rename()).nlargest(2).sort_index(),
)
self.assert_eq(
kdf.b.rename().groupby(kdf.a.rename()).nlargest(2).sort_index(),
pdf.b.rename().groupby(pdf.a.rename()).nlargest(2).sort_index(),
)
with self.assertRaisesRegex(ValueError, "nlargest do not support multi-index now"):
kdf.set_index(["a", "b"]).groupby(["c"])["d"].nlargest(1)
def test_fillna(self):
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2] * 3,
"B": [2, 4, None, 3] * 3,
"C": [None, None, None, 1] * 3,
"D": [0, 1, 5, 4] * 3,
}
)
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby("A").fillna(0).sort_index(), pdf.groupby("A").fillna(0).sort_index()
)
self.assert_eq(
kdf.groupby("A")["C"].fillna(0).sort_index(),
pdf.groupby("A")["C"].fillna(0).sort_index(),
)
self.assert_eq(
kdf.groupby("A")[["C"]].fillna(0).sort_index(),
pdf.groupby("A")[["C"]].fillna(0).sort_index(),
)
self.assert_eq(
kdf.groupby("A").fillna(method="bfill").sort_index(),
pdf.groupby("A").fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby("A")["C"].fillna(method="bfill").sort_index(),
pdf.groupby("A")["C"].fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby("A")[["C"]].fillna(method="bfill").sort_index(),
pdf.groupby("A")[["C"]].fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby("A").fillna(method="ffill").sort_index(),
pdf.groupby("A").fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.groupby("A")["C"].fillna(method="ffill").sort_index(),
pdf.groupby("A")["C"].fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.groupby("A")[["C"]].fillna(method="ffill").sort_index(),
pdf.groupby("A")[["C"]].fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5).fillna(method="bfill").sort_index(),
pdf.groupby(pdf.A // 5).fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5)["C"].fillna(method="bfill").sort_index(),
pdf.groupby(pdf.A // 5)["C"].fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5)[["C"]].fillna(method="bfill").sort_index(),
pdf.groupby(pdf.A // 5)[["C"]].fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5).fillna(method="ffill").sort_index(),
pdf.groupby(pdf.A // 5).fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5)["C"].fillna(method="ffill").sort_index(),
pdf.groupby(pdf.A // 5)["C"].fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.A // 5)[["C"]].fillna(method="ffill").sort_index(),
pdf.groupby(pdf.A // 5)[["C"]].fillna(method="ffill").sort_index(),
)
self.assert_eq(
kdf.C.rename().groupby(kdf.A).fillna(0).sort_index(),
pdf.C.rename().groupby(pdf.A).fillna(0).sort_index(),
)
self.assert_eq(
kdf.C.groupby(kdf.A.rename()).fillna(0).sort_index(),
pdf.C.groupby(pdf.A.rename()).fillna(0).sort_index(),
)
self.assert_eq(
kdf.C.rename().groupby(kdf.A.rename()).fillna(0).sort_index(),
pdf.C.rename().groupby(pdf.A.rename()).fillna(0).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("X", "A")).fillna(0).sort_index(),
pdf.groupby(("X", "A")).fillna(0).sort_index(),
)
self.assert_eq(
kdf.groupby(("X", "A")).fillna(method="bfill").sort_index(),
pdf.groupby(("X", "A")).fillna(method="bfill").sort_index(),
)
self.assert_eq(
kdf.groupby(("X", "A")).fillna(method="ffill").sort_index(),
pdf.groupby(("X", "A")).fillna(method="ffill").sort_index(),
)
def test_ffill(self):
idx = np.random.rand(4 * 3)
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2] * 3,
"B": [2, 4, None, 3] * 3,
"C": [None, None, None, 1] * 3,
"D": [0, 1, 5, 4] * 3,
},
index=idx,
)
kdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
kdf.groupby("A").ffill().sort_index(),
pdf.groupby("A").ffill().sort_index().drop("A", 1),
)
self.assert_eq(
kdf.groupby("A")[["B"]].ffill().sort_index(),
pdf.groupby("A")[["B"]].ffill().sort_index().drop("A", 1),
)
else:
self.assert_eq(
kdf.groupby("A").ffill().sort_index(), pdf.groupby("A").ffill().sort_index()
)
self.assert_eq(
kdf.groupby("A")[["B"]].ffill().sort_index(),
pdf.groupby("A")[["B"]].ffill().sort_index(),
)
self.assert_eq(
kdf.groupby("A")["B"].ffill().sort_index(), pdf.groupby("A")["B"].ffill().sort_index()
)
self.assert_eq(kdf.groupby("A")["B"].ffill()[idx[6]], pdf.groupby("A")["B"].ffill()[idx[6]])
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
kdf.columns = columns
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
kdf.groupby(("X", "A")).ffill().sort_index(),
pdf.groupby(("X", "A")).ffill().sort_index().drop(("X", "A"), 1),
)
else:
self.assert_eq(
kdf.groupby(("X", "A")).ffill().sort_index(),
pdf.groupby(("X", "A")).ffill().sort_index(),
)
def test_bfill(self):
idx = np.random.rand(4 * 3)
pdf = pd.DataFrame(
{
"A": [1, 1, 2, 2] * 3,
"B": [2, 4, None, 3] * 3,
"C": [None, None, None, 1] * 3,
"D": [0, 1, 5, 4] * 3,
},
index=idx,
)
kdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
kdf.groupby("A").bfill().sort_index(),
pdf.groupby("A").bfill().sort_index().drop("A", 1),
)
self.assert_eq(
kdf.groupby("A")[["B"]].bfill().sort_index(),
pdf.groupby("A")[["B"]].bfill().sort_index().drop("A", 1),
)
else:
self.assert_eq(
kdf.groupby("A").bfill().sort_index(), pdf.groupby("A").bfill().sort_index()
)
self.assert_eq(
kdf.groupby("A")[["B"]].bfill().sort_index(),
pdf.groupby("A")[["B"]].bfill().sort_index(),
)
self.assert_eq(
kdf.groupby("A")["B"].bfill().sort_index(), pdf.groupby("A")["B"].bfill().sort_index(),
)
self.assert_eq(kdf.groupby("A")["B"].bfill()[idx[6]], pdf.groupby("A")["B"].bfill()[idx[6]])
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
kdf.columns = columns
if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"):
self.assert_eq(
kdf.groupby(("X", "A")).bfill().sort_index(),
pdf.groupby(("X", "A")).bfill().sort_index().drop(("X", "A"), 1),
)
else:
self.assert_eq(
kdf.groupby(("X", "A")).bfill().sort_index(),
pdf.groupby(("X", "A")).bfill().sort_index(),
)
@unittest.skipIf(pd.__version__ < "0.24.0", "not supported before pandas 0.24.0")
def test_shift(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 2, 2, 3, 3] * 3,
"b": [1, 1, 2, 2, 3, 4] * 3,
"c": [1, 4, 9, 16, 25, 36] * 3,
},
index=np.random.rand(6 * 3),
)
kdf = ps.from_pandas(pdf)
self.assert_eq(kdf.groupby("a").shift().sort_index(), pdf.groupby("a").shift().sort_index())
# TODO: seems like a pandas' bug when fill_value is not None?
# self.assert_eq(kdf.groupby(['a', 'b']).shift(periods=-1, fill_value=0).sort_index(),
# pdf.groupby(['a', 'b']).shift(periods=-1, fill_value=0).sort_index())
self.assert_eq(
kdf.groupby(["b"])["a"].shift().sort_index(),
pdf.groupby(["b"])["a"].shift().sort_index(),
)
self.assert_eq(
kdf.groupby(["a", "b"])["c"].shift().sort_index(),
pdf.groupby(["a", "b"])["c"].shift().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).shift().sort_index(),
pdf.groupby(pdf.b // 5).shift().sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].shift().sort_index(),
pdf.groupby(pdf.b // 5)["a"].shift().sort_index(),
)
# TODO: known pandas' bug when fill_value is not None pandas>=1.0.0
# https://github.com/pandas-dev/pandas/issues/31971#issue-565171762
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
self.assert_eq(
kdf.groupby(["b"])[["a", "c"]].shift(periods=-1, fill_value=0).sort_index(),
pdf.groupby(["b"])[["a", "c"]].shift(periods=-1, fill_value=0).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).shift().sort_index(),
pdf.a.rename().groupby(pdf.b).shift().sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).shift().sort_index(),
pdf.a.groupby(pdf.b.rename()).shift().sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).shift().sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).shift().sort_index(),
)
self.assert_eq(kdf.groupby("a").shift().sum(), pdf.groupby("a").shift().sum().astype(int))
self.assert_eq(
kdf.a.rename().groupby(kdf.b).shift().sum(),
pdf.a.rename().groupby(pdf.b).shift().sum(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "a")).shift().sort_index(),
pdf.groupby(("x", "a")).shift().sort_index(),
)
# TODO: seems like a pandas' bug when fill_value is not None?
# self.assert_eq(kdf.groupby([('x', 'a'), ('x', 'b')]).shift(periods=-1,
# fill_value=0).sort_index(),
# pdf.groupby([('x', 'a'), ('x', 'b')]).shift(periods=-1,
# fill_value=0).sort_index())
def test_apply(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").apply(lambda x: x + x.min()).sort_index(),
pdf.groupby("b").apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby("b").apply(len).sort_index(), pdf.groupby("b").apply(len).sort_index(),
)
self.assert_eq(
kdf.groupby("b")["a"].apply(lambda x, y, z: x + x.min() + y * z, 10, z=20).sort_index(),
pdf.groupby("b")["a"].apply(lambda x, y, z: x + x.min() + y * z, 10, z=20).sort_index(),
)
self.assert_eq(
kdf.groupby("b")[["a"]].apply(lambda x: x + x.min()).sort_index(),
pdf.groupby("b")[["a"]].apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(["a", "b"]).apply(lambda x, y, z: x + x.min() + y + z, 1, z=2).sort_index(),
pdf.groupby(["a", "b"]).apply(lambda x, y, z: x + x.min() + y + z, 1, z=2).sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["c"].apply(lambda x: 1).sort_index(),
pdf.groupby(["b"])["c"].apply(lambda x: 1).sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["c"].apply(len).sort_index(),
pdf.groupby(["b"])["c"].apply(len).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)["a"].apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)[["a"]].apply(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)[["a"]].apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)[["a"]].apply(len).sort_index(),
pdf.groupby(pdf.b // 5)[["a"]].apply(len).sort_index(),
almost=True,
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).apply(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
pdf.a.groupby(pdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).apply(lambda x: x + x.min()).sort_index(),
)
with self.assertRaisesRegex(TypeError, "int object is not callable"):
kdf.groupby("b").apply(1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).apply(lambda x: 1).sort_index(),
pdf.groupby(("x", "b")).apply(lambda x: 1).sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).apply(lambda x: x + x.min()).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).apply(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(("x", "b")).apply(len).sort_index(),
pdf.groupby(("x", "b")).apply(len).sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).apply(len).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).apply(len).sort_index(),
)
def test_apply_without_shortcut(self):
with option_context("compute.shortcut_limit", 0):
self.test_apply()
def test_apply_negative(self):
def func(_) -> ps.Series[int]:
return pd.Series([1])
with self.assertRaisesRegex(TypeError, "Series as a return type hint at frame groupby"):
ps.range(10).groupby("id").apply(func)
def test_apply_with_new_dataframe(self):
pdf = pd.DataFrame(
{"timestamp": [0.0, 0.5, 1.0, 0.0, 0.5], "car_id": ["A", "A", "A", "B", "B"]}
)
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
pdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
)
self.assert_eq(
kdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
pdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
)
# dataframe with 1000+ records
pdf = pd.DataFrame(
{
"timestamp": [0.0, 0.5, 1.0, 0.0, 0.5] * 300,
"car_id": ["A", "A", "A", "B", "B"] * 300,
}
)
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
pdf.groupby("car_id").apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
)
self.assert_eq(
kdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
pdf.groupby("car_id")
.apply(lambda df: pd.DataFrame({"mean": [df["timestamp"].mean()]}))
.sort_index(),
)
def test_apply_with_new_dataframe_without_shortcut(self):
with option_context("compute.shortcut_limit", 0):
self.test_apply_with_new_dataframe()
def test_apply_key_handling(self):
pdf = pd.DataFrame(
{"d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0], "v": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]}
)
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby("d").apply(sum).sort_index(), pdf.groupby("d").apply(sum).sort_index()
)
with ps.option_context("compute.shortcut_limit", 1):
self.assert_eq(
kdf.groupby("d").apply(sum).sort_index(), pdf.groupby("d").apply(sum).sort_index()
)
def test_apply_with_side_effect(self):
pdf = pd.DataFrame(
{"d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0], "v": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]}
)
kdf = ps.from_pandas(pdf)
acc = ps.utils.default_session().sparkContext.accumulator(0)
def sum_with_acc_frame(x) -> ps.DataFrame[np.float64, np.float64]:
nonlocal acc
acc += 1
return np.sum(x)
actual = kdf.groupby("d").apply(sum_with_acc_frame).sort_index()
actual.columns = ["d", "v"]
self.assert_eq(actual, pdf.groupby("d").apply(sum).sort_index().reset_index(drop=True))
self.assert_eq(acc.value, 2)
def sum_with_acc_series(x) -> np.float64:
nonlocal acc
acc += 1
return np.sum(x)
self.assert_eq(
kdf.groupby("d")["v"].apply(sum_with_acc_series).sort_index(),
pdf.groupby("d")["v"].apply(sum).sort_index().reset_index(drop=True),
)
self.assert_eq(acc.value, 4)
def test_transform(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").transform(lambda x: x + x.min()).sort_index(),
pdf.groupby("b").transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby("b")["a"].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby("b")["a"].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby("b")[["a"]].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby("b")[["a"]].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(["a", "b"]).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(["a", "b"]).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(["b"])["c"].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(["b"])["c"].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)["a"].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)["a"].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf.b // 5)[["a"]].transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(pdf.b // 5)[["a"]].transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).transform(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
pdf.a.groupby(pdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).transform(lambda x: x + x.min()).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby(("x", "b")).transform(lambda x: x + x.min()).sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")]).transform(lambda x: x + x.min()).sort_index(),
pdf.groupby([("x", "a"), ("x", "b")]).transform(lambda x: x + x.min()).sort_index(),
)
def test_transform_without_shortcut(self):
with option_context("compute.shortcut_limit", 0):
self.test_transform()
def test_filter(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
columns=["a", "b", "c"],
)
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby("b").filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby("b").filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby("b")["a"].filter(lambda x: any(x == 2)).sort_index(),
pdf.groupby("b")["a"].filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby("b")[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby("b")[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby(["a", "b"]).filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(["a", "b"]).filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf["b"] // 5).filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(pdf["b"] // 5).filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf["b"] // 5)["a"].filter(lambda x: any(x == 2)).sort_index(),
pdf.groupby(pdf["b"] // 5)["a"].filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby(kdf["b"] // 5)[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
pdf.groupby(pdf["b"] // 5)[["a"]].filter(lambda x: any(x.a == 2)).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b).filter(lambda x: any(x == 2)).sort_index(),
pdf.a.rename().groupby(pdf.b).filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
kdf.a.groupby(kdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
pdf.a.groupby(pdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
)
self.assert_eq(
kdf.a.rename().groupby(kdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
pdf.a.rename().groupby(pdf.b.rename()).filter(lambda x: any(x == 2)).sort_index(),
)
with self.assertRaisesRegex(TypeError, "int object is not callable"):
kdf.groupby("b").filter(1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
kdf.groupby(("x", "b")).filter(lambda x: any(x[("x", "a")] == 2)).sort_index(),
pdf.groupby(("x", "b")).filter(lambda x: any(x[("x", "a")] == 2)).sort_index(),
)
self.assert_eq(
kdf.groupby([("x", "a"), ("x", "b")])
.filter(lambda x: any(x[("x", "a")] == 2))
.sort_index(),
pdf.groupby([("x", "a"), ("x", "b")])
.filter(lambda x: any(x[("x", "a")] == 2))
.sort_index(),
)
def test_idxmax(self):
pdf = pd.DataFrame(
{"a": [1, 1, 2, 2, 3] * 3, "b": [1, 2, 3, 4, 5] * 3, "c": [5, 4, 3, 2, 1] * 3}
)
kdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.groupby(["a"]).idxmax().sort_index(), kdf.groupby(["a"]).idxmax().sort_index()
)
self.assert_eq(
pdf.groupby(["a"]).idxmax(skipna=False).sort_index(),
kdf.groupby(["a"]).idxmax(skipna=False).sort_index(),
)
self.assert_eq(
pdf.groupby(["a"])["b"].idxmax().sort_index(),
kdf.groupby(["a"])["b"].idxmax().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).idxmax().sort_index(),
kdf.b.rename().groupby(kdf.a).idxmax().sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).idxmax().sort_index(),
kdf.b.groupby(kdf.a.rename()).idxmax().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).idxmax().sort_index(),
kdf.b.rename().groupby(kdf.a.rename()).idxmax().sort_index(),
)
with self.assertRaisesRegex(ValueError, "idxmax only support one-level index now"):
kdf.set_index(["a", "b"]).groupby(["c"]).idxmax()
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).idxmax().sort_index(),
kdf.groupby(("x", "a")).idxmax().sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).idxmax(skipna=False).sort_index(),
kdf.groupby(("x", "a")).idxmax(skipna=False).sort_index(),
)
def test_idxmin(self):
pdf = pd.DataFrame(
{"a": [1, 1, 2, 2, 3] * 3, "b": [1, 2, 3, 4, 5] * 3, "c": [5, 4, 3, 2, 1] * 3}
)
kdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.groupby(["a"]).idxmin().sort_index(), kdf.groupby(["a"]).idxmin().sort_index()
)
self.assert_eq(
pdf.groupby(["a"]).idxmin(skipna=False).sort_index(),
kdf.groupby(["a"]).idxmin(skipna=False).sort_index(),
)
self.assert_eq(
pdf.groupby(["a"])["b"].idxmin().sort_index(),
kdf.groupby(["a"])["b"].idxmin().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).idxmin().sort_index(),
kdf.b.rename().groupby(kdf.a).idxmin().sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).idxmin().sort_index(),
kdf.b.groupby(kdf.a.rename()).idxmin().sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).idxmin().sort_index(),
kdf.b.rename().groupby(kdf.a.rename()).idxmin().sort_index(),
)
with self.assertRaisesRegex(ValueError, "idxmin only support one-level index now"):
kdf.set_index(["a", "b"]).groupby(["c"]).idxmin()
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).idxmin().sort_index(),
kdf.groupby(("x", "a")).idxmin().sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).idxmin(skipna=False).sort_index(),
kdf.groupby(("x", "a")).idxmin(skipna=False).sort_index(),
)
def test_head(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5] * 3,
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6] * 3,
},
index=np.random.rand(10 * 3),
)
kdf = ps.from_pandas(pdf)
self.assert_eq(pdf.groupby("a").head(2).sort_index(), kdf.groupby("a").head(2).sort_index())
self.assert_eq(
pdf.groupby("a").head(-2).sort_index(), kdf.groupby("a").head(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a").head(100000).sort_index(), kdf.groupby("a").head(100000).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(2).sort_index(), kdf.groupby("a")["b"].head(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(-2).sort_index(), kdf.groupby("a")["b"].head(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(100000).sort_index(),
kdf.groupby("a")["b"].head(100000).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].head(2).sort_index(),
kdf.groupby("a")[["b"]].head(2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].head(-2).sort_index(),
kdf.groupby("a")[["b"]].head(-2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].head(100000).sort_index(),
kdf.groupby("a")[["b"]].head(100000).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2).head(2).sort_index(),
kdf.groupby(kdf.a // 2).head(2).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2)["b"].head(2).sort_index(),
kdf.groupby(kdf.a // 2)["b"].head(2).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2)[["b"]].head(2).sort_index(),
kdf.groupby(kdf.a // 2)[["b"]].head(2).sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).head(2).sort_index(),
kdf.b.rename().groupby(kdf.a).head(2).sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).head(2).sort_index(),
kdf.b.groupby(kdf.a.rename()).head(2).sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).head(2).sort_index(),
kdf.b.rename().groupby(kdf.a.rename()).head(2).sort_index(),
)
# multi-index
midx = pd.MultiIndex(
[["x", "y"], ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]],
[[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]],
)
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5],
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6],
},
columns=["a", "b", "c"],
index=midx,
)
kdf = ps.from_pandas(pdf)
self.assert_eq(pdf.groupby("a").head(2).sort_index(), kdf.groupby("a").head(2).sort_index())
self.assert_eq(
pdf.groupby("a").head(-2).sort_index(), kdf.groupby("a").head(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a").head(100000).sort_index(), kdf.groupby("a").head(100000).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(2).sort_index(), kdf.groupby("a")["b"].head(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(-2).sort_index(), kdf.groupby("a")["b"].head(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].head(100000).sort_index(),
kdf.groupby("a")["b"].head(100000).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).head(2).sort_index(),
kdf.groupby(("x", "a")).head(2).sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).head(-2).sort_index(),
kdf.groupby(("x", "a")).head(-2).sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).head(100000).sort_index(),
kdf.groupby(("x", "a")).head(100000).sort_index(),
)
def test_missing(self):
kdf = ps.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
# DataFrameGroupBy functions
missing_functions = inspect.getmembers(
MissingPandasLikeDataFrameGroupBy, inspect.isfunction
)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.groupby("a"), name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(kdf.groupby("a"), name)()
# SeriesGroupBy functions
missing_functions = inspect.getmembers(MissingPandasLikeSeriesGroupBy, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.a.groupby(kdf.a), name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(kdf.a.groupby(kdf.a), name)()
# DataFrameGroupBy properties
missing_properties = inspect.getmembers(
MissingPandasLikeDataFrameGroupBy, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.groupby("a"), name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(kdf.groupby("a"), name)
# SeriesGroupBy properties
missing_properties = inspect.getmembers(
MissingPandasLikeSeriesGroupBy, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.a.groupby(kdf.a), name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*GroupBy.*{}.*is deprecated".format(name)
):
getattr(kdf.a.groupby(kdf.a), name)
@staticmethod
def test_is_multi_agg_with_relabel():
assert is_multi_agg_with_relabel(a="max") is False
assert is_multi_agg_with_relabel(a_min=("a", "max"), a_max=("a", "min")) is True
def test_get_group(self):
pdf = pd.DataFrame(
[
("falcon", "bird", 389.0),
("parrot", "bird", 24.0),
("lion", "mammal", 80.5),
("monkey", "mammal", np.nan),
],
columns=["name", "class", "max_speed"],
index=[0, 2, 3, 1],
)
pdf.columns.name = "Koalas"
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby("class").get_group("bird"), pdf.groupby("class").get_group("bird"),
)
self.assert_eq(
kdf.groupby("class")["name"].get_group("mammal"),
pdf.groupby("class")["name"].get_group("mammal"),
)
self.assert_eq(
kdf.groupby("class")[["name"]].get_group("mammal"),
pdf.groupby("class")[["name"]].get_group("mammal"),
)
self.assert_eq(
kdf.groupby(["class", "name"]).get_group(("mammal", "lion")),
pdf.groupby(["class", "name"]).get_group(("mammal", "lion")),
)
self.assert_eq(
kdf.groupby(["class", "name"])["max_speed"].get_group(("mammal", "lion")),
pdf.groupby(["class", "name"])["max_speed"].get_group(("mammal", "lion")),
)
self.assert_eq(
kdf.groupby(["class", "name"])[["max_speed"]].get_group(("mammal", "lion")),
pdf.groupby(["class", "name"])[["max_speed"]].get_group(("mammal", "lion")),
)
self.assert_eq(
(kdf.max_speed + 1).groupby(kdf["class"]).get_group("mammal"),
(pdf.max_speed + 1).groupby(pdf["class"]).get_group("mammal"),
)
self.assert_eq(
kdf.groupby("max_speed").get_group(80.5), pdf.groupby("max_speed").get_group(80.5),
)
self.assertRaises(KeyError, lambda: kdf.groupby("class").get_group("fish"))
self.assertRaises(TypeError, lambda: kdf.groupby("class").get_group(["bird", "mammal"]))
self.assertRaises(KeyError, lambda: kdf.groupby("class")["name"].get_group("fish"))
self.assertRaises(
TypeError, lambda: kdf.groupby("class")["name"].get_group(["bird", "mammal"])
)
self.assertRaises(
KeyError, lambda: kdf.groupby(["class", "name"]).get_group(("lion", "mammal"))
)
self.assertRaises(ValueError, lambda: kdf.groupby(["class", "name"]).get_group(("lion",)))
self.assertRaises(ValueError, lambda: kdf.groupby(["class", "name"]).get_group(("mammal",)))
self.assertRaises(ValueError, lambda: kdf.groupby(["class", "name"]).get_group("mammal"))
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("A", "name"), ("B", "class"), ("C", "max_speed")])
pdf.columns.names = ["Hello", "Koalas"]
kdf = ps.from_pandas(pdf)
self.assert_eq(
kdf.groupby(("B", "class")).get_group("bird"),
pdf.groupby(("B", "class")).get_group("bird"),
)
self.assert_eq(
kdf.groupby(("B", "class"))[[("A", "name")]].get_group("mammal"),
pdf.groupby(("B", "class"))[[("A", "name")]].get_group("mammal"),
)
self.assert_eq(
kdf.groupby([("B", "class"), ("A", "name")]).get_group(("mammal", "lion")),
pdf.groupby([("B", "class"), ("A", "name")]).get_group(("mammal", "lion")),
)
self.assert_eq(
kdf.groupby([("B", "class"), ("A", "name")])[[("C", "max_speed")]].get_group(
("mammal", "lion")
),
pdf.groupby([("B", "class"), ("A", "name")])[[("C", "max_speed")]].get_group(
("mammal", "lion")
),
)
self.assert_eq(
(kdf[("C", "max_speed")] + 1).groupby(kdf[("B", "class")]).get_group("mammal"),
(pdf[("C", "max_speed")] + 1).groupby(pdf[("B", "class")]).get_group("mammal"),
)
self.assert_eq(
kdf.groupby(("C", "max_speed")).get_group(80.5),
pdf.groupby(("C", "max_speed")).get_group(80.5),
)
self.assertRaises(KeyError, lambda: kdf.groupby(("B", "class")).get_group("fish"))
self.assertRaises(
TypeError, lambda: kdf.groupby(("B", "class")).get_group(["bird", "mammal"])
)
self.assertRaises(
KeyError, lambda: kdf.groupby(("B", "class"))[("A", "name")].get_group("fish")
)
self.assertRaises(
KeyError,
lambda: kdf.groupby([("B", "class"), ("A", "name")]).get_group(("lion", "mammal")),
)
self.assertRaises(
ValueError, lambda: kdf.groupby([("B", "class"), ("A", "name")]).get_group(("lion",)),
)
self.assertRaises(
ValueError, lambda: kdf.groupby([("B", "class"), ("A", "name")]).get_group(("mammal",))
)
self.assertRaises(
ValueError, lambda: kdf.groupby([("B", "class"), ("A", "name")]).get_group("mammal")
)
def test_median(self):
kdf = ps.DataFrame(
{
"a": [1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0],
"b": [2.0, 3.0, 1.0, 4.0, 6.0, 9.0, 8.0, 10.0, 7.0, 5.0],
"c": [3.0, 5.0, 2.0, 5.0, 1.0, 2.0, 6.0, 4.0, 3.0, 6.0],
},
columns=["a", "b", "c"],
index=[7, 2, 4, 1, 3, 4, 9, 10, 5, 6],
)
# DataFrame
expected_result = ps.DataFrame(
{"b": [2.0, 8.0, 7.0], "c": [3.0, 2.0, 4.0]}, index=pd.Index([1.0, 2.0, 3.0], name="a")
)
self.assert_eq(expected_result, kdf.groupby("a").median().sort_index())
# Series
expected_result = ps.Series(
[2.0, 8.0, 7.0], name="b", index=pd.Index([1.0, 2.0, 3.0], name="a")
)
self.assert_eq(expected_result, kdf.groupby("a")["b"].median().sort_index())
with self.assertRaisesRegex(ValueError, "accuracy must be an integer; however"):
kdf.groupby("a").median(accuracy="a")
def test_tail(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5] * 3,
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6] * 3,
},
index=np.random.rand(10 * 3),
)
kdf = ps.from_pandas(pdf)
self.assert_eq(pdf.groupby("a").tail(2).sort_index(), kdf.groupby("a").tail(2).sort_index())
self.assert_eq(
pdf.groupby("a").tail(-2).sort_index(), kdf.groupby("a").tail(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a").tail(100000).sort_index(), kdf.groupby("a").tail(100000).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].tail(2).sort_index(), kdf.groupby("a")["b"].tail(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].tail(-2).sort_index(), kdf.groupby("a")["b"].tail(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].tail(100000).sort_index(),
kdf.groupby("a")["b"].tail(100000).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].tail(2).sort_index(),
kdf.groupby("a")[["b"]].tail(2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].tail(-2).sort_index(),
kdf.groupby("a")[["b"]].tail(-2).sort_index(),
)
self.assert_eq(
pdf.groupby("a")[["b"]].tail(100000).sort_index(),
kdf.groupby("a")[["b"]].tail(100000).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2).tail(2).sort_index(),
kdf.groupby(kdf.a // 2).tail(2).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2)["b"].tail(2).sort_index(),
kdf.groupby(kdf.a // 2)["b"].tail(2).sort_index(),
)
self.assert_eq(
pdf.groupby(pdf.a // 2)[["b"]].tail(2).sort_index(),
kdf.groupby(kdf.a // 2)[["b"]].tail(2).sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a).tail(2).sort_index(),
kdf.b.rename().groupby(kdf.a).tail(2).sort_index(),
)
self.assert_eq(
pdf.b.groupby(pdf.a.rename()).tail(2).sort_index(),
kdf.b.groupby(kdf.a.rename()).tail(2).sort_index(),
)
self.assert_eq(
pdf.b.rename().groupby(pdf.a.rename()).tail(2).sort_index(),
kdf.b.rename().groupby(kdf.a.rename()).tail(2).sort_index(),
)
# multi-index
midx = pd.MultiIndex(
[["x", "y"], ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]],
[[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]],
)
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5],
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6],
},
columns=["a", "b", "c"],
index=midx,
)
kdf = ps.from_pandas(pdf)
self.assert_eq(pdf.groupby("a").tail(2).sort_index(), kdf.groupby("a").tail(2).sort_index())
self.assert_eq(
pdf.groupby("a").tail(-2).sort_index(), kdf.groupby("a").tail(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a").tail(100000).sort_index(), kdf.groupby("a").tail(100000).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].tail(2).sort_index(), kdf.groupby("a")["b"].tail(2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].tail(-2).sort_index(), kdf.groupby("a")["b"].tail(-2).sort_index()
)
self.assert_eq(
pdf.groupby("a")["b"].tail(100000).sort_index(),
kdf.groupby("a")["b"].tail(100000).sort_index(),
)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
kdf.columns = columns
self.assert_eq(
pdf.groupby(("x", "a")).tail(2).sort_index(),
kdf.groupby(("x", "a")).tail(2).sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).tail(-2).sort_index(),
kdf.groupby(("x", "a")).tail(-2).sort_index(),
)
self.assert_eq(
pdf.groupby(("x", "a")).tail(100000).sort_index(),
kdf.groupby(("x", "a")).tail(100000).sort_index(),
)
def test_ddof(self):
pdf = pd.DataFrame(
{
"a": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
"b": [2, 3, 1, 4, 6, 9, 8, 10, 7, 5] * 3,
"c": [3, 5, 2, 5, 1, 2, 6, 4, 3, 6] * 3,
},
index=np.random.rand(10 * 3),
)
kdf = ps.from_pandas(pdf)
for ddof in (0, 1):
# std
self.assert_eq(
pdf.groupby("a").std(ddof=ddof).sort_index(),
kdf.groupby("a").std(ddof=ddof).sort_index(),
check_exact=False,
)
self.assert_eq(
pdf.groupby("a")["b"].std(ddof=ddof).sort_index(),
kdf.groupby("a")["b"].std(ddof=ddof).sort_index(),
check_exact=False,
)
# var
self.assert_eq(
pdf.groupby("a").var(ddof=ddof).sort_index(),
kdf.groupby("a").var(ddof=ddof).sort_index(),
check_exact=False,
)
self.assert_eq(
pdf.groupby("a")["b"].var(ddof=ddof).sort_index(),
kdf.groupby("a")["b"].var(ddof=ddof).sort_index(),
check_exact=False,
)
if __name__ == "__main__":
from pyspark.pandas.tests.test_groupby import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
cython-testbed/pandas | pandas/tests/extension/test_common.py | 1 | 2358 | import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.common import is_extension_array_dtype
from pandas.core.dtypes import dtypes
class DummyDtype(dtypes.ExtensionDtype):
pass
class DummyArray(ExtensionArray):
def __init__(self, data):
self.data = data
def __array__(self, dtype):
return self.data
@property
def dtype(self):
return DummyDtype()
def astype(self, dtype, copy=True):
# we don't support anything but a single dtype
if isinstance(dtype, DummyDtype):
if copy:
return type(self)(self.data)
return self
return np.array(self, dtype=dtype, copy=copy)
class TestExtensionArrayDtype(object):
@pytest.mark.parametrize('values', [
pd.Categorical([]),
pd.Categorical([]).dtype,
pd.Series(pd.Categorical([])),
DummyDtype(),
DummyArray(np.array([1, 2])),
])
def test_is_extension_array_dtype(self, values):
assert is_extension_array_dtype(values)
@pytest.mark.parametrize('values', [
np.array([]),
pd.Series(np.array([])),
])
def test_is_not_extension_array_dtype(self, values):
assert not is_extension_array_dtype(values)
def test_astype():
arr = DummyArray(np.array([1, 2, 3]))
expected = np.array([1, 2, 3], dtype=object)
result = arr.astype(object)
tm.assert_numpy_array_equal(result, expected)
result = arr.astype('object')
tm.assert_numpy_array_equal(result, expected)
def test_astype_no_copy():
arr = DummyArray(np.array([1, 2, 3], dtype=np.int64))
result = arr.astype(arr.dtype, copy=False)
assert arr is result
result = arr.astype(arr.dtype)
assert arr is not result
@pytest.mark.parametrize('dtype', [
dtypes.DatetimeTZDtype('ns', 'US/Central'),
dtypes.PeriodDtype("D"),
])
def test_is_not_extension_array_dtype(dtype):
assert not isinstance(dtype, dtypes.ExtensionDtype)
assert not is_extension_array_dtype(dtype)
@pytest.mark.parametrize('dtype', [
dtypes.CategoricalDtype(),
dtypes.IntervalDtype(),
])
def test_is_extension_array_dtype(dtype):
assert isinstance(dtype, dtypes.ExtensionDtype)
assert is_extension_array_dtype(dtype)
| bsd-3-clause |
daodaoliang/neural-network-animation | matplotlib/tests/test_animation.py | 9 | 2643 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import tempfile
import numpy as np
from nose import with_setup
from matplotlib import pyplot as plt
from matplotlib import animation
from matplotlib.testing.noseclasses import KnownFailureTest
from matplotlib.testing.decorators import cleanup
from matplotlib.testing.decorators import CleanupTest
WRITER_OUTPUT = dict(ffmpeg='mp4', ffmpeg_file='mp4',
mencoder='mp4', mencoder_file='mp4',
avconv='mp4', avconv_file='mp4',
imagemagick='gif', imagemagick_file='gif')
# Smoke test for saving animations. In the future, we should probably
# design more sophisticated tests which compare resulting frames a-la
# matplotlib.testing.image_comparison
def test_save_animation_smoketest():
for writer, extension in six.iteritems(WRITER_OUTPUT):
yield check_save_animation, writer, extension
@cleanup
def check_save_animation(writer, extension='mp4'):
if not animation.writers.is_available(writer):
raise KnownFailureTest("writer '%s' not available on this system"
% writer)
fig, ax = plt.subplots()
line, = ax.plot([], [])
ax.set_xlim(0, 10)
ax.set_ylim(-1, 1)
def init():
line.set_data([], [])
return line,
def animate(i):
x = np.linspace(0, 10, 100)
y = np.sin(x + i)
line.set_data(x, y)
return line,
# Use NamedTemporaryFile: will be automatically deleted
F = tempfile.NamedTemporaryFile(suffix='.' + extension)
F.close()
anim = animation.FuncAnimation(fig, animate, init_func=init, frames=5)
try:
anim.save(F.name, fps=30, writer=writer, bitrate=500)
except UnicodeDecodeError:
raise KnownFailureTest("There can be errors in the numpy " +
"import stack, " +
"see issues #1891 and #2679")
finally:
try:
os.remove(F.name)
except Exception:
pass
@cleanup
def test_no_length_frames():
fig, ax = plt.subplots()
line, = ax.plot([], [])
def init():
line.set_data([], [])
return line,
def animate(i):
x = np.linspace(0, 10, 100)
y = np.sin(x + i)
line.set_data(x, y)
return line,
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=iter(range(5)))
if __name__ == "__main__":
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
iLoop2/ResInsight | ThirdParty/Ert/devel/python/test/ert_tests/enkf/export/test_numpy_and_pandas.py | 1 | 2566 | import numpy
from pandas import MultiIndex, DataFrame, pandas
from ert.test import ExtendedTestCase
class NumpyAndPandasTest(ExtendedTestCase):
def test_numpy(self):
data = numpy.empty(shape=(10, 10), dtype=numpy.float64)
data.fill(numpy.nan)
self.assertTrue(numpy.isnan(data[0][0]))
self.assertTrue(numpy.isnan(data[9][9]))
with self.assertRaises(IndexError):
v = data[10][9]
data[5][5] = 1.0
self.assertEqual(data[5][5], 1.0)
data[0] = 5.0
test_data = numpy.empty(shape=10)
test_data.fill(5.0)
self.assertTrue(numpy.array_equal(data[0], test_data))
data = numpy.transpose(data)
self.assertTrue(numpy.array_equal(data[:,0], test_data))
row = data[0]
row[5] = 11
self.assertEqual(data[0][5], 11)
def test_pandas_join(self):
multi_index = MultiIndex.from_product([[1, 2], ["A", "B", "C"]], names=["REALIZATION", "LABEL"])
data = DataFrame(data=[[1, 2, 3], [2, 4, 6], [4, 8, 12]] * 2, index=multi_index, columns=["C1", "C2", "C3"])
new_column = DataFrame(data=[4.0, 4.4, 4.8], index=[1, 2, 3], columns=["C4"])
new_column.index.name = "REALIZATION"
result = data.join(new_column, how='inner')
self.assertFloatEqual(result["C4"][1]["A"], 4.0)
self.assertFloatEqual(result["C4"][1]["B"], 4.0)
self.assertFloatEqual(result["C4"][1]["C"], 4.0)
self.assertFloatEqual(result["C4"][2]["A"], 4.4)
self.assertFloatEqual(result["C4"][2]["B"], 4.4)
self.assertFloatEqual(result["C4"][2]["C"], 4.4)
def test_pandas_concatenate(self):
d1 = DataFrame(data=[2, 4, 6, 8], columns=["A"], index=[1, 2, 3, 4])
d2 = DataFrame(data=[[1, 1.1], [3, 3.3], [5, 5.5], [7, 7.7], [9, 9.9]], columns=["A", "B"], index=[1, 2, 3, 4, 5])
result = pandas.concat([d1, d2], keys=[1, 2])
self.assertEqual(result["A"][1][2], 4)
self.assertEqual(result["A"][2][2], 3)
self.assertTrue(numpy.isnan(result["B"][1][1]))
self.assertFloatEqual(result["B"][2][4], 7.7)
def test_pandas_extend_index(self):
d1 = DataFrame(data=[2, 4, 6, 8], columns=["A"], index=[1, 2, 3, 4])
d1.index.name = "first"
d1["second"] = "default"
d1.set_index(["second"], append=True, inplace=True)
self.assertEqual(d1.index.names, ["first", "second"])
d1 = d1.reorder_levels(["second", "first"])
self.assertEqual(d1.index.names, ["second", "first"])
| gpl-3.0 |
webbpinner/OpenVDMv2 | server/lib/openvdm_plugin.py | 1 | 15200 | #!/usr/bin/env python3
"""
FILE: openvdm_plugin.py
DESCRIPTION: OpenVDM parser/plugin python module
BUGS:
NOTES:
AUTHOR: Webb Pinner
COMPANY: Capable Solutions
VERSION: 2.5
CREATED: 2016-02-02
REVISION: 2020-12-21
LICENSE INFO: Open Vessel Data Management v2.5 (OpenVDMv2)
Copyright (C) OceanDataRat.org 2021
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
"""
import json
import fnmatch
import logging
from datetime import datetime
import numpy as np
import pandas as pd
STAT_TYPES = [
'bounds',
'geoBounds',
'rowValidity',
'timeBounds',
'totalValue',
'valueValidity'
]
QUALITY_TEST_RESULT_TYPES = [
'Failed',
'Warning',
'Passed'
]
class NpEncoder(json.JSONEncoder):
"""
Custom JSON string encoder used to deal with NumPy arrays
"""
def default(self, obj): # pylint: disable=arguments-differ
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, datetime):
return obj.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
return super().default(obj)
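# Illustrative usage sketch: NpEncoder lets json.dumps serialize the NumPy
# scalars, arrays and datetime objects that the default encoder rejects, e.g.:
#   json.dumps({'lat': np.float64(45.5), 'samples': np.array([1, 2, 3])}, cls=NpEncoder)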
class OpenVDMParserQualityTest():
"""
Defines data object and methods for OpenVDM parser QA tests
"""
def __init__(self, test_name, test_value):
if test_value not in QUALITY_TEST_RESULT_TYPES:
raise ValueError("Invalid test result type: type must be one of: %s" % ', '.join(QUALITY_TEST_RESULT_TYPES))
self.test_data = {
'testName':test_name,
'results': test_value
}
def get_test_data(self):
"""
Return test data object
"""
return self.test_data
def to_json(self):
"""
Return test data object as a json-formatted string
"""
return json.dumps(self.get_test_data(), cls=NpEncoder)
class OpenVDMParserQualityTestFailed(OpenVDMParserQualityTest):
"""
Defines data object for a failed OpenVDM QA test
"""
def __init__(self, test_name):
super().__init__(test_name=test_name, test_value='Failed')
class OpenVDMParserQualityTestWarning(OpenVDMParserQualityTest):
"""
Defines data object for a partially failed (warning) OpenVDM QA test
"""
def __init__(self, test_name):
super().__init__(test_name=test_name, test_value='Warning')
class OpenVDMParserQualityTestPassed(OpenVDMParserQualityTest):
"""
Defines data object for a passing OpenVDM QA test
"""
def __init__(self, test_name):
super().__init__(test_name=test_name, test_value='Passed')
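# Illustrative usage sketch for the quality-test wrappers above (the test name
# is made up for the example):
#   test = OpenVDMParserQualityTestPassed('Parsing Test')
#   test.to_json()  # -> '{"testName": "Parsing Test", "results": "Passed"}'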
class OpenVDMParserStat():
"""
Defines data object and methods for OpenVDM plugin statistic
"""
def __init__(self, stat_name, stat_type, stat_value, stat_uom=''): # pylint: disable=too-many-branches
if stat_type not in STAT_TYPES:
raise ValueError("Invalid stat type, must be one of: %s" % ', '.join(STAT_TYPES))
if stat_type == 'bounds':
if not isinstance(stat_value, list) or len(stat_value) != 2:
raise ValueError("bounds stat requires list of length 2")
for element in stat_value:
if not isinstance(element, float) and not isinstance(element, int):
raise ValueError("bounds stat requires list of 2 numbers")
elif stat_type == 'geoBounds':
if not isinstance(stat_value, list) or len(stat_value) != 4:
raise ValueError("geoBounds stat requires list of 4 numbers")
for element in stat_value:
if not isinstance(element, float) and not isinstance(element, int):
raise ValueError("geoBounds stat requires list of 4 numbers")
        elif stat_type == 'rowValidity':
            if not isinstance(stat_value, list) or len(stat_value) != 2:
                raise ValueError("rowValidity stat requires a list of 2 integers")
            for element in stat_value:
                if not isinstance(element, int):
                    raise ValueError("rowValidity stat requires a list of 2 integers")
        elif stat_type == 'timeBounds':
            if not isinstance(stat_value, list) or len(stat_value) != 2:
                raise ValueError("timeBounds stat requires a list of 2 datetime objects")
            for element in stat_value:
                if not isinstance(element, datetime):
                    logging.debug(type(element))
                    raise ValueError("timeBounds stat requires a list of 2 datetime objects")
        elif stat_type == 'totalValue':
            if not isinstance(stat_value, list) or len(stat_value) != 1:
                raise ValueError("totalValue stat requires a list of 1 number")
            for element in stat_value:
                if not isinstance(element, float) and not isinstance(element, int):
                    raise ValueError("totalValue stat requires a list of 1 number")
        elif stat_type == 'valueValidity':
            if not isinstance(stat_value, list) or len(stat_value) != 2:
                raise ValueError("valueValidity stat requires a list of 2 numbers")
            for element in stat_value:
                if not isinstance(element, float) and not isinstance(element, int):
                    raise ValueError("valueValidity stat requires a list of 2 numbers")
self.stat_data = {
'statName': stat_name,
'statType': stat_type,
'statUnit': stat_uom,
'statValue': stat_value
}
def get_stat_data(self):
"""
Return the statistic data
"""
return self.stat_data
def to_json(self):
"""
Return the statistic data as a json-formatted string
"""
return json.dumps(self.get_stat_data(), cls=NpEncoder)
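# Illustrative usage sketch: each stat type enforces the list shape validated in
# __init__ above; the name, values and unit below are made up for the example:
#   depth = OpenVDMParserStat('Depth Bounds', 'bounds', [10.2, 4250.0], 'm')
#   depth.to_json()  # -> '{"statName": "Depth Bounds", "statType": "bounds", ...}'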
class OpenVDMParserBoundsStat(OpenVDMParserStat):
"""
Defines data object and methods for OpenVDM bounds statistic
"""
def __init__(self, stat_value, stat_name, stat_uom=''):
super().__init__(stat_name=stat_name, stat_type="bounds", stat_value=stat_value, stat_uom=stat_uom)
class OpenVDMParserGeoBoundsStat(OpenVDMParserStat):
"""
Defines data object and methods for OpenVDM geoBounds statistic
"""
def __init__(self, stat_value, stat_name='Geographic Bounds', stat_uom='ddeg'):
super().__init__(stat_name=stat_name, stat_type="geoBounds", stat_value=stat_value, stat_uom=stat_uom)
class OpenVDMParserRowValidityStat(OpenVDMParserStat):
"""
Defines data object and methods for OpenVDM rowValidity statistic
"""
def __init__(self, stat_value):
super().__init__(stat_name="Row Validity", stat_type="rowValidity", stat_value=stat_value, stat_uom='')
class OpenVDMParserTimeBoundsStat(OpenVDMParserStat):
"""
Defines data object and methods for OpenVDM timeBounds statistic
"""
def __init__(self, stat_value, stat_name='Temporal Bounds', stat_uom='seconds'):
super().__init__(stat_name=stat_name, stat_type="timeBounds", stat_value=stat_value, stat_uom=stat_uom)
class OpenVDMParserTotalValueStat(OpenVDMParserStat):
"""
Defines data object and methods for OpenVDM totalValue statistic
"""
def __init__(self, stat_value, stat_name, stat_uom=''):
super().__init__(stat_name=stat_name, stat_type="totalValue", stat_value=stat_value, stat_uom=stat_uom)
class OpenVDMParserValueValidityStat(OpenVDMParserStat):
"""
Defines data object and methods for OpenVDM valueValidity statistic
"""
def __init__(self, stat_value, stat_name):
super().__init__(stat_name=stat_name, stat_type="valueValidity", stat_value=stat_value, stat_uom='')
class OpenVDMParser():
"""
    Root class for an OpenVDM parser object
"""
def __init__(self):
self.plugin_data = {
'visualizerData': [],
'qualityTests': [],
'stats': []
}
def get_plugin_data(self):
"""
Return the plugin data
"""
if len(self.plugin_data['visualizerData']) > 0 or len(self.plugin_data['qualityTests']) > 0 or len(self.plugin_data['stats']) > 0:
return self.plugin_data
return None
def process_file(self, filepath):
"""
Process the given file
"""
raise NotImplementedError('process_file must be implemented by subclass')
    def add_visualization_data(self, data):
        """
        Add the given visualization data to the plugin data object
        """
self.plugin_data['visualizerData'].append(data)
def add_quality_test_failed(self, name):
"""
Add a failed QA test with the provided name
"""
test = OpenVDMParserQualityTestFailed(name)
self.plugin_data['qualityTests'].append(test.get_test_data())
def add_quality_test_warning(self, name):
"""
Add a partially failed QA test with the provided name
"""
test = OpenVDMParserQualityTestWarning(name)
self.plugin_data['qualityTests'].append(test.get_test_data())
def add_quality_test_passed(self, name):
"""
Add a passing QA test with the provided name
"""
test = OpenVDMParserQualityTestPassed(name)
self.plugin_data['qualityTests'].append(test.get_test_data())
def add_bounds_stat(self, value, name, uom=''):
"""
Add a bounds statistic with the given name, value and unit of measure
"""
stat = OpenVDMParserBoundsStat(value, name, uom)
self.plugin_data['stats'].append(stat.get_stat_data())
def add_geobounds_stat(self, value, name='Geographic Bounds', uom='ddeg'):
"""
Add a geoBounds statistic with the given name, value and unit of measure
"""
stat = OpenVDMParserGeoBoundsStat(value, name, uom)
self.plugin_data['stats'].append(stat.get_stat_data())
def add_row_validity_stat(self, value):
"""
        Add a rowValidity statistic with the given value
"""
stat = OpenVDMParserRowValidityStat(value)
self.plugin_data['stats'].append(stat.get_stat_data())
def add_time_bounds_stat(self, value, name='Temporal Bounds', uom='seconds'):
"""
Add a timeBounds statistic with the given name, value and unit of measure
"""
stat = OpenVDMParserTimeBoundsStat(value, name, uom)
self.plugin_data['stats'].append(stat.get_stat_data())
def add_total_value_stat(self, value, name, uom=''):
"""
Add a totalValue statistic with the given name, value and unit of measure
"""
stat = OpenVDMParserTotalValueStat(value, name, uom)
self.plugin_data['stats'].append(stat.get_stat_data())
def add_value_validity_stat(self, value, name):
"""
        Add a valueValidity statistic with the given name and value
"""
stat = OpenVDMParserValueValidityStat(value, name)
self.plugin_data['stats'].append(stat.get_stat_data())
def to_json(self):
"""
        Return the plugin data as a json-formatted string
"""
return json.dumps(self.get_plugin_data())
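# Typical flow for a concrete parser (sketch): instantiate a subclass, call
# process_file() on each data file, then hand get_plugin_data() or to_json()
# back to OpenVDM. Only process_file() has to be provided by the subclass;
# the add_* helpers above populate the stats and quality tests.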
class OpenVDMCSVParser(OpenVDMParser):
"""
OpenVDM parser for a CSV-style input file
"""
def __init__(self, start_dt=None, stop_dt=None):
self.start_dt = start_dt
self.stop_dt = stop_dt
self.tmpdir = None
super().__init__()
def process_file(self, filepath):
"""
Process the given file
"""
raise NotImplementedError('process_file must be implemented by subclass')
def crop_data(self, data_frame):
"""
Crop the data to the start/stop time specified in the parser object
"""
try:
if self.start_dt is not None:
logging.debug(" start_dt: %s", self.start_dt)
data_frame = data_frame[(data_frame['date_time'] >= self.start_dt)]
if self.stop_dt is not None:
logging.debug(" stop_dt: %s", self.stop_dt)
data_frame = data_frame[(data_frame['date_time'] <= self.stop_dt)]
except Exception as err:
logging.error("Could not crop data")
logging.error(str(err))
raise err
return data_frame
@staticmethod
def resample_data(data_frame, resample_interval='1T'):
"""
Resample the data to the specified interval
"""
try:
resample_df = data_frame.resample(resample_interval, label='right', closed='right').mean()
except Exception as err:
logging.error("Could not resample data")
logging.error(str(err))
raise err
# reset index
return resample_df.reset_index()
@staticmethod
def round_data(data_frame, precision=None):
"""
Round the data to the specified precision
"""
        # precision is expected to map column names to decimal places; skip
        # rounding when it is missing or empty so the default call is a no-op.
        if precision:
            try:
                decimals = pd.Series(precision.values(), index=precision.keys())
                return data_frame.round(decimals)
            except Exception as err:
                logging.error("Could not round data")
                logging.error(str(err))
                raise err
        return data_frame
def to_json(self):
"""
Output the plugin data as a json-formatted string.
"""
return json.dumps(self.get_plugin_data(), cls=NpEncoder)
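# Minimal illustrative subclass (a sketch, not part of the OpenVDM API): the
# column names, quality-test labels and geoBounds ordering below are
# assumptions chosen only to show how process_file() can use the helpers above.
class ExampleCSVParser(OpenVDMCSVParser):
    """
    Sketch of a concrete parser: expects 'date_time', 'latitude' and
    'longitude' columns and records basic stats and quality tests.
    """
    def process_file(self, filepath):
        try:
            raw_df = pd.read_csv(filepath, parse_dates=['date_time'])
        except Exception as err:
            logging.error(str(err))
            self.add_quality_test_failed('Parsing Test')
            return
        self.add_quality_test_passed('Parsing Test')
        total_rows = len(raw_df)
        df = self.crop_data(raw_df.dropna())
        self.add_row_validity_stat([total_rows, total_rows - len(df)])
        self.add_geobounds_stat([
            df['longitude'].min(), df['latitude'].min(),
            df['longitude'].max(), df['latitude'].max()
        ])
        self.add_time_bounds_stat([
            df['date_time'].min().to_pydatetime(),
            df['date_time'].max().to_pydatetime()
        ])
        self.add_visualization_data(df.to_dict(orient='list'))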
class OpenVDMPlugin():
"""
OpenVDM plugin object
"""
def __init__(self, file_type_filters):
self.file_type_filters = file_type_filters
def get_data_type(self, filepath):
"""
Return the data type for the given file
"""
file_type_filter = list(filter(lambda file_type_filter: fnmatch.fnmatch(filepath, file_type_filter['regex']), self.file_type_filters))
if len(file_type_filter) == 0:
return None
return file_type_filter[0]['data_type']
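    # Illustrative shape of file_type_filters (an assumption based on the keys
    # used above, not a documented schema):
    #   [{'regex': '*/GGA/*.Raw', 'data_type': 'gga'},
    #    {'regex': '*/TSG/*.Raw', 'data_type': 'tsg'}]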
def get_parser(self, filepath):
"""
Return the OpenVDM parser object appropriate for the given file
"""
        raise NotImplementedError('get_parser must be implemented by subclass')
def get_json_str(self, filepath):
"""
Return the plugin output corresponding to the given file.
"""
parser = self.get_parser(filepath)
if parser is None:
return None
parser.process_file(filepath)
return parser.to_json()
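# Hedged usage sketch (not part of the module above): a minimal concrete parser
# that wires the shared helpers together. The CSV layout, the 'depth' column,
# the stat value layout and the ExampleCSVParser name are illustrative
# assumptions; only the 'date_time' column is required by crop_data().
class ExampleCSVParser(OpenVDMCSVParser):
    """Toy parser: load a CSV with a 'date_time' column and tidy it up."""
    def process_file(self, filepath):
        import pandas as pd  # the enclosing module already imports pandas as pd
        df = pd.read_csv(filepath, parse_dates=['date_time'])
        df = self.crop_data(df)                             # honor start_dt/stop_dt, if set
        df = self.resample_data(df.set_index('date_time'))  # 1-minute means, index reset
        df = self.round_data(df, precision={'depth': 1})    # round assumed 'depth' column
        self.add_row_validity_stat([len(df), 0])            # assumed [valid, invalid] layout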
| gpl-3.0 |
Leotrinos/agpy | agpy/__init__.py | 5 | 1642 | """
====
agpy
====
The functions included below are the 'mature' code from the agpy package.
.. moduleauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com>
"""
##from luminosity import luminosity
#import readcol as readcol_mod
from readcol import readcol
##from UCHIIfitter import HIIregion
import gaussfitter
from gaussfitter import moments,twodgaussian,gaussfit,onedgaussian,onedgaussfit
#import kdist
from kdist import kdist,vector_kdist
#from plfit import plfit
from reg_gal2cel import gal2cel
from posang import posang
#import densitymap
#import downsample as downsample_mod
from AG_image_tools import downsample,downsample_cube
#from asinh_norm import AsinhNorm
#import showspec # imports matplotlib = BAD
from contributed import parallel_map
from timer import print_timing
from region_photometry import region_photometry
from region_photometry_files import region_photometry_files
from PCA_tools import efuncs,pca_subtract,unpca_subtract,smooth_waterfall
import constants
import blackbody
import AG_fft_tools
from AG_fft_tools import *
import AG_image_tools
from AG_image_tools import *
import cutout
import get_cutouts
import pymc_plotting
import imf
import montage_wrapper as montage
from __version__ import __version__
# import all of the functions but not the modules...
__all__ = ['readcol', 'gaussfitter', 'kdist', 'reg_gal2cel', 'posang',
'densitymap', 'downsample', 'correlate2d', 'psds', 'convolve', 'radialprofile',
'constants','gal2cel', 'convolve', 'smooth', 'azimuthalAverage',
'azimuthalAverageBins', 'kdist', 'vector_kdist', 'moments', 'twodgaussian',
'gaussfit', 'onedgaussian', 'onedgaussfit']
| mit |
elenita1221/BDA_py_demos | demos_ch5/demo5_2.py | 19 | 3326 | """Bayesian Data Analysis, 3rd ed
Chapter 5, demo 2
Hierarchical model for SAT-example data (BDA3, p. 102)
"""
from __future__ import division
import numpy as np
from scipy.stats import norm
import scipy.io # For importing a matlab file
import matplotlib.pyplot as plt
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=(plt.rcParams['lines.color'],)) # Disable color cycle
# SAT-example data (BDA3 p. 120)
# y is the estimated treatment effect
# s is the standard error of effect estimate
y = np.array([28, 8, -3, 7, -1, 1, 18, 12])
s = np.array([15, 10, 16, 11, 9, 11, 10, 18])
M = len(y)
# load the pre-computed results for the hierarchical model
# replace this with your own code in Ex 5.1*
hres_path = '../utilities_and_data/demo5_2.mat'
hres = scipy.io.loadmat(hres_path)
''' Content information of the precalculated results:
>>> scipy.io.whosmat('demo5_2.mat')
[('pxm', (8, 500), 'double'),
('t', (1, 1000), 'double'),
('tp', (1, 1000), 'double'),
('tsd', (8, 1000), 'double'),
('tm', (8, 1000), 'double')]
'''
pxm = hres['pxm']
t = hres['t'][0]
tp = hres['tp'][0]
tsd = hres['tsd']
tm = hres['tm']
# plot the separate, pooled and hierarchical models
fig, axes = plt.subplots(3, 1, sharex=True, figsize=(8,10))
x = np.linspace(-40, 60, 500)
# separate
lines = axes[0].plot(x, norm.pdf(x[:,None], y[1:], s[1:]), linewidth=1)
line, = axes[0].plot(x, norm.pdf(x, y[0], s[0]), 'r')
axes[0].legend((line, lines[1]), ('school A', 'other schools'),
loc='upper left')
axes[0].set_yticks(())
axes[0].set_title('separate model')
# pooled
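# Under complete pooling (flat prior on the common effect, known sigma_j), the
# posterior is normal with precision-weighted mean sum(y/s^2)/sum(1/s^2) and
# variance 1/sum(1/s^2); that is what the norm.pdf call below evaluates.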
axes[1].plot(
x,
norm.pdf(
x,
np.sum(y/s**2)/np.sum(1/s**2),
np.sqrt(1/np.sum(1/s**2))
),
label='All schools'
)
axes[1].legend(loc='upper left')
axes[1].set_yticks(())
axes[1].set_title('pooled model')
# hierarchical
lines = axes[2].plot(x, pxm[1:].T, linewidth=1)
line, = axes[2].plot(x, pxm[0], 'r')
axes[2].legend((line, lines[1]), ('school A', 'other schools'),
loc='upper left')
axes[2].set_yticks(())
axes[2].set_title('hierarchical model')
axes[2].set_xlabel('Treatment effect')
# plot various marginal and conditional posterior summaries
fig, axes = plt.subplots(3, 1, sharex=True, figsize=(8,10))
axes[0].plot(t, tp)
axes[0].set_yticks(())
axes[0].set_title(r'marginal posterior density $p(\tau|y)$')
axes[0].set_ylabel(r'$p(\tau|y)$', fontsize=20)
axes[0].set_xlim([0,35])
lines = axes[1].plot(t, tm[1:].T, linewidth=1)
line, = axes[1].plot(t, tm[0].T, 'r')
axes[1].legend((line, lines[1]), ('school A', 'other schools'),
loc='upper left')
axes[1].set_title(r'conditional posterior means of effects '
r'$\operatorname{E}(\theta_j|\tau,y)$')
axes[1].set_ylabel(r'$\operatorname{E}(\theta_j|\tau,y)$', fontsize=20)
lines = axes[2].plot(t, tsd[1:].T, linewidth=1)
line, = axes[2].plot(t, tsd[0].T, 'r')
axes[2].legend((line, lines[1]), ('school A', 'other schools'),
loc='upper left')
axes[2].set_title(r'standard deviations of effects '
r'$\operatorname{sd}(\theta_j|\tau,y)$')
axes[2].set_ylabel(r'$\operatorname{sd}(\theta_j|\tau,y)$', fontsize=20)
axes[2].set_xlabel(r'$\tau$', fontsize=20)
plt.show()
| gpl-3.0 |
Sentient07/scikit-learn | sklearn/linear_model/coordinate_descent.py | 7 | 81810 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import _preprocess_data
from ..utils import check_array, check_X_y
from ..utils.validation import check_random_state
from ..model_selection import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..exceptions import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 < l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty (currently not
        supported). For ``l1_ratio = 1`` it is an L1 penalty. For
        ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
if l1_ratio == 0:
raise ValueError("Automatic alpha grid generation is not supported for"
" l1_ratio=0. Please supply a grid by providing "
"your estimator with the appropriate `alphas=` "
"argument.")
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize,
return_mean=True)
mean_dot = X_offset * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_scale[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
    faster at computing this path. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path.
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, return_n_iter=return_n_iter, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False,
check_input=True, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
check_input : bool, default True
        Skip input validation checks, including the Gram matrix when provided,
        assuming they are handled by the caller when check_input=False.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
# We expect X and y to be already Fortran ordered when bypassing
# checks
if check_input:
X = check_array(X, 'csc', dtype=[np.float64, np.float32],
order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=X.dtype.type, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
# Xy should be a 1d contiguous array or a 2D C ordered array
Xy = check_array(Xy, dtype=X.dtype.type, order='C', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_offset' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_offset'] / params['X_scale']
X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)
else:
X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if check_input:
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False, copy=False)
if alphas is None:
        # No need to normalize or fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=X.dtype)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=X.dtype)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1], dtype=X.dtype))
else:
coef_ = np.asfortranarray(coef_init, dtype=X.dtype)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, dtype=X.dtype.type,
order='C')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like. Got %r" % precompute)
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations.' +
' Fitting data with very small alpha' +
' may cause precision problems.',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
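# Hedged usage sketch (not part of the original module): computing a short
# elastic-net path with enet_path() above on toy data; the helper name and the
# numbers are illustrative, shapes follow the Returns section of the docstring.
def _example_enet_path():
    X = np.array([[1.0, 2.0, 3.1], [2.3, 5.4, 4.3]]).T  # 3 samples, 2 features
    y = np.array([1.0, 2.0, 3.1])
    alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.5, n_alphas=5)
    # alphas: (5,) decreasing grid; coefs: (2, 5), one column per alpha
    return alphas, coefs, dual_gaps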
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the penalty terms. Defaults to 1.0.
See the notes for the exact mathematical meaning of this
        parameter. ``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
precompute : True | False | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. The Gram matrix can also be passed as argument.
For sparse input this option is always ``True`` to preserve sparsity.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y, check_input=True):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if isinstance(self.precompute, six.string_types):
raise ValueError('precompute should be one of True, False or'
' array-like. Got %r' % self.precompute)
# We expect X and y to be float64 or float32 Fortran ordered arrays
# when bypassing checks
if check_input:
X, y = check_X_y(X, y, accept_sparse='csc',
order='F', dtype=[np.float64, np.float32],
copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
y = check_array(y, order='F', copy=False, dtype=X.dtype.type,
ensure_2d=False)
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=False)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or not hasattr(self, "coef_"):
coef_ = np.zeros((n_targets, n_features), dtype=X.dtype,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_offset=X_offset, X_scale=X_scale, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_offset, y_offset, X_scale)
# workaround since _set_intercept will cast self.coef_ into X.dtype
self.coef_ = np.asarray(self.coef_, dtype=X.dtype)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted ``coef_`` """
return sparse.csr_matrix(self.coef_)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
else:
return super(ElasticNet, self)._decision_function(X)
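# Hedged usage sketch (not part of the original module): fitting the ElasticNet
# estimator defined above on toy data; the helper name and the numbers are
# illustrative only.
def _example_elastic_net():
    X = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    y = np.array([0.0, 1.0, 2.0])
    model = ElasticNet(alpha=0.1, l1_ratio=0.7, random_state=0)
    model.fit(X, y)
    # coef_ holds w from the objective in the class docstring, intercept_ the offset
    return model.coef_, model.intercept_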
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | array-like, default=False
Whether to use a precomputed Gram matrix to speed up
        calculations. The Gram matrix can also be passed as argument.
        For sparse input this option is always ``True`` to preserve
        sparsity.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_offset'] = X_offset
path_params['X_scale'] = X_scale
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_offset = np.atleast_1d(y_offset)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_scale)
coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]
intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Work around for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = check_array(y, copy=False, dtype=[np.float64, np.float32],
ensure_2d=False)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if (hasattr(reference_to_old_X, "data") and
not np.may_share_memory(reference_to_old_X.data, X.data)):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=[np.float64, np.float32],
order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv.split(X))
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=X.dtype.type)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/plot_lasso_model_selection.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
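# Hedged usage sketch (not part of the original module): cross-validated alpha
# selection with LassoCV above; the helper name and the synthetic data are
# illustrative only.
def _example_lasso_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 4)
    y = X[:, 0] + 0.1 * rng.randn(50)
    reg = LassoCV(cv=3, random_state=0).fit(X, y)
    # alpha_ is the penalty picked from the alphas_ grid by minimizing mse_path_
    return reg.alpha_, reg.coef_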
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float or array of floats, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
    intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/plot_lasso_model_selection.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
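# Hedged usage sketch (not part of the original module): letting ElasticNetCV
# above pick both alpha and l1_ratio from a small list, as the l1_ratio
# docstring suggests; the helper name and the synthetic data are illustrative.
def _example_elastic_net_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 5)
    y = X[:, 0] - 2.0 * X[:, 1] + 0.1 * rng.randn(60)
    reg = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1], cv=3, random_state=0)
    reg.fit(X, y)
    return reg.alpha_, reg.l1_ratio_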
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_elastic_net>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array.
Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskElasticNet model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
X = check_array(X, dtype=[np.float64, np.float32], order='F',
copy=self.copy_X and self.fit_intercept)
y = check_array(y, dtype=X.dtype.type, ensure_2d=False)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=X.dtype.type,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_offset, y_offset, X_scale)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations',
ConvergenceWarning)
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
    (1 / (2 * n_samples)) * ||Y - XW||_Fro^2
    + alpha * l1_ratio * ||W||_21
    + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
    (1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
| bsd-3-clause |
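The estimators above all document the same coefficient layout (``coef_`` has shape ``(n_tasks, n_features)``) and their Notes recommend passing ``X`` as a Fortran-contiguous array to avoid an extra copy. A minimal usage sketch, illustrative only and not part of the scikit-learn source above (the toy data and parameter values are made up):

import numpy as np
from sklearn.linear_model import MultiTaskElasticNet, MultiTaskLassoCV

rng = np.random.RandomState(0)
X = np.asfortranarray(rng.randn(50, 10))      # Fortran order avoids an internal copy in fit
W = np.zeros((10, 3))
W[:4] = rng.randn(4, 3)                       # the same 4 informative features for every task
Y = X.dot(W) + 0.01 * rng.randn(50, 3)

enet = MultiTaskElasticNet(alpha=0.1, l1_ratio=0.7).fit(X, Y)
print(enet.coef_.shape)                       # (n_tasks, n_features) == (3, 10)

lasso_cv = MultiTaskLassoCV(cv=3).fit(X, Y)   # alpha chosen by cross-validation over the grid
print(lasso_cv.alpha_)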
dsullivan7/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 4 | 13432 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and
test sets. The compressed dataset is around 14 MB; once uncompressed,
the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
        # The .tar.gz archive is removed after a successful download, so if it
        # is still present the previous download did not complete.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
open(archive_path, 'wb').write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
open(cache_path, 'wb').write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
    (for example, because they contain the string 'writes:').
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
| bsd-3-clause |
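A minimal sketch of how the loader documented above is typically called, exercising the ``categories`` and ``remove`` options (illustrative only; it assumes scikit-learn is installed and the archive can be downloaded to the default data home):

from sklearn.datasets import fetch_20newsgroups

train = fetch_20newsgroups(subset='train',
                           categories=['sci.space', 'rec.autos'],
                           remove=('headers', 'footers', 'quotes'),
                           shuffle=True, random_state=42)
print(len(train.data))       # number of posts kept after filtering to the two categories
print(train.target_names)    # ['rec.autos', 'sci.space']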
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/manifold/setup.py | 1 | 1245 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.c"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.c"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| mit |
gdikos/trade-smart | market_sim.py | 3 | 5585 | import datetime as dt
import numpy as np
import pandas as pd
# QSTK Imports
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkutil.qsdateutil as du
def get_orders_list(s_file_path):
l_columns = ["year", "month", "day", "sym", "type", "num"]
df_orders_list = pd.read_csv(s_file_path, sep=',', header=None)
df_orders_list = df_orders_list.dropna(axis=1, how='all')
df_orders_list.columns = l_columns
return df_orders_list
def get_orders(df_orders_list):
na_orders_list = df_orders_list.values
l_orders = []
ld_daily_orders = None
for order in na_orders_list:
dt_date = dt.datetime(order[0], order[1], order[2], hour=16)
d_order = {df_orders_list.columns[3]: order[3], \
df_orders_list.columns[4]: order[4], \
df_orders_list.columns[5]: int(order[5])}
if l_orders != [] and dt_date == l_orders[-1][0]:
l_orders[-1][1].append(d_order)
else:
ld_daily_orders = []
ld_daily_orders.append(d_order)
l_orders.append([dt_date, ld_daily_orders])
na_orders = np.array(l_orders)
df_orders = pd.DataFrame(na_orders[:, 1], index=na_orders[:, 0], columns=["ord"])
df_orders = df_orders.sort()
dt_start = df_orders.ix[0].name
dt_end = df_orders.ix[-1].name
ls_symbols = list(set(df_orders_list["sym"]))
    ls_symbols.sort()  # sorting is necessary because set() does not preserve order
return df_orders, dt_start, dt_end, ls_symbols
def get_data(dt_start, dt_end, ls_symbols):
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16))
ls_keys = ["open", "high", "low", "close", "volume", "actual_close"]
dataobj = da.DataAccess('Yahoo')
ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method="ffill")
d_data[s_key] = d_data[s_key].fillna(method="bfill")
d_data[s_key] = d_data[s_key].fillna(1.0)
return d_data
def get_prices(dt_start, dt_end, ls_symbols, s_key="close"):
# close = adjusted close
# actual_close = actual close
d_data = get_data(dt_start, dt_end, ls_symbols)
return d_data[s_key]
def process_daily_orders(dt_date, df_orders, df_prices, df_num, df_val, df_res):
op = 0
daily_orders = list(df_orders.ix[dt_date, "ord"])
for order in daily_orders:
if order["type"] == "Buy":
op = 1
elif order["type"] == "Sell":
op = -1
df_num.ix[dt_date, order["sym"]] += op * order["num"]
df_res.ix[dt_date, "cash"] += -op * order["num"] * df_prices.ix[dt_date, order["sym"]]
def update_port(dt_date, dt_last_orders_date, ls_symbols, df_num, df_res):
for s_symbol in ls_symbols:
df_num.ix[dt_date, s_symbol] = df_num.ix[dt_last_orders_date, s_symbol]
df_res.ix[dt_date, "cash"] = df_res.ix[dt_last_orders_date, "cash"]
def value_port(dt_date, ls_symbols, df_prices, df_num, df_val, df_res):
for s_symbol in ls_symbols:
df_val.ix[dt_date, s_symbol] = df_num.ix[dt_date, s_symbol] * df_prices.ix[dt_date, s_symbol]
df_res.ix[dt_date, "port"] = np.sum(df_val.ix[dt_date, :])
df_res.ix[dt_date, "total"] = df_res.ix[dt_date, "port"] + df_res.ix[dt_date, "cash"]
def process_orders(df_orders, df_prices, cash):
ldt_dates = list(df_prices.index)
ls_symbols = list(df_prices.columns)
df_num = pd.DataFrame(index=ldt_dates, columns=ls_symbols)
df_val = pd.DataFrame(index=ldt_dates, columns=ls_symbols)
df_res = pd.DataFrame(index=ldt_dates, columns=["port", "cash", "total"])
df_num = df_num.fillna(0.0)
df_val = df_val.fillna(0.0)
df_res = df_res.fillna(0.0)
df_res.ix[0, "cash"] = cash
ldt_orders_dates = list(df_orders.index)
iter_orders_dates = iter(ldt_orders_dates)
dt_orders_date = iter_orders_dates.next()
dt_last_orders_date = dt_orders_date
for dt_date in ldt_dates:
update_port(dt_date, dt_last_orders_date, ls_symbols, df_num, df_res)
if dt_date == dt_orders_date:
process_daily_orders(dt_date, df_orders, df_prices, df_num, df_val, df_res)
try:
dt_last_orders_date = dt_orders_date
dt_orders_date = iter_orders_dates.next()
except StopIteration:
pass
value_port(dt_date, ls_symbols, df_prices, df_num, df_val, df_res)
df_port = df_num.join(df_val, lsuffix="_num", rsuffix="_val").join(df_res)
#df_port.to_csv("port.csv")
return df_port
def save_values(df_port, s_out_file_path):
ldt_dates = df_port.index
na_dates = np.array([[dt_date.year, dt_date.month, dt_date.day] for dt_date in ldt_dates])
na_total = np.array(df_port["total"])
na_values = np.insert(arr=na_dates, obj=3, values=na_total, axis=1)
df_values = pd.DataFrame(na_values, columns=["year", "month", "day", "total"])
df_values.to_csv(s_out_file_path, sep=",", header=False, index=False)
if __name__ == '__main__':
print "start market_sim.py"
s_in_file_path = "data\\q1_orders.csv"
s_out_file_path = "data\\q1_values.csv"
s_cash = "100000"
f_cash = float(s_cash)
df_orders_list = get_orders_list(s_in_file_path)
df_orders, dt_start, dt_end, ls_symbols = get_orders(df_orders_list)
df_prices = get_prices(dt_start, dt_end, ls_symbols)
df_port = process_orders(df_orders, df_prices, f_cash)
save_values(df_port, s_out_file_path)
print "end market_sim.py" | mit |
jeiros/Scripts | AnalysisMDTraj/rmsd.py | 1 | 2404 | #!/usr/bin/env python
import mdtraj
import argparse
import msmexplorer as msme
from cycler import cycler
from itertools import cycle
from matplotlib import pyplot as plt
import numpy as np
from matplotlib.ticker import FuncFormatter
import seaborn as sns
parser = argparse.ArgumentParser(prog='rmsd.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''version1''')
parser.add_argument('prmtop', help="""Topology file matching Trajectories""")
parser.add_argument("Trajectories", help="""An indefinite amount of AMBER
trajectories""", nargs="+")
parser.add_argument('-st', '--stride', type=int, required=False, default=1)
parser.add_argument('-sl', '--select', type=str, required=False, default='all')
parser.add_argument('-o', '--out_file', type=str, required=False,
default='rmsd.pdf')
palette = cycler('color', sns.color_palette('colorblind', 10))
palette_cycled = cycle(palette)
def to_ns(x, pos):
timestep = mdtraj.load_netcdf(args.Trajectories[0],
args.prmtop, args.stride).timestep
return '%d' % (int(x * timestep / 1000))
def plot_rsmd(traj_list, fout=None):
rmsd_list = [mdtraj.rmsd(traj, traj, 0) * 10 for traj in traj_list]
ax, side_ax = msme.plot_trace(rmsd_list[0], ylabel='RMSD (Å)', xlabel='Time (ns)',
label=args.Trajectories[0][:-3],
**next(palette_cycled))
formatter = FuncFormatter(to_ns)
ax.xaxis.set_major_formatter(formatter)
if len(rmsd_list) > 1:
for i, rmsd in enumerate(rmsd_list[1:]):
msme.plot_trace(rmsd, ylabel='RMSD (Å)', xlabel='Time (ns)', ax=ax,
side_ax=side_ax,
label=args.Trajectories[i + 1][:-3],
**next(palette_cycled))
if len(rmsd_list) > 5:
ax.legend_.remove()
sns.despine()
f = plt.gcf()
f.savefig(fout)
if __name__ == '__main__':
args = parser.parse_args()
print(args)
top = mdtraj.load_prmtop(args.prmtop)
atom_indices = top.select(args.select)
trajs = [mdtraj.load(trj, top=args.prmtop, stride=args.stride,
atom_indices=atom_indices) for trj in args.Trajectories]
print(trajs)
plot_rsmd(trajs, args.out_file)
| mit |
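The script above is driven from the command line; a hypothetical invocation (the file names and the atom selection are placeholders) is sketched below. For each trajectory it plots the RMSD of every frame to that trajectory's own first frame, converted from nm to Angstrom by the factor of 10 in ``plot_rsmd``:

# python rmsd.py system.prmtop run1.nc run2.nc --stride 10 --select "backbone" -o rmsd.pdf
#
# The resulting msmexplorer trace plot (one trace per trajectory, x axis in ns
# via the to_ns formatter) is written to rmsd.pdf.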
zuku1985/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 57 | 4736 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import _preprocess_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# univariate F-scores, used below as the reference ranking for the best features
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
# labels should not be centered
X, _, _, _, _ = _preprocess_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
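For context, a minimal sketch of the estimator these tests exercise. ``RandomizedLasso`` performed stability selection in the scikit-learn versions this test file targets; it was later deprecated and removed, so this is illustrative only:

import numpy as np
from sklearn.datasets import load_diabetes
from sklearn.linear_model import RandomizedLasso

diabetes = load_diabetes()
rlasso = RandomizedLasso(alpha=0.025, n_resampling=200, random_state=42)
rlasso.fit(diabetes.data, diabetes.target)
# scores_ is the fraction of resamplings in which each feature was selected
print(np.argsort(rlasso.scores_)[::-1])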
loli/sklearn-ensembletrees | sklearn/cross_decomposition/tests/test_pls.py | 22 | 9838 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
    # The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    # Let's check that PLSSVD doesn't return all possible components but just
    # the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
| bsd-3-clause |
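The properties asserted above (exact reconstruction of the centered/scaled ``X`` when ``n_components == n_features``, and orthogonality of the latent scores) can be reproduced directly. A short sketch, using the attribute names of the scikit-learn version these tests target:

import numpy as np
from sklearn.cross_decomposition import PLSCanonical
from sklearn.datasets import load_linnerud

d = load_linnerud()
pls = PLSCanonical(n_components=d.data.shape[1]).fit(d.data, d.target)
T, P = pls.x_scores_, pls.x_loadings_

Xc = (d.data - d.data.mean(axis=0)) / d.data.std(axis=0, ddof=1)   # center/scale as in fit
print(np.allclose(Xc, T.dot(P.T), atol=1e-6))                      # X == T P'
G = T.T.dot(T)
print(np.allclose(G, np.diag(np.diag(G)), atol=1e-6))              # scores are orthogonal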
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/misc/patheffect_demo.py | 1 | 2340 | """
===============
Patheffect Demo
===============
"""
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
import numpy as np
# nodebox section
if __name__ == '__builtin__':
    # we're in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
if 1:
plt.figure(1, figsize=(8, 3))
ax1 = plt.subplot(131)
ax1.imshow([[1, 2], [2, 3]])
txt = ax1.annotate("test", (1., 1.), (0., 0),
arrowprops=dict(arrowstyle="->",
connectionstyle="angle3", lw=2),
size=20, ha="center",
path_effects=[PathEffects.withStroke(linewidth=3,
foreground="w")])
txt.arrow_patch.set_path_effects([
PathEffects.Stroke(linewidth=5, foreground="w"),
PathEffects.Normal()])
pe = [PathEffects.withStroke(linewidth=3,
foreground="w")]
ax1.grid(True, linestyle="-", path_effects=pe)
ax2 = plt.subplot(132)
arr = np.arange(25).reshape((5, 5))
ax2.imshow(arr)
cntr = ax2.contour(arr, colors="k")
plt.setp(cntr.collections, path_effects=[
PathEffects.withStroke(linewidth=3, foreground="w")])
clbls = ax2.clabel(cntr, fmt="%2.0f", use_clabeltext=True)
plt.setp(clbls, path_effects=[
PathEffects.withStroke(linewidth=3, foreground="w")])
# shadow as a path effect
ax3 = plt.subplot(133)
p1, = ax3.plot([0, 1], [0, 1])
leg = ax3.legend([p1], ["Line 1"], fancybox=True, loc=2)
leg.legendPatch.set_path_effects([PathEffects.withSimplePatchShadow()])
pltshow(plt)
| mit |
scholer/staplestatter | setup.py | 1 | 2239 | from setuptools import setup
setup(
name='staplestatter',
version='1.1',
packages=['staplestatter'],
url='https://github.com/scholer/staplestatter',
license='GNU General Public License v3 (GPLv3)',
author='Rasmus Scholer Sorensen',
author_email='rasmusscholer@gmail.com',
description='Library, tools, and cadnano plugin for analyzing DNA origami designs created with cadnano.',
keywords=[
# "GEL", "Image", "Annotation", "PAGE", "Agarose", "Protein",
# "SDS", "Gel electrophoresis", "Typhoon", "GelDoc",
"DNA Origami", "cadnano", "plugin",
"DNA", "DNA sequences", "melting temperature", "sequence manipulation",
"Data analysis", "statistics", "plotting", "Data visualization",
],
install_requires=[
'pyyaml',
'six',
'biopython',
'matplotlib',
'svgwrite',
],
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
# 'Topic :: Software Development :: Build Tools',
'Topic :: Education',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Operating System :: MacOS',
'Operating System :: Microsoft',
'Operating System :: POSIX :: Linux',
],
)
| gpl-3.0 |
fzalkow/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. This example uses a
shuffled subset of 330 points, of which only 30 are labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# shuffle everything around
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
canavandl/bokeh | examples/charts/file/bar.py | 37 | 2221 | from collections import OrderedDict
import numpy as np
import pandas as pd
from bokeh.charts import Bar, output_file, show, vplot, hplot
from bokeh.models import Range1d
from bokeh.sampledata.olympics2014 import data as original_data
width = 700
height = 500
legend_position = "top_right"
data = {d['abbr']: d['medals'] for d in original_data['data'] if d['medals']['total'] > 0}
countries = sorted(data.keys(), key=lambda x: data[x]['total'], reverse=True)
gold = np.array([data[abbr]['gold'] for abbr in countries], dtype=np.float)
silver = np.array([data[abbr]['silver'] for abbr in countries], dtype=np.float)
bronze = np.array([data[abbr]['bronze'] for abbr in countries], dtype=np.float)
# dict input
medals = OrderedDict(bronze=bronze, silver=silver, gold=gold)
dict_stacked = Bar(
medals, countries, title="OrderedDict input | Stacked", legend=legend_position,
xlabel="countries", ylabel="medals", width=width, height=height,
stacked=True
)
# data frame input
df = pd.DataFrame(medals, index=countries)
df_grouped = Bar(
df, title="Data Frame input | Grouped", legend=legend_position,
xlabel="countries", ylabel="medals", width=width, height=height
)
# Numpy array input with different data to affect the ranges
random = np.random.rand(3, 8)
mixed = random - np.random.rand(3, 8)
categories = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
np_stacked = Bar(
random, cat=categories, title="Numpy Array input | Stacked",
ylabel="Random Number", xlabel="", width=width, height=height,
stacked=True
)
np_negative_grouped = Bar(
random * -1, cat=categories, title="All negative input | Grouped",
ylabel="Random Number", width=width, height=height
)
np_custom = Bar(
mixed, cat=categories, title="Custom range (start=-3, end=0.4)",
ylabel="Random Number", width=width, height=height,
continuous_range=Range1d(start=-3, end=0.4)
)
np_mixed_grouped = Bar(
mixed, cat=categories, title="Mixed-sign input | Grouped",
ylabel="Random Number", width=width, height=height
)
# collect and display
output_file("bar.html")
show(vplot(
hplot(dict_stacked, df_grouped),
hplot(np_stacked, np_negative_grouped),
hplot(np_mixed_grouped, np_custom),
))
| bsd-3-clause |
dannygoldstein/sncosmo | docs/conf.py | 3 | 7855 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
import sys
import os
import datetime
import sphinx_rtd_theme
import sphinx_gallery
import matplotlib.sphinxext.plot_directive
sys.path.insert(0, os.path.abspath("_helpers"))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'astropy': ('http://docs.astropy.org/en/stable/', None),
'emcee': ('https://emcee.readthedocs.io/en/stable/', None)
}
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.mathjax',
'sphinx.ext.linkcode',
'sphinx_gallery.gen_gallery',
'numpydoc',
matplotlib.sphinxext.plot_directive.__name__
]
numpydoc_show_class_members = False
autosummary_generate = True
autoclass_content = "class"
autodoc_default_flags = ["members", "inherited-members"]
autodoc_docstring_signature = False
sphinx_gallery_conf = {
'examples_dirs': '_examples', # path to examples scripts
'gallery_dirs': 'examples', # path to gallery generated examples
'backreferences_dir': 'modules/generated', # path to store the module
# using example template
'doc_module': ('sncosmo',), # documented module(s)
'download_section_examples': False,
'download_all_examples': False, # don't package up examples.
'default_thumb_file': os.path.join(os.path.dirname(__file__), '_logo',
'spectral_white_bkg.png')
}
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = 'sncosmo'
author = 'Kyle Barbary and contributors'
current_year = datetime.datetime.now().year
copyright = '2013-{:d}, {}'.format(current_year, author)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
import sncosmo
# The short X.Y version.
version = sncosmo.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = sncosmo.__version__
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'obj'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------------
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [sphinx_gallery.glr_path_static()]
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
from os.path import join
html_favicon = join('_static','sncosmo_logo_32.ico')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# Add local templates path to modify autosummary templates
#templates_path = ['_templates']
# Static files to copy after template files
html_static_path = ['_static']
#html_style = 'sncosmo.css'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
# -----------------------------------------------------------------------------
# Source code links
#
# Lifted from numpy docs conf.py
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.findsource(obj)
except:
lineno = None
if lineno:
linespec = "#L%d" % (lineno + 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(sncosmo.__file__))
if 'dev' in sncosmo.__version__:
return "http://github.com/sncosmo/sncosmo/blob/master/sncosmo/%s%s" % (
fn, linespec)
else:
return "http://github.com/sncosmo/sncosmo/blob/v%s/sncosmo/%s%s" % (
sncosmo.__version__, fn, linespec)
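# For example (illustrative), an object from a released build resolves to a URL of the form
# http://github.com/sncosmo/sncosmo/blob/v<release>/sncosmo/<module>.py#L<lineno>,
# while a dev build points at the master branch instead of a version tag.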
| bsd-3-clause |
wy36101299/technical-analysis | technical-analysis.py | 2 | 12671 |
# coding: utf-8
# In[1]:
# Start the interactive plotting environment
get_ipython().magic(u'pylab inline')
# In[2]:
# Import dependencies
import numpy as np
import pandas as pd
from numpy import random
import matplotlib.pyplot as plt
# Load the data file
# Data source: 3008 Largan Precision, 2012-08-01 to 2014-12-09
# http://www.twse.com.tw/ch/trading/exchange/STOCK_DAY/genpage/Report201412/201412_F3_1_8_3008.php?STK_NO=3008&myear=2014&mmon=12
datapath = '/Users/wy/Desktop/3008.txt'
data = pd.read_csv(datapath)
# In[3]:
# Show the first N rows of the file
data.head(5)
# In[4]:
# Detailed statistics of the data: count, mean, standard deviation, ...
data.describe()
# In[5]:
# Reference for the technical-analysis formulas
# http://hymar.myweb.hinet.net/study/stock/theory/
# In[6]:
# Rise Ratio (RR): daily percentage change
def RR(data):
    # data runs from newest (index 0) to oldest, so reverse the index list to iterate oldest-first
    dataList = range(data['Date'].size)
    dataList.reverse()
    tmpList = []
    for item in dataList:
        # skip the very first record, which has no older record to compare against
        if item-1 >=0:
            # (today's close - yesterday's close) / yesterday's close
            tmp = (data['Close'][item-1]-data['Close'][item])/data['Close'][item]*100
            tmpList.append(tmp)
    # the earliest rows have no value and will show up as NA
    tmpList.reverse()
    tmpSeries = pd.Series(tmpList)
    # create the RR column
    data['RR']=tmpSeries
# In[7]:
# Williams %R indicator (WMS%R or %R)
def WMS(data,day):
    # data runs from newest (index 0) to oldest, so reverse the index list to iterate oldest-first
    dataList = range(data['Date'].size)
    dataList.reverse()
    tmpList = []
    for item in dataList:
        # skip rows without enough earlier data for the lookback window
        if item-day+1 >= 0:
            # 9-day WMS%R = (9-day high - close of the 9th day) / (9-day high - 9-day low) * 100
            # [item-day+1:item+1] is today's window; [item-day+1] is the N-th day, e.g. 583-9=574, +1=575
            tmp = (data['High'][item-day+1:item+1].max()-data['Close'][item-day+1])/(data['High'][item-day+1:item+1].max()-data['Low'][item-day+1:item+1].min())*100
            tmpList.append(tmp)
    # the earliest `day` rows have no value and will show up as NA
    tmpList.reverse()
    tmpSeries = pd.Series(tmpList)
    # create the WMS column
    data['WMS']=tmpSeries
# In[8]:
# Buying/selling willingness indicator (BR); suggested day = 26
def BR(data,day):
    # data runs from newest (index 0) to oldest, so reverse the index list to iterate oldest-first
    dataList = range(data['Date'].size)
    dataList.reverse()
    tmpList = []
    for item in dataList:
        # skip rows without enough earlier data for the lookback window
        if item-day >= 0:
            # 26-day BR = 26-day sum of (today's high - yesterday's close) / 26-day sum of (yesterday's close - today's low)
            # the slices shifted by -1 are today's window; the unshifted slices are yesterday's window
            tmp = (data['High'][(item-day+1)-1:(item+1)-1].sum()-data['Close'][item-day+1:item+1].sum())/(data['Close'][item-day+1:item+1].sum()-data['Low'][(item-day+1)-1:(item+1)-1].sum())
            tmpList.append(tmp)
    # the earliest `day` rows have no value and will show up as NA
    tmpList.reverse()
    tmpSeries = pd.Series(tmpList)
    # create the BR column
    data['BR']=tmpSeries
# In[9]:
# Buying/selling momentum indicator (AR); suggested day = 26
def AR(data,day):
    # data runs from newest (index 0) to oldest, so reverse the index list to iterate oldest-first
    dataList = range(data['Date'].size)
    dataList.reverse()
    tmpList = []
    for item in dataList:
        # skip rows without enough earlier data for the lookback window
        if item-day+1 >= 0:
            # 26-day AR = 26-day sum of (high - open) / 26-day sum of (open - low)
            # [item-day+1:item+1] is today's window
            tmp = (data['High'][item-day+1:item+1].sum()-data['Open'][item-day+1:item+1].sum())/(data['Open'][item-day+1:item+1].sum()-data['Low'][item-day+1:item+1].sum())
            tmpList.append(tmp)
    # the earliest `day` rows have no value and will show up as NA
    tmpList.reverse()
    tmpSeries = pd.Series(tmpList)
    # create the AR column
    data['AR']=tmpSeries
# In[10]:
# Mean volume (MV); suggested day = 12
def MV(data,day):
    # data runs from newest (index 0) to oldest, so reverse the index list to iterate oldest-first
    dataList = range(data['Date'].size)
    dataList.reverse()
    tmpList = []
    for item in dataList:
        # skip rows without enough earlier data for the lookback window
        if item-day+1 >= 0:
            # N-day average volume = sum of the last N days' volume / N
            # [item-day+1:item+1] is today's window
            tmp = data['Volume'][item-day+1:item+1].mean()
            tmpList.append(tmp)
    # the earliest `day` rows have no value and will show up as NA
    tmpList.reverse()
    tmpSeries = pd.Series(tmpList)
    # create the MV column
    data['MV']=tmpSeries
# In[11]:
# Moving average (MA); suggested day = 12
def MA(data,day):
    # data runs from newest (index 0) to oldest, so reverse the index list to iterate oldest-first
    dataList = range(data['Date'].size)
    dataList.reverse()
    tmpList = []
    for item in dataList:
        # skip rows without enough earlier data for the lookback window
        if item-day+1 >= 0:
            # moving average = sum of closing prices over the sampling window / number of days sampled
            # [item-day+1:item+1] is today's window
            tmp = data['Close'][item-day+1:item+1].mean()
            tmpList.append(tmp)
    # the earliest `day` rows have no value and will show up as NA
    tmpList.reverse()
    tmpSeries = pd.Series(tmpList)
    # create the MA<day> column
    data['MA'+str(day)]=tmpSeries
# In[12]:
# Psychological line (PSY); suggested day = 13
def PSY(data,day):
    # data runs from newest (index 0) to oldest, so reverse the index list to iterate oldest-first
    dataList = range(data['Date'].size)
    dataList.reverse()
    tmpList = []
    for item in dataList:
        # skip rows without enough earlier data for the lookback window
        if item-day >= 0:
            # 13-day PSY = (number of rising days within 13 days / 13) * 100
            # [item-day+1-1:item+1-1] shifts back one day because the earliest day has no RR value
            count = 0
            for a in data['RR'][item-day+1-1:item+1-1]:
                if a > 0:
                    count+=1
            tmp = float(count)/float(13)*100
            tmpList.append(tmp)
    # the earliest `day` rows have no value and will show up as NA
    tmpList.reverse()
    tmpSeries = pd.Series(tmpList)
    # create the PSY column
    data['PSY']=tmpSeries
# In[13]:
# On-balance volume (OBV); suggested day = 12
def OBV(data,day):
    # data runs from newest (index 0) to oldest, so reverse the index list to iterate oldest-first
    dataList = range(data['Date'].size)
    dataList.reverse()
    tmpList = []
    for item in dataList:
        # skip rows without enough earlier data for the lookback window
        if item-day >= 0:
            # today's OBV = 12-day sum of volume on rising days - 12-day sum of volume on falling days
            # build a boolean mask from ['RR'] > 0, then sum the ['Volume'] entries where the mask is True
            bolRise = data['RR'][item-day+1-1:item+1-1]>0
            sumVolRise = data['Volume'][item-day+1-1:item+1-1][bolRise].sum()
            bolDesc = data['RR'][item-day+1-1:item+1-1]<0
            sumVolDesc = data['Volume'][item-day+1-1:item+1-1][bolDesc].sum()
            tmp = sumVolRise-sumVolDesc
            # alternative: 12-day OBV moving average = (rising-day volume sum - falling-day volume sum) / 12
            # tmp = (sumVolRise-sumVolDesc)/12
            tmpList.append(tmp)
    # the earliest `day` rows have no value and will show up as NA
    tmpList.reverse()
    tmpSeries = pd.Series(tmpList)
    # create the OBV column
    data['OBV']=tmpSeries
# In[14]:
# Volume ratio (VR); suggested day = 12
def VR(data,day):
    # data runs from newest (index 0) to oldest, so reverse the index list to iterate oldest-first
    dataList = range(data['Date'].size)
    dataList.reverse()
    tmpList = []
    for item in dataList:
        # skip rows without enough earlier data for the lookback window
        if item-day >= 0:
            # VR = (N-day rising-day volume sum + 1/2 * N-day flat-day volume sum) / (N-day falling-day volume sum + 1/2 * N-day flat-day volume sum) * 100%
            # build boolean masks from ['RR'], then sum the ['Volume'] entries where each mask is True
            bolRise = data['RR'][item-day+1-1:item+1-1]>0
            sumVolRise = data['Volume'][item-day+1-1:item+1-1][bolRise].sum()
            bolNorm = data['RR'][item-day+1-1:item+1-1] == 0
            sumVolNorm = data['Volume'][item-day+1-1:item+1-1][bolNorm].sum()
            bolDesc = data['RR'][item-day+1-1:item+1-1]<0
            sumVolDesc = data['Volume'][item-day+1-1:item+1-1][bolDesc].sum()
            tmp = (sumVolRise+0.5*sumVolNorm)/(sumVolDesc+0.5*sumVolNorm)*100
            tmpList.append(tmp)
    # the earliest `day` rows have no value and will show up as NA
    tmpList.reverse()
    tmpSeries = pd.Series(tmpList)
    # create the VR column
    data['VR']=tmpSeries
# In[15]:
# Relative strength index (RSI); suggested day = 6
def RSI(data,day):
    # data runs from newest (index 0) to oldest, so reverse the index list to iterate oldest-first
    dataList = range(data['Date'].size)
    dataList.reverse()
    tmpList = []
    for item in dataList:
        # skip rows without enough earlier data for the lookback window
        if item-day >= 0:
            # 6-day RSI = 100 * mean 6-day closing gain / (mean 6-day closing gain - mean 6-day closing loss)
            # build boolean masks from ['RR'], then average the matching ['RR'] values
            bolRise = data['RR'][item-day+1-1:item+1-1]>0
            meanRise = data['RR'][item-day+1-1:item+1-1][bolRise].mean()
            bolDesc = data['RR'][item-day+1-1:item+1-1]<0
            meanDesc = data['RR'][item-day+1-1:item+1-1][bolDesc].mean()
            tmp = 100*meanRise/(meanRise-meanDesc)
            tmpList.append(tmp)
    # the earliest `day` rows have no value and will show up as NA
    tmpList.reverse()
    tmpSeries = pd.Series(tmpList)
    # create the RSI column
    data['RSI']=tmpSeries
# In[16]:
# Bias ratio (BIAS)
def BIAS(data,day):
    # data runs from newest (index 0) to oldest, so reverse the index list to iterate oldest-first
    dataList = range(data['Date'].size)
    dataList.reverse()
    tmpList = []
    for item in dataList:
        # skip rows without enough earlier data for the lookback window
        if item-day+1 >= 0:
            # N-day bias = (today's price - N-day moving average price) / N-day average price
            tmp = (data['Close'][item-day+1]-data['MA'+str(day)][item-day+1])/data['MA'+str(day)][item-day+1]*100
            tmpList.append(tmp)
    # the earliest `day` rows have no value and will show up as NA
    tmpList.reverse()
    tmpSeries = pd.Series(tmpList)
    # create the BIAS column
    data['BIAS']=tmpSeries
# In[17]:
# RR (rise ratio) must be computed first; the indicators below depend on it
RR(data)
WMS(data,9)
BR(data,26)
AR(data,26)
MV(data,12)
MA(data,12)
# BIAS needs the MA values, so compute MA first
BIAS(data,12)
PSY(data,13)
OBV(data,12)
VR(data,12)
RSI(data,6)
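# Illustrative sanity check: each call above adds one indicator column to `data`.
print(data[['RR', 'WMS', 'BR', 'AR', 'MV', 'MA12', 'BIAS', 'PSY', 'OBV', 'VR', 'RSI']].tail())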
# In[18]:
MA(data,20)
MA(data,60)
# In[19]:
data
# In[20]:
# Rise Ratio (RR)
# Index 0 is the newest record; the plot gets newer toward the left
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(data['RR'])
ax.set_title('RR')
# In[21]:
# Williams %R indicator (WMS%R or %R)
# Index 0 is the newest record; the plot gets newer toward the left
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(data['WMS'])
ax.set_title('WMS')
# In[22]:
# Buying/selling willingness indicator (BR)
# Index 0 is the newest record; the plot gets newer toward the left
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(data['BR'])
ax.set_title('BR')
# In[23]:
# Buying/selling momentum indicator (AR)
# Index 0 is the newest record; the plot gets newer toward the left
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(data['AR'])
ax.set_title('AR')
# In[24]:
# Mean volume (MV)
# Index 0 is the newest record; the plot gets newer toward the left
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(data['MV'])
ax.set_title('MV')
# In[25]:
# Moving average (MA)
# Index 0 is the newest record; the plot gets newer toward the left
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(data['MA12'])
ax.set_title('MA12')
# In[26]:
# Bias ratio (BIAS)
# Index 0 is the newest record; the plot gets newer toward the left
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(data['BIAS'])
ax.set_title('BIAS')
# In[27]:
# Psychological line (PSY)
# Index 0 is the newest record; the plot gets newer toward the left
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(data['PSY'])
ax.set_title('PSY')
# In[28]:
# On-balance volume (OBV)
# Index 0 is the newest record; the plot gets newer toward the left
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(data['OBV'])
ax.set_title('OBV')
# In[29]:
# Volume ratio (VR)
# Index 0 is the newest record; the plot gets newer toward the left
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(data['VR'])
ax.set_title('VR')
# In[30]:
# Relative strength index (RSI)
# Index 0 is the newest record; the plot gets newer toward the left
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(data['RSI'])
ax.set_title('RSI')
# In[32]:
# Compare MA20 with MA60 to spot golden crosses and death crosses
# Index 0 is the newest record; the plot gets newer toward the left
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(data['MA20'],color='#9D442F',label='MA20')
ax.plot(data['MA60'],color='#5D947E',label='MA60')
ax.legend(loc='best')
# In[ ]:
| mit |
almarklein/scikit-image | doc/examples/plot_equalize.py | 1 | 2802 | """
======================
Histogram Equalization
======================
This examples enhances an image with low contrast, using a method called
*histogram equalization*, which "spreads out the most frequent intensity
values" in an image [1]_. The equalized image has a roughly linear cumulative
distribution function.
While histogram equalization has the advantage that it requires no parameters,
it sometimes yields unnatural looking images. An alternative method is
*contrast stretching*, where the image is rescaled to include all intensities
that fall within the 2nd and 98th percentiles [2]_.
.. [1] http://en.wikipedia.org/wiki/Histogram_equalization
.. [2] http://homepages.inf.ed.ac.uk/rbf/HIPR2/stretch.htm
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from skimage import data, img_as_float
from skimage import exposure
matplotlib.rcParams['font.size'] = 8
def plot_img_and_hist(img, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
"""
img = img_as_float(img)
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display image
ax_img.imshow(img, cmap=plt.cm.gray)
ax_img.set_axis_off()
# Display histogram
ax_hist.hist(img.ravel(), bins=bins, histtype='step', color='black')
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
ax_hist.set_xlim(0, 1)
ax_hist.set_yticks([])
# Display cumulative distribution
img_cdf, bins = exposure.cumulative_distribution(img, bins)
ax_cdf.plot(bins, img_cdf, 'r')
ax_cdf.set_yticks([])
return ax_img, ax_hist, ax_cdf
# Load an example image
img = data.moon()
# Contrast stretching
p2 = np.percentile(img, 2)
p98 = np.percentile(img, 98)
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
# Equalization
img_eq = exposure.equalize_hist(img)
# Adaptive Equalization
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
# Display results
f, axes = plt.subplots(nrows=2, ncols=4, figsize=(8, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
y_min, y_max = ax_hist.get_ylim()
ax_hist.set_ylabel('Number of pixels')
ax_hist.set_yticks(np.linspace(0, y_max, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Contrast stretching')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
ax_img.set_title('Histogram equalization')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3])
ax_img.set_title('Adaptive equalization')
ax_cdf.set_ylabel('Fraction of total intensity')
ax_cdf.set_yticks(np.linspace(0, 1, 5))
# prevent overlap of y-axis labels
plt.subplots_adjust(wspace=0.4)
plt.show()
| bsd-3-clause |
guiquanz/Dato-Core | src/unity/python/graphlab/test/test_io.py | 13 | 15881 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
import commands
import json
import logging
import os
import re
import tempfile
import unittest
import pandas
import graphlab
import graphlab.connect.main as glconnect
import graphlab.sys_util as _sys_util
from graphlab.test.util import create_server, start_test_tcp_server
from pandas.util.testing import assert_frame_equal
def _test_save_load_object_helper(testcase, obj, url):
"""
Helper function to test save and load a server side object to a given url.
"""
def cleanup(url):
"""
Remove the saved file from temp directory.
"""
protocol = None
path = None
splits = url.split("://")
if len(splits) > 1:
protocol = splits[0]
path = splits[1]
else:
path = url
        if not protocol or protocol == "local" or protocol == "remote":
tempdir = tempfile.gettempdir()
pattern = path + ".*"
for f in os.listdir(tempdir):
if re.search(pattern, f):
os.remove(os.path.join(tempdir, f))
if isinstance(obj, graphlab.SGraph):
obj.save(url + ".graph")
newobj = graphlab.load_graph(url + ".graph")
testcase.assertItemsEqual(obj.get_fields(), newobj.get_fields())
testcase.assertDictEqual(obj.summary(), newobj.summary())
elif isinstance(obj, graphlab.Model):
obj.save(url + ".model")
newobj = graphlab.load_model(url + ".model")
testcase.assertItemsEqual(obj.list_fields(), newobj.list_fields())
testcase.assertEqual(type(obj), type(newobj))
elif isinstance(obj, graphlab.SFrame):
obj.save(url + ".frame_idx")
newobj = graphlab.load_sframe(url + ".frame_idx")
testcase.assertEqual(obj.shape, newobj.shape)
testcase.assertEqual(obj.column_names(), newobj.column_names())
testcase.assertEqual(obj.column_types(), newobj.column_types())
assert_frame_equal(obj.head(obj.num_rows()).to_dataframe(),
newobj.head(newobj.num_rows()).to_dataframe())
else:
raise TypeError
cleanup(url)
def create_test_objects():
vertices = pandas.DataFrame({'vid': ['1', '2', '3'],
'color': ['g', 'r', 'b'],
'vec': [[.1, .1, .1], [.1, .1, .1], [.1, .1, .1]]})
edges = pandas.DataFrame({'src_id': ['1', '2', '3'],
'dst_id': ['2', '3', '4'],
'weight': [0., 0.1, 1.]})
graph = graphlab.SGraph().add_vertices(vertices, 'vid').add_edges(edges, 'src_id', 'dst_id')
sframe = graphlab.SFrame(edges)
model = graphlab.pagerank.create(graph)
return (graph, sframe, model)
class LocalFSConnectorTests(unittest.TestCase):
@classmethod
def setUpClass(self):
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe, self.model) = create_test_objects()
def _test_read_write_helper(self, url, content):
url = graphlab.util._make_internal_url(url)
glconnect.get_unity().__write__(url, content)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content)
if os.path.exists(url):
os.remove(url)
def test_object_save_load(self):
for prefix in ['', 'local://', 'remote://']:
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.model, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
def test_basic(self):
self._test_read_write_helper(self.tempfile, 'hello world')
self._test_read_write_helper("local://" + self.tempfile + ".csv", 'hello,world,woof')
self._test_read_write_helper("remote://" + self.tempfile + ".csv", 'hello,world,woof')
def test_gzip(self):
self._test_read_write_helper(self.tempfile + ".gz", 'hello world')
self._test_read_write_helper(self.tempfile + ".csv.gz", 'hello world')
self._test_read_write_helper("local://" + self.tempfile + ".csv.gz", 'hello world')
self._test_read_write_helper("remote://" + self.tempfile + ".csv.gz", 'hello world')
def test_exception(self):
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("/root/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("/root/tmp", '.....'))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("/root/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("/root/tmp", '.....'))
self.assertRaises(IOError, lambda: self.graph.save("/root/tmp.graph"))
self.assertRaises(IOError, lambda: self.sframe.save("/root/tmp.frame_idx"))
self.assertRaises(IOError, lambda: self.model.save("/root/tmp.model"))
self.assertRaises(IOError, lambda: graphlab.load_graph("/root/tmp.graph"))
self.assertRaises(IOError, lambda: graphlab.load_sframe("/root/tmp.frame_idx"))
self.assertRaises(IOError, lambda: graphlab.load_model("/root/tmp.model"))
class RemoteFSConnectorTests(unittest.TestCase):
@classmethod
def setUpClass(self):
glconnect.stop()
auth_token = 'graphlab_awesome'
self.server = start_test_tcp_server(auth_token=auth_token)
glconnect.launch(self.server.get_server_addr(), auth_token=auth_token)
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe, self.model) = create_test_objects()
@classmethod
def tearDownClass(self):
glconnect.stop()
self.server.stop()
def _test_read_write_helper(self, url, content):
url = graphlab.util._make_internal_url(url)
glconnect.get_unity().__write__(url, content)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content)
def test_basic(self):
self._test_read_write_helper("remote://" + self.tempfile, 'hello,world,woof')
def test_gzip(self):
self._test_read_write_helper("remote://" + self.tempfile + ".csv.gz", 'hello,world,woof')
def test_object_save_load(self):
prefix = "remote://"
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.model, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
def test_exception(self):
self.assertRaises(ValueError, lambda: self._test_read_write_helper(self.tempfile, 'hello world'))
self.assertRaises(ValueError, lambda: self._test_read_write_helper("local://" + self.tempfile + ".csv.gz", 'hello,world,woof'))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("remote:///root/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("remote:///root/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("remote:///root/tmp", '.....'))
self.assertRaises(IOError, lambda: self.graph.save("remote:///root/tmp.graph"))
self.assertRaises(IOError, lambda: self.sframe.save("remote:///root/tmp.frame_idx"))
self.assertRaises(IOError, lambda: self.model.save("remote:///root/tmp.model"))
self.assertRaises(IOError, lambda: graphlab.load_graph("remote:///root/tmp.graph"))
self.assertRaises(IOError, lambda: graphlab.load_sframe("remote:///root/tmp.frame_idx"))
self.assertRaises(IOError, lambda: graphlab.load_model("remote:///root/tmp.model"))
class HttpConnectorTests(unittest.TestCase):
@classmethod
def setUpClass(self):
self.url = "http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz"
def _test_read_helper(self, url, content_expected):
url = graphlab.util._make_internal_url(url)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content_expected)
def test_read(self):
expected = "\n".join([str(unichr(i + ord('a'))) for i in range(26)])
expected = expected + "\n"
self._test_read_helper(self.url, expected)
def test_exception(self):
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__(self.url, '.....'))
@unittest.skip("Disabling HDFS Connector Tests")
class HDFSConnectorTests(unittest.TestCase):
    # This test requires hadoop to be installed and available in $PATH.
# If not, the tests will be skipped.
@classmethod
def setUpClass(self):
self.has_hdfs = len(_sys_util.get_hadoop_class_path()) > 0
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe, self.model) = create_test_objects()
def _test_read_write_helper(self, url, content_expected):
url = graphlab.util._make_internal_url(url)
glconnect.get_unity().__write__(url, content_expected)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content_expected)
# clean up the file we wrote
status, output = commands.getstatusoutput('hadoop fs -test -e ' + url)
        if status == 0:
commands.getstatusoutput('hadoop fs -rm ' + url)
def test_basic(self):
if self.has_hdfs:
self._test_read_write_helper("hdfs://" + self.tempfile, 'hello,world,woof')
else:
            logging.getLogger(__name__).info("No hdfs available. Test pass.")
def test_gzip(self):
if self.has_hdfs:
self._test_read_write_helper("hdfs://" + self.tempfile + ".gz", 'hello,world,woof')
self._test_read_write_helper("hdfs://" + self.tempfile + ".csv.gz", 'hello,world,woof')
else:
            logging.getLogger(__name__).info("No hdfs available. Test pass.")
def test_object_save_load(self):
if self.has_hdfs:
prefix = "hdfs://"
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.model, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
else:
            logging.getLogger(__name__).info("No hdfs available. Test pass.")
def test_exception(self):
bad_url = "hdfs:///root/"
if self.has_hdfs:
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("hdfs:///"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("hdfs:///tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("hdfs://" + self.tempfile))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__(bad_url + "/tmp", "somerandomcontent"))
self.assertRaises(IOError, lambda: self.graph.save(bad_url + "x.graph"))
self.assertRaises(IOError, lambda: self.sframe.save(bad_url + "x.frame_idx"))
self.assertRaises(IOError, lambda: self.model.save(bad_url + "x.model"))
self.assertRaises(IOError, lambda: graphlab.load_graph(bad_url + "mygraph"))
self.assertRaises(IOError, lambda: graphlab.load_sframe(bad_url + "x.frame_idx"))
self.assertRaises(IOError, lambda: graphlab.load_model(bad_url + "x.model"))
else:
            logging.getLogger(__name__).info("No hdfs available. Test pass.")
@unittest.skip("Disabling S3 Connector Tests")
class S3ConnectorTests(unittest.TestCase):
# This test requires aws cli to be installed. If not, the tests will be skipped.
@classmethod
def setUpClass(self):
status, output = commands.getstatusoutput('aws s3api list-buckets')
        self.has_s3 = (status == 0)
self.standard_bucket = None
self.regional_bucket = None
        # Use the aws cli s3api to find a bucket with "gl-testdata" in the name, and use it as our test bucket.
        # Temp files will be read from / written to the test bucket's /tmp folder and be cleared on exit.
if self.has_s3:
try:
json_output = json.loads(output)
bucket_list = [b['Name'] for b in json_output['Buckets']]
assert 'gl-testdata' in bucket_list
assert 'gl-testdata-oregon' in bucket_list
self.standard_bucket = 'gl-testdata'
self.regional_bucket = 'gl-testdata-oregon'
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe, self.model) = create_test_objects()
except:
                logging.getLogger(__name__).warning("Failed to parse the output of s3api into JSON. Please check your awscli version.")
self.has_s3 = False
def _test_read_write_helper(self, url, content_expected):
s3url = graphlab.util._make_internal_url(url)
glconnect.get_unity().__write__(s3url, content_expected)
content_read = glconnect.get_unity().__read__(s3url)
self.assertEquals(content_read, content_expected)
(status, output) = commands.getstatusoutput('aws s3 rm --region us-west-2 ' + url)
        if status != 0:
logging.getLogger(__name__).warning("Cannot remove file: " + url)
def test_basic(self):
if self.has_s3:
for bucket in [self.standard_bucket, self.regional_bucket]:
self._test_read_write_helper("s3://" + bucket + self.tempfile, 'hello,world,woof')
else:
            logging.getLogger(__name__).info("No s3 bucket available. Test pass.")
def test_gzip(self):
if self.has_s3:
self._test_read_write_helper("s3://" + self.standard_bucket + self.tempfile + ".gz", 'hello,world,woof')
else:
            logging.getLogger(__name__).info("No s3 bucket available. Test pass.")
def test_object_save_load(self):
if self.has_s3:
prefix = "s3://" + self.standard_bucket
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.model, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
else:
            logging.getLogger(__name__).info("No s3 bucket available. Test pass.")
def test_exception(self):
if self.has_s3:
bad_bucket = "i_am_a_bad_bucket"
prefix = "s3://" + bad_bucket
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("s3:///"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("s3://" + self.standard_bucket + "/somerandomfile"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("s3://" + "/somerandomfile"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("s3://" + "/somerandomfile", "somerandomcontent"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("s3://" + self.standard_bucket + "I'amABadUrl/", "somerandomcontent"))
self.assertRaises(IOError, lambda: self.graph.save(prefix + "/x.graph"))
self.assertRaises(IOError, lambda: self.sframe.save(prefix + "/x.frame_idx"))
self.assertRaises(IOError, lambda: self.model.save(prefix + "/x.model"))
self.assertRaises(IOError, lambda: graphlab.load_graph(prefix + "/x.graph"))
self.assertRaises(IOError, lambda: graphlab.load_sframe(prefix + "/x.frame_idx"))
self.assertRaises(IOError, lambda: graphlab.load_model(prefix + "/x.model"))
else:
            logging.getLogger(__name__).info("No s3 bucket available. Test pass.")
| agpl-3.0 |
samzhang111/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
cfjhallgren/shogun | examples/undocumented/python/graphical/so_multiclass_BMRM.py | 11 | 2823 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from shogun import RealFeatures
from shogun import MulticlassModel, MulticlassSOLabels, RealNumber, DualLibQPBMSOSVM
from shogun import BMRM, PPBMRM, P3BMRM
from shogun import StructuredAccuracy
def fill_data(cnt, minv, maxv):
x1 = np.linspace(minv, maxv, cnt)
a, b = np.meshgrid(x1, x1)
X = np.array((np.ravel(a), np.ravel(b)))
y = np.zeros((1, cnt*cnt))
    tmp = cnt*cnt
y[0, tmp/3:(tmp/3)*2]=1
y[0, tmp/3*2:(tmp/3)*3]=2
return X, y.flatten()
def gen_data():
covs = np.array([[[0., -1. ], [2.5, .7]],
[[3., -1.5], [1.2, .3]],
[[ 2, 0 ], [ .0, 1.5 ]]])
X = np.r_[np.dot(np.random.randn(N, dim), covs[0]) + np.array([0, 10]),
np.dot(np.random.randn(N, dim), covs[1]) + np.array([-10, -10]),
np.dot(np.random.randn(N, dim), covs[2]) + np.array([10, -10])];
Y = np.hstack((np.zeros(N), np.ones(N), 2*np.ones(N)))
return X, Y
def get_so_labels(out):
N = out.get_num_labels()
l = np.zeros(N)
for i in xrange(N):
l[i] = RealNumber.obtain_from_generic(out.get_label(i)).value
return l
# Number of classes
M = 3
# Number of samples of each class
N = 1000
# Dimension of the data
dim = 2
X, y = gen_data()
cnt = 250
X2, y2 = fill_data(cnt, np.min(X), np.max(X))
labels = MulticlassSOLabels(y)
features = RealFeatures(X.T)
model = MulticlassModel(features, labels)
lambda_ = 1e1
sosvm = DualLibQPBMSOSVM(model, labels, lambda_)
sosvm.set_cleanAfter(10) # number of iterations that cutting plane has to be inactive for to be removed
sosvm.set_cleanICP(True) # enables inactive cutting plane removal feature
sosvm.set_TolRel(0.001) # set relative tolerance
sosvm.set_verbose(True) # enables verbosity of the solver
sosvm.set_cp_models(16) # set number of cutting plane models
sosvm.set_solver(BMRM) # select training algorithm
#sosvm.set_solver(PPBMRM)
#sosvm.set_solver(P3BMRM)
sosvm.train()
res = sosvm.get_result()
Fps = np.array(res.get_hist_Fp_vector())
# Fd history (assuming the result object exposes get_hist_Fd_vector alongside get_hist_Fp_vector)
Fds = np.array(res.get_hist_Fd_vector())
wdists = np.array(res.get_hist_wdist_vector())
plt.figure()
plt.subplot(221)
plt.title('Fp and Fd history')
plt.plot(xrange(res.get_n_iters()), Fps, hold=True)
plt.plot(xrange(res.get_n_iters()), Fds, hold=True)
plt.subplot(222)
plt.title('w dist history')
plt.plot(xrange(res.get_n_iters()), wdists)
# Evaluation
out = sosvm.apply()
Evaluation = StructuredAccuracy()
acc = Evaluation.evaluate(out, labels)
print "Correct classification rate: %0.4f%%" % ( 100.0*acc )
# show figure
Z = get_so_labels(sosvm.apply(RealFeatures(X2)))
x = (X2[0,:]).reshape(cnt, cnt)
y = (X2[1,:]).reshape(cnt, cnt)
z = Z.reshape(cnt, cnt)
plt.subplot(223)
plt.pcolor(x, y, z)
plt.contour(x, y, z, linewidths=1, colors='black', hold=True)
plt.plot(X[:,0], X[:,1], 'yo')
plt.axis('tight')
plt.title('Classification')
plt.show()
| gpl-3.0 |
mikebenfield/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 63 | 6459 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
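# Illustrative shape note: with n_components=3, n_samples=10 and image_size=(8, 8),
# U is (10, 3), V is (3, 64) and the returned Y is (10, 64).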
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
# Test that deprecated ridge_alpha parameter throws warning
warning_msg = "The ridge_alpha parameter on transform()"
assert_warns_message(DeprecationWarning, warning_msg, spca_lars.transform,
Y, ridge_alpha=0.01)
assert_warns_message(DeprecationWarning, warning_msg, spca_lars.transform,
Y, ridge_alpha=None)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
RPGOne/Skynet | imbalanced-learn-master/examples/over-sampling/plot_adasyn.py | 3 | 1878 | """
======
ADASYN
======
An illustration of the Adaptive Synthetic Sampling Approach for Imbalanced
Learning ADASYN method.
"""
print(__doc__)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Define some color for the plotting
almost_black = '#262626'
palette = sns.color_palette()
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from imblearn.over_sampling import ADASYN
# Generate the dataset
X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
n_informative=3, n_redundant=1, flip_y=0,
n_features=20, n_clusters_per_class=1,
n_samples=5000, random_state=10)
# Instantiate a PCA object for the sake of easy visualisation
pca = PCA(n_components=2)
# Fit and transform x to visualise inside a 2D feature space
X_vis = pca.fit_transform(X)
# Apply the random over-sampling
ada = ADASYN()
X_resampled, y_resampled = ada.fit_sample(X, y)
X_res_vis = pca.transform(X_resampled)
# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label="Class #0", alpha=0.5,
edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)
ax1.scatter(X_vis[y == 1, 0], X_vis[y == 1, 1], label="Class #1", alpha=0.5,
edgecolor=almost_black, facecolor=palette[2], linewidth=0.15)
ax1.set_title('Original set')
ax2.scatter(X_res_vis[y_resampled == 0, 0], X_res_vis[y_resampled == 0, 1],
label="Class #0", alpha=.5, edgecolor=almost_black,
facecolor=palette[0], linewidth=0.15)
ax2.scatter(X_res_vis[y_resampled == 1, 0], X_res_vis[y_resampled == 1, 1],
label="Class #1", alpha=.5, edgecolor=almost_black,
facecolor=palette[2], linewidth=0.15)
ax2.set_title('ADASYN')
plt.show()
| bsd-3-clause |
mengli/PcmAudioRecorder | self_driving/optical_flow/python/common.py | 11 | 6701 | #!/usr/bin/env python
'''
This module contains some common routines used by other samples.
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
from functools import reduce
import numpy as np
import cv2
# built-in modules
import os
import itertools as it
from contextlib import contextmanager
image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']
class Bunch(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def __str__(self):
return str(self.__dict__)
def splitfn(fn):
path, fn = os.path.split(fn)
name, ext = os.path.splitext(fn)
return path, name, ext
def anorm2(a):
return (a*a).sum(-1)
def anorm(a):
return np.sqrt( anorm2(a) )
def homotrans(H, x, y):
xs = H[0, 0]*x + H[0, 1]*y + H[0, 2]
ys = H[1, 0]*x + H[1, 1]*y + H[1, 2]
s = H[2, 0]*x + H[2, 1]*y + H[2, 2]
return xs/s, ys/s
def to_rect(a):
a = np.ravel(a)
if len(a) == 2:
a = (0, 0, a[0], a[1])
return np.array(a, np.float64).reshape(2, 2)
def rect2rect_mtx(src, dst):
src, dst = to_rect(src), to_rect(dst)
cx, cy = (dst[1] - dst[0]) / (src[1] - src[0])
tx, ty = dst[0] - src[0] * (cx, cy)
M = np.float64([[ cx, 0, tx],
[ 0, cy, ty],
[ 0, 0, 1]])
return M
def lookat(eye, target, up = (0, 0, 1)):
fwd = np.asarray(target, np.float64) - eye
fwd /= anorm(fwd)
right = np.cross(fwd, up)
right /= anorm(right)
down = np.cross(fwd, right)
R = np.float64([right, down, fwd])
tvec = -np.dot(R, eye)
return R, tvec
def mtx2rvec(R):
w, u, vt = cv2.SVDecomp(R - np.eye(3))
p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0])
c = np.dot(vt[0], p)
s = np.dot(vt[1], p)
axis = np.cross(vt[0], vt[1])
return axis * np.arctan2(s, c)
def draw_str(dst, target, s):
x, y = target
cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.LINE_AA)
cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.LINE_AA)
class Sketcher:
def __init__(self, windowname, dests, colors_func):
self.prev_pt = None
self.windowname = windowname
self.dests = dests
self.colors_func = colors_func
self.dirty = False
self.show()
cv2.setMouseCallback(self.windowname, self.on_mouse)
def show(self):
cv2.imshow(self.windowname, self.dests[0])
def on_mouse(self, event, x, y, flags, param):
pt = (x, y)
if event == cv2.EVENT_LBUTTONDOWN:
self.prev_pt = pt
elif event == cv2.EVENT_LBUTTONUP:
self.prev_pt = None
if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON:
for dst, color in zip(self.dests, self.colors_func()):
cv2.line(dst, self.prev_pt, pt, color, 5)
self.dirty = True
self.prev_pt = pt
self.show()
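# Minimal usage sketch (illustrative; the file name and colours are placeholders):
#
#   img = cv2.imread('some_image.png')
#   mark = np.zeros(img.shape[:2], np.uint8)
#   sketch = Sketcher('draw', [img.copy(), mark], lambda: ((255, 255, 255), 255))
#   # moving the mouse with the left button pressed now paints into both dests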
# palette data from matplotlib/_cm.py
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
(0.91,0,0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
(1, 0, 0))}
cmap_data = { 'jet' : _jet_data }
def make_cmap(name, n=256):
data = cmap_data[name]
xs = np.linspace(0.0, 1.0, n)
channels = []
eps = 1e-6
for ch_name in ['blue', 'green', 'red']:
ch_data = data[ch_name]
xp, yp = [], []
for x, y1, y2 in ch_data:
xp += [x, x+eps]
yp += [y1, y2]
ch = np.interp(xs, xp, yp)
channels.append(ch)
return np.uint8(np.array(channels).T*255)
def nothing(*arg, **kw):
pass
def clock():
return cv2.getTickCount() / cv2.getTickFrequency()
@contextmanager
def Timer(msg):
print(msg, '...',)
start = clock()
try:
yield
finally:
print("%.2f ms" % ((clock()-start)*1000))
class StatValue:
def __init__(self, smooth_coef = 0.5):
self.value = None
self.smooth_coef = smooth_coef
def update(self, v):
if self.value is None:
self.value = v
else:
c = self.smooth_coef
self.value = c * self.value + (1.0-c) * v
class RectSelector:
def __init__(self, win, callback):
self.win = win
self.callback = callback
cv2.setMouseCallback(win, self.onmouse)
self.drag_start = None
self.drag_rect = None
def onmouse(self, event, x, y, flags, param):
x, y = np.int16([x, y]) # BUG
if event == cv2.EVENT_LBUTTONDOWN:
self.drag_start = (x, y)
return
if self.drag_start:
if flags & cv2.EVENT_FLAG_LBUTTON:
xo, yo = self.drag_start
x0, y0 = np.minimum([xo, yo], [x, y])
x1, y1 = np.maximum([xo, yo], [x, y])
self.drag_rect = None
if x1-x0 > 0 and y1-y0 > 0:
self.drag_rect = (x0, y0, x1, y1)
else:
rect = self.drag_rect
self.drag_start = None
self.drag_rect = None
if rect:
self.callback(rect)
def draw(self, vis):
if not self.drag_rect:
return False
x0, y0, x1, y1 = self.drag_rect
cv2.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
return True
@property
def dragging(self):
return self.drag_rect is not None
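# Usage sketch (illustrative): construct with a window name and a callback that
# receives the selected (x0, y0, x1, y1) rectangle, call draw(vis) from the
# display loop, and check `dragging` to pause processing while a drag is active.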
def grouper(n, iterable, fillvalue=None):
'''grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx'''
args = [iter(iterable)] * n
if PY3:
output = it.zip_longest(fillvalue=fillvalue, *args)
else:
output = it.izip_longest(fillvalue=fillvalue, *args)
return output
def mosaic(w, imgs):
'''Make a grid from images.
w -- number of grid columns
imgs -- images (must have same size and format)
'''
imgs = iter(imgs)
if PY3:
img0 = next(imgs)
else:
img0 = imgs.next()
pad = np.zeros_like(img0)
imgs = it.chain([img0], imgs)
rows = grouper(w, imgs, pad)
return np.vstack(map(np.hstack, rows))
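# Example (illustrative): mosaic(2, [a, b, c]) lays three equally sized images
# out on a two-column grid, padding the missing fourth cell with zeros_like(a).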
def getsize(img):
h, w = img.shape[:2]
return w, h
def mdot(*args):
return reduce(np.dot, args)
def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
for kp in keypoints:
x, y = kp.pt
cv2.circle(vis, (int(x), int(y)), 2, color)
| apache-2.0 |
vybstat/scikit-learn | sklearn/neighbors/tests/test_kde.py | 208 | 5556 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
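# Illustrative note: each branch evaluates the closed-form kernel sum directly,
# e.g. the 'gaussian' branch returns norm * sum_j exp(-0.5 * d_ij**2 / h**2)
# for every query point Y_i, which the tests compare against KernelDensity.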
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
| bsd-3-clause |
zasdfgbnm/tensorflow | tensorflow/contrib/metrics/python/ops/metric_ops.py | 1 | 167419 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains metric-computing operations on streamed tensors.
Module documentation, including "@@" callouts, should be put in
third_party/tensorflow/contrib/metrics/__init__.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import metrics_impl
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.distributions.normal import Normal
from tensorflow.python.util.deprecation import deprecated
# Epsilon constant used to represent extremely small quantity.
_EPSILON = 1e-7
def _safe_div(numerator, denominator, name):
"""Divides two values, returning 0 if the denominator is <= 0.
Args:
numerator: A real `Tensor`.
denominator: A real `Tensor`, with dtype matching `numerator`.
name: Name for the returned op.
Returns:
0 if `denominator` <= 0, else `numerator` / `denominator`
"""
return array_ops.where(
math_ops.greater(denominator, 0),
math_ops.truediv(numerator, denominator),
0,
name=name)
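# Illustrative sketch (for exposition only; not part of the public API of this
# module). Shows the behavior of _safe_div in a TF 1.x session: a zero (or
# negative) denominator yields 0 instead of inf/nan. The helper name is
# hypothetical.
def _example_safe_div():
  import tensorflow as tf  # local import, only needed for this sketch
  with tf.Graph().as_default(), tf.Session() as sess:
    ok = _safe_div(tf.constant(4.0), tf.constant(2.0), name='ok')
    guarded = _safe_div(tf.constant(4.0), tf.constant(0.0), name='guarded')
    return sess.run([ok, guarded])  # [2.0, 0.0]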
def streaming_true_positives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.true_positives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
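# Illustrative usage sketch (for exposition only; not part of the public API of
# this module). Demonstrates the (value_tensor, update_op) pattern shared by
# the streaming metrics in this file, assuming a TF 1.x graph-mode environment.
# The helper name is hypothetical.
def _example_streaming_true_positives():
  import tensorflow as tf  # local import, only needed for this sketch
  with tf.Graph().as_default(), tf.Session() as sess:
    predictions = tf.placeholder(tf.bool, shape=[None])
    labels = tf.placeholder(tf.bool, shape=[None])
    tp, update_op = streaming_true_positives(predictions, labels)
    # Streaming metrics keep their running state in local variables.
    sess.run(tf.local_variables_initializer())
    sess.run(update_op, {predictions: [True, True, False],
                         labels: [True, False, False]})  # one true positive
    sess.run(update_op, {predictions: [True], labels: [True]})  # one more
    return sess.run(tp)  # 2.0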
def streaming_true_negatives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.true_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_positives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of false positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.false_positives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_negatives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the total number of false negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
"""
return metrics.false_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_mean(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the (weighted) mean of the given values.
The `streaming_mean` function creates two local variables, `total` and `count`
that are used to compute the average of `values`. This average is ultimately
returned as `mean` which is an idempotent operation that simply divides
`total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
must be broadcastable to `values` (i.e., all dimensions must be either
`1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A `Tensor` representing the current mean, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.mean(
values=values,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
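# Illustrative usage sketch (for exposition only; not part of the public API of
# this module). Shows how `total` and `count` accumulate across batches in a
# TF 1.x session. The helper name is hypothetical.
def _example_streaming_mean():
  import tensorflow as tf  # local import, only needed for this sketch
  with tf.Graph().as_default(), tf.Session() as sess:
    values = tf.placeholder(tf.float32, shape=[None])
    mean, update_op = streaming_mean(values)
    sess.run(tf.local_variables_initializer())
    sess.run(update_op, {values: [1.0, 2.0, 3.0]})
    sess.run(update_op, {values: [4.0]})
    return sess.run(mean)  # (1 + 2 + 3 + 4) / 4 = 2.5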
def streaming_mean_tensor(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the element-wise (weighted) mean of the given tensors.
In contrast to the `streaming_mean` function which returns a scalar with the
mean, this function returns an average tensor with the same shape as the
input tensors.
The `streaming_mean_tensor` function creates two local variables,
`total_tensor` and `count_tensor` that are used to compute the average of
`values`. This average is ultimately returned as `mean` which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
must be broadcastable to `values` (i.e., all dimensions must be either
`1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A float `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.mean_tensor(
values=values,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.accuracy. Note that the order of the '
'labels and predictions arguments has been switched.')
def streaming_accuracy(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates how often `predictions` matches `labels`.
The `streaming_accuracy` function creates two local variables, `total` and
`count` that are used to compute the frequency with which `predictions`
matches `labels`. This frequency is ultimately returned as `accuracy`: an
idempotent operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `accuracy`.
Internally, an `is_correct` operation computes a `Tensor` with elements 1.0
where the corresponding elements of `predictions` and `labels` match and 0.0
otherwise. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `is_correct`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of any shape.
labels: The ground truth values, a `Tensor` whose shape matches
`predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `accuracy` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
accuracy: A `Tensor` representing the accuracy, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `accuracy`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.accuracy(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
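# Illustrative usage sketch (for exposition only; not part of the public API of
# this module). Shows how weights of 0 mask examples out of the accuracy,
# assuming a TF 1.x graph-mode environment. The helper name is hypothetical.
def _example_streaming_accuracy_with_weights():
  import tensorflow as tf  # local import, only needed for this sketch
  with tf.Graph().as_default(), tf.Session() as sess:
    predictions = tf.constant([1, 0, 1, 1], dtype=tf.int64)
    labels = tf.constant([1, 0, 0, 0], dtype=tf.int64)
    weights = tf.constant([1.0, 1.0, 1.0, 0.0])  # the last example is masked
    accuracy, update_op = streaming_accuracy(predictions, labels, weights)
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(accuracy)  # 2 correct out of 3 weighted examples ~ 0.667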
def streaming_precision(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the precision of the predictions with respect to the labels.
The `streaming_precision` function creates two local variables,
`true_positives` and `false_positives`, that are used to compute the
precision. This value is ultimately returned as `precision`, an idempotent
operation that simply divides `true_positives` by the sum of `true_positives`
and `false_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`. `update_op` weights each prediction by the corresponding value in
`weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: Scalar float `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately and whose value matches
`precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.precision(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_recall(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall of the predictions with respect to the labels.
The `streaming_recall` function creates two local variables, `true_positives`
and `false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` that updates these variables and returns the `recall`. `update_op`
weights each prediction by the corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: Scalar float `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately and whose value matches
`recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.recall(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_positive_rate(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the false positive rate of predictions with respect to labels.
The `false_positive_rate` function creates two local variables,
`false_positives` and `true_negatives`, that are used to compute the
false positive rate. This value is ultimately returned as
`false_positive_rate`, an idempotent operation that simply divides
`false_positives` by the sum of `false_positives` and `true_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_positive_rate`. `update_op` weights each prediction by the
corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_positive_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positive_rate: Scalar float `Tensor` with the value of
`false_positives` divided by the sum of `false_positives` and
`true_negatives`.
update_op: `Operation` that increments `false_positives` and
`true_negatives` variables appropriately and whose value matches
`false_positive_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_positive_rate',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
false_p, false_positives_update_op = metrics.false_positives(
labels=labels,
predictions=predictions,
weights=weights,
metrics_collections=None,
updates_collections=None,
name=None)
true_n, true_negatives_update_op = metrics.true_negatives(
labels=labels,
predictions=predictions,
weights=weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_fpr(fp, tn, name):
return array_ops.where(
math_ops.greater(fp + tn, 0), math_ops.div(fp, fp + tn), 0, name)
fpr = compute_fpr(false_p, true_n, 'value')
update_op = compute_fpr(false_positives_update_op, true_negatives_update_op,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fpr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fpr, update_op
def streaming_false_negative_rate(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the false negative rate of predictions with respect to labels.
The `false_negative_rate` function creates two local variables,
`false_negatives` and `true_positives`, that are used to compute the
  false negative rate. This value is ultimately returned as
`false_negative_rate`, an idempotent operation that simply divides
`false_negatives` by the sum of `false_negatives` and `true_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_negative_rate`. `update_op` weights each prediction by the
corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_negative_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negative_rate: Scalar float `Tensor` with the value of
`false_negatives` divided by the sum of `false_negatives` and
`true_positives`.
update_op: `Operation` that increments `false_negatives` and
`true_positives` variables appropriately and whose value matches
`false_negative_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_negative_rate',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
false_n, false_negatives_update_op = metrics.false_negatives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
true_p, true_positives_update_op = metrics.true_positives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_fnr(fn, tp, name):
return array_ops.where(
math_ops.greater(fn + tp, 0), math_ops.div(fn, fn + tp), 0, name)
fnr = compute_fnr(false_n, true_p, 'value')
update_op = compute_fnr(false_negatives_update_op, true_positives_update_op,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fnr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fnr, update_op
def _streaming_confusion_matrix_at_thresholds(predictions,
labels,
thresholds,
weights=None,
includes=None):
"""Computes true_positives, false_negatives, true_negatives, false_positives.
This function creates up to four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives`.
`true_positive[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `True`.
`false_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
`true_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
`false_positives[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `False`.
For estimation of these metrics over a stream of data, for each metric the
function respectively creates an `update_op` operation that updates the
variable and returns its value.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `Tensor` whose shape matches `predictions`. `labels` will be cast
to `bool`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
    includes: Tuple of keys to return, from 'tp', 'fn', 'tn', 'fp'. If `None`,
      defaults to all four.
Returns:
values: Dict of variables of shape `[len(thresholds)]`. Keys are from
`includes`.
update_ops: Dict of operations that increments the `values`. Keys are from
`includes`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
all_includes = ('tp', 'fn', 'tn', 'fp')
if includes is None:
includes = all_includes
else:
for include in includes:
if include not in all_includes:
raise ValueError('Invalid key: %s.' % include)
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
num_thresholds = len(thresholds)
# Reshape predictions and labels.
predictions_2d = array_ops.reshape(predictions, [-1, 1])
labels_2d = array_ops.reshape(
math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
# Use static shape if known.
num_predictions = predictions_2d.get_shape().as_list()[0]
# Otherwise use dynamic shape.
if num_predictions is None:
num_predictions = array_ops.shape(predictions_2d)[0]
thresh_tiled = array_ops.tile(
array_ops.expand_dims(array_ops.constant(thresholds), [1]),
array_ops.stack([1, num_predictions]))
# Tile the predictions after thresholding them across different thresholds.
pred_is_pos = math_ops.greater(
array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
thresh_tiled)
if ('fn' in includes) or ('tn' in includes):
pred_is_neg = math_ops.logical_not(pred_is_pos)
# Tile labels by number of thresholds
label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
if ('fp' in includes) or ('tn' in includes):
label_is_neg = math_ops.logical_not(label_is_pos)
if weights is not None:
broadcast_weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), predictions)
weights_tiled = array_ops.tile(
array_ops.reshape(broadcast_weights, [1, -1]), [num_thresholds, 1])
thresh_tiled.get_shape().assert_is_compatible_with(
weights_tiled.get_shape())
else:
weights_tiled = None
values = {}
update_ops = {}
if 'tp' in includes:
true_positives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='true_positives')
is_true_positive = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_pos))
if weights_tiled is not None:
is_true_positive *= weights_tiled
update_ops['tp'] = state_ops.assign_add(true_positives,
math_ops.reduce_sum(
is_true_positive, 1))
values['tp'] = true_positives
if 'fn' in includes:
false_negatives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='false_negatives')
is_false_negative = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_neg))
if weights_tiled is not None:
is_false_negative *= weights_tiled
update_ops['fn'] = state_ops.assign_add(false_negatives,
math_ops.reduce_sum(
is_false_negative, 1))
values['fn'] = false_negatives
if 'tn' in includes:
true_negatives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='true_negatives')
is_true_negative = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_neg))
if weights_tiled is not None:
is_true_negative *= weights_tiled
update_ops['tn'] = state_ops.assign_add(true_negatives,
math_ops.reduce_sum(
is_true_negative, 1))
values['tn'] = true_negatives
if 'fp' in includes:
false_positives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='false_positives')
is_false_positive = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_pos))
if weights_tiled is not None:
is_false_positive *= weights_tiled
update_ops['fp'] = state_ops.assign_add(false_positives,
math_ops.reduce_sum(
is_false_positive, 1))
values['fp'] = false_positives
return values, update_ops
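# Illustrative usage sketch (for exposition only; not part of the public API of
# this module). Shows the per-threshold confusion counts for a tiny batch in a
# TF 1.x session. The helper name is hypothetical.
def _example_confusion_matrix_at_thresholds():
  import tensorflow as tf  # local import, only needed for this sketch
  with tf.Graph().as_default(), tf.Session() as sess:
    predictions = tf.constant([0.1, 0.4, 0.6, 0.9])
    labels = tf.constant([False, False, True, True])
    values, update_ops = _streaming_confusion_matrix_at_thresholds(
        predictions, labels, thresholds=[0.25, 0.75])
    sess.run(tf.local_variables_initializer())
    sess.run(list(update_ops.values()))
    # At threshold 0.25, predictions above it are [0.4, 0.6, 0.9]: tp=2, fp=1.
    # At threshold 0.75, only 0.9 is above it: tp=1, fp=0.
    return sess.run([values['tp'], values['fn'], values['tn'], values['fp']])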
def streaming_true_positives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('tp',))
return values['tp'], update_ops['tp']
def streaming_false_negatives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('fn',))
return values['fn'], update_ops['fn']
def streaming_false_positives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('fp',))
return values['fp'], update_ops['fp']
def streaming_true_negatives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('tn',))
return values['tn'], update_ops['tn']
def streaming_curve_points(labels=None,
predictions=None,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None):
"""Computes curve (ROC or PR) values for a prespecified number of points.
The `streaming_curve_points` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
that are used to compute the curve values. To discretize the curve, a linearly
spaced set of thresholds is used to compute pairs of recall and precision
values.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
Returns:
points: A `Tensor` with shape [num_thresholds, 2] that contains points of
the curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
TODO(chizeng): Consider rewriting this method to make use of logic within the
precision_recall_at_equal_thresholds method (to improve run time).
"""
with variable_scope.variable_scope(name, 'curve_points',
(labels, predictions, weights)):
if curve != 'ROC' and curve != 'PR':
raise ValueError('curve must be either ROC or PR, %s unknown' % (curve))
kepsilon = _EPSILON # to account for floating point imprecisions
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
# Add epsilons to avoid dividing by 0.
epsilon = 1.0e-6
def compute_points(tp, fn, tn, fp):
"""Computes the roc-auc or pr-auc based on confusion counts."""
rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
if curve == 'ROC':
fp_rate = math_ops.div(fp, fp + tn + epsilon)
return fp_rate, rec
else: # curve == 'PR'.
prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
return rec, prec
xs, ys = compute_points(values['tp'], values['fn'], values['tn'],
values['fp'])
points = array_ops.stack([xs, ys], axis=1)
update_op = control_flow_ops.group(*update_ops.values())
if metrics_collections:
ops.add_to_collections(metrics_collections, points)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return points, update_op
@deprecated(None, 'Please switch to tf.metrics.auc. Note that the order of the '
'labels and predictions arguments has been switched.')
def streaming_auc(predictions,
labels,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None):
"""Computes the approximate AUC via a Riemann sum.
The `streaming_auc` function creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the AUC. To discretize the AUC curve, a linearly spaced set of
thresholds is used to compute pairs of recall and precision values. The area
under the ROC-curve is therefore computed using the height of the recall
  values by the false positive rate, while the area under the PR-curve is
  computed using the height of the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall values
(computed using the aforementioned variables). The `num_thresholds` variable
controls the degree of discretization with larger numbers of thresholds more
closely approximating the true AUC. The quality of the approximation may vary
dramatically depending on `num_thresholds`.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
approximation may be poor if this is not the case.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `auc`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
Returns:
auc: A scalar `Tensor` representing the current area-under-curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `auc`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.auc(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
num_thresholds=num_thresholds,
curve=curve,
updates_collections=updates_collections,
name=name)
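# Illustrative usage sketch (for exposition only; not part of the public API of
# this module). A perfectly ranked batch gives an AUC close to 1.0 (small
# deviations come from the epsilon terms in the approximation). Assumes TF 1.x
# graph mode; the helper name is hypothetical.
def _example_streaming_auc():
  import tensorflow as tf  # local import, only needed for this sketch
  with tf.Graph().as_default(), tf.Session() as sess:
    predictions = tf.constant([0.1, 0.2, 0.8, 0.9])
    labels = tf.constant([False, False, True, True])
    auc, update_op = streaming_auc(predictions, labels, num_thresholds=200)
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(auc)  # approximately 1.0 for a perfectly ranked batch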
def _compute_dynamic_auc(labels, predictions, curve='ROC'):
"""Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
Computes the area under the ROC or PR curve using each prediction as a
threshold. This could be slow for large batches, but has the advantage of not
having its results degrade depending on the distribution of predictions.
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` with values of 0 or 1 and type `int64`.
predictions: A 1-D `Tensor` of predictions whose values are `float64`.
    curve: The name of the curve to be computed, 'ROC' for the Receiver
Operating Characteristic or 'PR' for the Precision-Recall curve.
Returns:
A scalar `Tensor` containing the area-under-curve value for the input.
"""
# Count the total number of positive and negative labels in the input.
size = array_ops.size(predictions)
total_positive = math_ops.cast(math_ops.reduce_sum(labels), dtypes.int32)
def continue_computing_dynamic_auc():
"""Continues dynamic auc computation, entered if labels are not all equal.
Returns:
A scalar `Tensor` containing the area-under-curve value.
"""
# Sort the predictions descending, and the corresponding labels as well.
ordered_predictions, indices = nn.top_k(predictions, k=size)
ordered_labels = array_ops.gather(labels, indices)
# Get the counts of the unique ordered predictions.
_, _, counts = array_ops.unique_with_counts(ordered_predictions)
# Compute the indices of the split points between different predictions.
splits = math_ops.cast(
array_ops.pad(math_ops.cumsum(counts), paddings=[[1, 0]]), dtypes.int32)
# Count the positives to the left of the split indices.
positives = math_ops.cast(
array_ops.pad(math_ops.cumsum(ordered_labels), paddings=[[1, 0]]),
dtypes.int32)
true_positives = array_ops.gather(positives, splits)
if curve == 'ROC':
# Count the negatives to the left of every split point and the total
# number of negatives for computing the FPR.
false_positives = math_ops.subtract(splits, true_positives)
total_negative = size - total_positive
x_axis_values = math_ops.truediv(false_positives, total_negative)
y_axis_values = math_ops.truediv(true_positives, total_positive)
elif curve == 'PR':
x_axis_values = math_ops.truediv(true_positives, total_positive)
# For conformance, set precision to 1 when the number of positive
# classifications is 0.
y_axis_values = array_ops.where(
math_ops.greater(splits, 0), math_ops.truediv(true_positives, splits),
array_ops.ones_like(true_positives, dtype=dtypes.float64))
# Calculate trapezoid areas.
heights = math_ops.add(y_axis_values[1:], y_axis_values[:-1]) / 2.0
widths = math_ops.abs(
math_ops.subtract(x_axis_values[1:], x_axis_values[:-1]))
return math_ops.reduce_sum(math_ops.multiply(heights, widths))
# If all the labels are the same, AUC isn't well-defined (but raising an
# exception seems excessive) so we return 0, otherwise we finish computing.
return control_flow_ops.cond(
math_ops.logical_or(
math_ops.equal(total_positive, 0), math_ops.equal(
total_positive, size)),
true_fn=lambda: array_ops.constant(0, dtypes.float64),
false_fn=continue_computing_dynamic_auc)
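# Illustrative sketch (for exposition only; not part of the public API of this
# module). A plain-NumPy re-derivation of the same trapezoidal ROC AUC that
# _compute_dynamic_auc builds in the graph, handy for checking the logic on
# small inputs. It assumes distinct prediction values (the graph version also
# collapses ties); the helper name is hypothetical.
def _example_dynamic_roc_auc_numpy(labels, predictions):
  import numpy as np  # local import, only needed for this sketch
  labels = np.asarray(labels, dtype=np.int64)
  predictions = np.asarray(predictions, dtype=np.float64)
  order = np.argsort(-predictions)  # sort scores descending
  labels = labels[order]
  total_pos = labels.sum()
  total_neg = labels.size - total_pos
  if total_pos == 0 or total_neg == 0:
    return 0.0  # mirrors the graph's "all labels equal" trivial case
  tps = np.concatenate([[0], np.cumsum(labels)])      # true positives per cut
  fps = np.concatenate([[0], np.cumsum(1 - labels)])  # false positives per cut
  tpr = tps / float(total_pos)
  fpr = fps / float(total_neg)
  # Trapezoidal rule over the (FPR, TPR) curve.
  return float(np.sum((tpr[1:] + tpr[:-1]) / 2.0 * np.abs(np.diff(fpr))))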
def streaming_dynamic_auc(labels,
predictions,
curve='ROC',
metrics_collections=(),
updates_collections=(),
name=None):
"""Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
USAGE NOTE: this approach requires storing all of the predictions and labels
for a single evaluation in memory, so it may not be usable when the evaluation
batch size and/or the number of evaluation steps is very large.
Computes the area under the ROC or PR curve using each prediction as a
threshold. This has the advantage of being resilient to the distribution of
predictions by aggregating across batches, accumulating labels and predictions
and performing the final calculation using all of the concatenated values.
Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions` and with values of 0 or 1 that are castable to `int64`.
predictions: A `Tensor` of predictions whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
curve: The name of the curve for which to compute AUC, 'ROC' for the
      Receiver Operating Characteristic or 'PR' for the Precision-Recall curve.
metrics_collections: An optional iterable of collections that `auc` should
be added to.
updates_collections: An optional iterable of collections that `update_op`
should be added to.
name: An optional name for the variable_scope that contains the metric
variables.
Returns:
auc: A scalar `Tensor` containing the current area-under-curve value.
update_op: An operation that concatenates the input labels and predictions
to the accumulated values.
Raises:
ValueError: If `labels` and `predictions` have mismatched shapes or if
`curve` isn't a recognized curve type.
"""
if curve not in ['PR', 'ROC']:
raise ValueError('curve must be either ROC or PR, %s unknown' % curve)
with variable_scope.variable_scope(name, default_name='dynamic_auc'):
labels.get_shape().assert_is_compatible_with(predictions.get_shape())
predictions = array_ops.reshape(
math_ops.cast(predictions, dtypes.float64), [-1])
labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
with ops.control_dependencies([
check_ops.assert_greater_equal(
labels,
array_ops.zeros_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is <0'),
check_ops.assert_less_equal(
labels,
array_ops.ones_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is >1')
]):
preds_accum, update_preds = streaming_concat(
predictions, name='concat_preds')
labels_accum, update_labels = streaming_concat(
labels, name='concat_labels')
update_op = control_flow_ops.group(update_labels, update_preds)
auc = _compute_dynamic_auc(labels_accum, preds_accum, curve=curve)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, auc)
return auc, update_op
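# Illustrative usage sketch (for exposition only; not part of the public API of
# this module). Accumulates labels and predictions over two batches, then reads
# the AUC over everything seen so far. Assumes TF 1.x graph mode; the helper
# name is hypothetical.
def _example_streaming_dynamic_auc():
  import tensorflow as tf  # local import, only needed for this sketch
  with tf.Graph().as_default(), tf.Session() as sess:
    labels = tf.placeholder(tf.int64, shape=[None])
    predictions = tf.placeholder(tf.float64, shape=[None])
    auc, update_op = streaming_dynamic_auc(labels, predictions)
    sess.run(tf.local_variables_initializer())
    sess.run(update_op, {labels: [0, 0, 1], predictions: [0.1, 0.4, 0.8]})
    sess.run(update_op, {labels: [1], predictions: [0.9]})
    return sess.run(auc)  # 1.0 here, since the accumulated ranking is perfect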
def _compute_placement_auc(labels, predictions, weights, alpha,
logit_transformation, is_valid):
"""Computes the AUC and asymptotic normally distributed confidence interval.
The calculations are achieved using the fact that AUC = P(Y_1>Y_0) and the
concept of placement values for each labeled group, as presented by Delong and
Delong (1988). The actual algorithm used is a more computationally efficient
approach presented by Sun and Xu (2014). This could be slow for large batches,
but has the advantage of not having its results degrade depending on the
distribution of predictions.
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` with values of 0 or 1 and type `int64`.
predictions: A 1-D `Tensor` of predictions whose values are `float64`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`.
alpha: Confidence interval level desired.
logit_transformation: A boolean value indicating whether the estimate should
be logit transformed prior to calculating the confidence interval. Doing
so enforces the restriction that the AUC should never be outside the
interval [0,1].
is_valid: A bool tensor describing whether the input is valid.
Returns:
A 1-D `Tensor` containing the area-under-curve, lower, and upper confidence
interval values.
"""
# Disable the invalid-name checker so that we can capitalize the name.
# pylint: disable=invalid-name
AucData = collections_lib.namedtuple('AucData', ['auc', 'lower', 'upper'])
# pylint: enable=invalid-name
  # If all the labels are the same or the number of observations is too small,
  # the AUC isn't well-defined.
size = array_ops.size(predictions, out_type=dtypes.int32)
# Count the total number of positive and negative labels in the input.
total_0 = math_ops.reduce_sum(
math_ops.cast(1 - labels, weights.dtype) * weights)
total_1 = math_ops.reduce_sum(
math_ops.cast(labels, weights.dtype) * weights)
# Sort the predictions ascending, as well as
# (i) the corresponding labels and
# (ii) the corresponding weights.
ordered_predictions, indices = nn.top_k(predictions, k=size, sorted=True)
ordered_predictions = array_ops.reverse(
ordered_predictions, axis=array_ops.zeros(1, dtypes.int32))
indices = array_ops.reverse(indices, axis=array_ops.zeros(1, dtypes.int32))
ordered_labels = array_ops.gather(labels, indices)
ordered_weights = array_ops.gather(weights, indices)
# We now compute values required for computing placement values.
# We generate a list of indices (segmented_indices) of increasing order. An
# index is assigned for each unique prediction float value. Prediction
# values that are the same share the same index.
_, segmented_indices = array_ops.unique(ordered_predictions)
# We create 2 tensors of weights. weights_for_true is non-zero for true
# labels. weights_for_false is non-zero for false labels.
float_labels_for_true = math_ops.cast(ordered_labels, dtypes.float32)
float_labels_for_false = 1.0 - float_labels_for_true
weights_for_true = ordered_weights * float_labels_for_true
weights_for_false = ordered_weights * float_labels_for_false
# For each set of weights with the same segmented indices, we add up the
# weight values. Note that for each label, we deliberately rely on weights
# for the opposite label.
weight_totals_for_true = math_ops.segment_sum(weights_for_false,
segmented_indices)
weight_totals_for_false = math_ops.segment_sum(weights_for_true,
segmented_indices)
# These cumulative sums of weights importantly exclude the current weight
# sums.
cum_weight_totals_for_true = math_ops.cumsum(weight_totals_for_true,
exclusive=True)
cum_weight_totals_for_false = math_ops.cumsum(weight_totals_for_false,
exclusive=True)
# Compute placement values using the formula. Values with the same segmented
# indices and labels share the same placement values.
placements_for_true = (
(cum_weight_totals_for_true + weight_totals_for_true / 2.0) /
(math_ops.reduce_sum(weight_totals_for_true) + _EPSILON))
placements_for_false = (
(cum_weight_totals_for_false + weight_totals_for_false / 2.0) /
(math_ops.reduce_sum(weight_totals_for_false) + _EPSILON))
# We expand the tensors of placement values (for each label) so that their
# shapes match that of predictions.
placements_for_true = array_ops.gather(placements_for_true, segmented_indices)
placements_for_false = array_ops.gather(placements_for_false,
segmented_indices)
# Select placement values based on the label for each index.
placement_values = (
placements_for_true * float_labels_for_true +
placements_for_false * float_labels_for_false)
# Split placement values by labeled groups.
placement_values_0 = placement_values * math_ops.cast(
1 - ordered_labels, weights.dtype)
weights_0 = ordered_weights * math_ops.cast(
1 - ordered_labels, weights.dtype)
placement_values_1 = placement_values * math_ops.cast(
ordered_labels, weights.dtype)
weights_1 = ordered_weights * math_ops.cast(
ordered_labels, weights.dtype)
# Calculate AUC using placement values
auc_0 = (math_ops.reduce_sum(weights_0 * (1. - placement_values_0)) /
(total_0 + _EPSILON))
auc_1 = (math_ops.reduce_sum(weights_1 * (placement_values_1)) /
(total_1 + _EPSILON))
auc = array_ops.where(math_ops.less(total_0, total_1), auc_1, auc_0)
# Calculate variance and standard error using the placement values.
var_0 = (
math_ops.reduce_sum(
weights_0 * math_ops.square(1. - placement_values_0 - auc_0)) /
(total_0 - 1. + _EPSILON))
var_1 = (
math_ops.reduce_sum(
weights_1 * math_ops.square(placement_values_1 - auc_1)) /
(total_1 - 1. + _EPSILON))
auc_std_err = math_ops.sqrt(
(var_0 / (total_0 + _EPSILON)) + (var_1 / (total_1 + _EPSILON)))
# Calculate asymptotic normal confidence intervals
std_norm_dist = Normal(loc=0., scale=1.)
z_value = std_norm_dist.quantile((1.0 - alpha) / 2.0)
if logit_transformation:
estimate = math_ops.log(auc / (1. - auc + _EPSILON))
std_err = auc_std_err / (auc * (1. - auc + _EPSILON))
transformed_auc_lower = estimate + (z_value * std_err)
transformed_auc_upper = estimate - (z_value * std_err)
def inverse_logit_transformation(x):
exp_negative = math_ops.exp(math_ops.negative(x))
return 1. / (1. + exp_negative + _EPSILON)
auc_lower = inverse_logit_transformation(transformed_auc_lower)
auc_upper = inverse_logit_transformation(transformed_auc_upper)
else:
estimate = auc
std_err = auc_std_err
auc_lower = estimate + (z_value * std_err)
auc_upper = estimate - (z_value * std_err)
  # If the estimate is exactly 1 or 0, no variance is present, so the CI
  # collapses to the point estimate itself. N.B. this can be misleading, since
  # the number of observations may simply be too low.
lower = array_ops.where(
math_ops.logical_or(
math_ops.equal(auc, array_ops.ones_like(auc)),
math_ops.equal(auc, array_ops.zeros_like(auc))),
auc, auc_lower)
upper = array_ops.where(
math_ops.logical_or(
math_ops.equal(auc, array_ops.ones_like(auc)),
math_ops.equal(auc, array_ops.zeros_like(auc))),
auc, auc_upper)
# If all the labels are the same, AUC isn't well-defined (but raising an
# exception seems excessive) so we return 0, otherwise we finish computing.
trivial_value = array_ops.constant(0.0)
return AucData(*control_flow_ops.cond(
is_valid, lambda: [auc, lower, upper], lambda: [trivial_value]*3))
def auc_with_confidence_intervals(labels,
predictions,
weights=None,
alpha=0.95,
logit_transformation=True,
metrics_collections=(),
updates_collections=(),
name=None):
"""Computes the AUC and asymptotic normally distributed confidence interval.
USAGE NOTE: this approach requires storing all of the predictions and labels
for a single evaluation in memory, so it may not be usable when the evaluation
batch size and/or the number of evaluation steps is very large.
Computes the area under the ROC curve and its confidence interval using
placement values. This has the advantage of being resilient to the
distribution of predictions by aggregating across batches, accumulating labels
and predictions and performing the final calculation using all of the
concatenated values.
Args:
    labels: A `Tensor` of ground truth labels with the same shape as
      `predictions` and with values of 0 or 1 that are castable to `int64`.
predictions: A `Tensor` of predictions whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`.
alpha: Confidence interval level desired.
logit_transformation: A boolean value indicating whether the estimate should
be logit transformed prior to calculating the confidence interval. Doing
so enforces the restriction that the AUC should never be outside the
interval [0,1].
metrics_collections: An optional iterable of collections that `auc` should
be added to.
updates_collections: An optional iterable of collections that `update_op`
should be added to.
name: An optional name for the variable_scope that contains the metric
variables.
Returns:
auc: A 1-D `Tensor` containing the current area-under-curve, lower, and
upper confidence interval values.
update_op: An operation that concatenates the input labels and predictions
to the accumulated values.
Raises:
ValueError: If `labels`, `predictions`, and `weights` have mismatched shapes
or if `alpha` isn't in the range (0,1).
"""
if not (alpha > 0 and alpha < 1):
raise ValueError('alpha must be between 0 and 1; currently %.02f' % alpha)
if weights is None:
weights = array_ops.ones_like(predictions)
with variable_scope.variable_scope(
name,
default_name='auc_with_confidence_intervals',
values=[labels, predictions, weights]):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions,
labels=labels,
weights=weights)
total_weight = math_ops.reduce_sum(weights)
weights = array_ops.reshape(weights, [-1])
predictions = array_ops.reshape(
math_ops.cast(predictions, dtypes.float64), [-1])
labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
with ops.control_dependencies([
check_ops.assert_greater_equal(
labels,
array_ops.zeros_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is <0'),
check_ops.assert_less_equal(
labels,
array_ops.ones_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is >1'),
]):
preds_accum, update_preds = streaming_concat(
predictions, name='concat_preds')
labels_accum, update_labels = streaming_concat(labels,
name='concat_labels')
weights_accum, update_weights = streaming_concat(
weights, name='concat_weights')
update_op_for_valid_case = control_flow_ops.group(
update_labels, update_preds, update_weights)
# Only perform updates if this case is valid.
all_labels_positive_or_0 = math_ops.logical_and(
math_ops.equal(math_ops.reduce_min(labels), 0),
math_ops.equal(math_ops.reduce_max(labels), 1))
sums_of_weights_at_least_1 = math_ops.greater_equal(total_weight, 1.0)
is_valid = math_ops.logical_and(all_labels_positive_or_0,
sums_of_weights_at_least_1)
update_op = control_flow_ops.cond(
sums_of_weights_at_least_1,
lambda: update_op_for_valid_case, control_flow_ops.no_op)
auc = _compute_placement_auc(
labels_accum,
preds_accum,
weights_accum,
alpha=alpha,
logit_transformation=logit_transformation,
is_valid=is_valid)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, auc)
return auc, update_op
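# Illustrative usage sketch (for exposition only; not part of the public API of
# this module). Reads the point estimate together with its lower/upper
# confidence bounds. Predictions are kept float32 so the default weights share
# their dtype. Assumes TF 1.x graph mode; the helper name is hypothetical.
def _example_auc_with_confidence_intervals():
  import tensorflow as tf  # local import, only needed for this sketch
  with tf.Graph().as_default(), tf.Session() as sess:
    labels = tf.constant([0, 0, 1, 1, 0, 1], dtype=tf.int64)
    predictions = tf.constant([0.1, 0.4, 0.35, 0.8, 0.6, 0.9])
    auc, update_op = auc_with_confidence_intervals(labels, predictions)
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    # `auc` is an AucData namedtuple of (auc, lower, upper) tensors.
    return sess.run([auc.auc, auc.lower, auc.upper])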
def precision_recall_at_equal_thresholds(labels,
predictions,
weights=None,
num_thresholds=None,
use_locking=None,
name=None):
"""A helper method for creating metrics related to precision-recall curves.
These values are true positives, false negatives, true negatives, false
positives, precision, and recall. This function returns a data structure that
contains ops within it.
Unlike _streaming_confusion_matrix_at_thresholds (which exhibits O(T * N)
space and run time), this op exhibits O(T + N) space and run time, where T is
the number of thresholds and N is the size of the predictions tensor. Hence,
it may be advantageous to use this function when `predictions` is big.
For instance, prefer this method for per-pixel classification tasks, for which
the predictions tensor may be very large.
Each number in `predictions`, a float in `[0, 1]`, is compared with its
corresponding label in `labels`, and counts as a single tp/fp/tn/fn value at
each threshold. This is then multiplied with `weights` which can be used to
reweight certain values, or more commonly used for masking values.
Args:
labels: A bool `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional; If provided, a `Tensor` that has the same dtype as,
and broadcastable to, `predictions`. This tensor is multiplied by counts.
num_thresholds: Optional; Number of thresholds, evenly distributed in
`[0, 1]`. Should be `>= 2`. Defaults to 201. Note that the number of bins
is 1 less than `num_thresholds`. Using an even `num_thresholds` value
instead of an odd one may yield unfriendly edges for bins.
use_locking: Optional; If True, the op will be protected by a lock.
Otherwise, the behavior is undefined, but may exhibit less contention.
Defaults to True.
name: Optional; variable_scope name. If not provided, the string
      'precision_recall_at_equal_thresholds' is used.
Returns:
result: A named tuple (See PrecisionRecallData within the implementation of
this function) with properties that are variables of shape
`[num_thresholds]`. The names of the properties are tp, fp, tn, fn,
precision, recall, thresholds.
update_op: An op that accumulates values.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
# Disable the invalid-name checker so that we can capitalize the name.
# pylint: disable=invalid-name
PrecisionRecallData = collections_lib.namedtuple(
'PrecisionRecallData',
['tp', 'fp', 'tn', 'fn', 'precision', 'recall', 'thresholds'])
# pylint: enable=invalid-name
if num_thresholds is None:
num_thresholds = 201
if weights is None:
weights = 1.0
if use_locking is None:
use_locking = True
check_ops.assert_type(labels, dtypes.bool)
dtype = predictions.dtype
with variable_scope.variable_scope(name,
'precision_recall_at_equal_thresholds',
(labels, predictions, weights)):
# Make sure that predictions are within [0.0, 1.0].
with ops.control_dependencies([
check_ops.assert_greater_equal(
predictions,
math_ops.cast(0.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]'),
check_ops.assert_less_equal(
predictions,
math_ops.cast(1.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]')
]):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions,
labels=labels,
weights=weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# We cast to float to ensure we have 0.0 or 1.0.
f_labels = math_ops.cast(labels, dtype)
# Get weighted true/false labels.
true_labels = f_labels * weights
false_labels = (1.0 - f_labels) * weights
# Flatten predictions and labels.
predictions = array_ops.reshape(predictions, [-1])
true_labels = array_ops.reshape(true_labels, [-1])
false_labels = array_ops.reshape(false_labels, [-1])
# To compute TP/FP/TN/FN, we are measuring a binary classifier
# C(t) = (predictions >= t)
# at each threshold 't'. So we have
# TP(t) = sum( C(t) * true_labels )
# FP(t) = sum( C(t) * false_labels )
#
# But, computing C(t) requires computation for each t. To make it fast,
# observe that C(t) is a cumulative integral, and so if we have
# thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}
# where n = num_thresholds, and if we can compute the bucket function
    #   B(i) = Sum( (predictions == t), t_i <= t < t_{i+1} )
# then we get
# C(t_i) = sum( B(j), j >= i )
# which is the reversed cumulative sum in tf.cumsum().
#
# We can compute B(i) efficiently by taking advantage of the fact that
# our thresholds are evenly distributed, in that
# width = 1.0 / (num_thresholds - 1)
# thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
# Given a prediction value p, we can map it to its bucket by
# bucket_index(p) = floor( p * (num_thresholds - 1) )
# so we can use tf.scatter_add() to update the buckets in one pass.
#
# This implementation exhibits a run time and space complexity of O(T + N),
# where T is the number of thresholds and N is the size of predictions.
# Metrics that rely on _streaming_confusion_matrix_at_thresholds instead
# exhibit a complexity of O(T * N).
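    # Illustrative example (not part of the computation): with
    # num_thresholds = 5, the thresholds are [0.0, 0.25, 0.5, 0.75, 1.0] and
    # bucket_index(p) = floor(4 * p). Predictions [0.1, 0.6, 0.6, 1.0] with
    # all-true labels and unit weights give tp_buckets = [1, 0, 2, 0, 1],
    # and the reversed cumulative sum gives tp = [4, 3, 3, 1, 1], i.e.
    # TP(t_i) counts the predictions at or above threshold t_i.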
# Compute the bucket indices for each prediction value.
bucket_indices = math_ops.cast(
math_ops.floor(predictions * (num_thresholds - 1)), dtypes.int32)
with ops.name_scope('variables'):
tp_buckets_v = metrics_impl.metric_variable(
[num_thresholds], dtype, name='tp_buckets')
fp_buckets_v = metrics_impl.metric_variable(
[num_thresholds], dtype, name='fp_buckets')
with ops.name_scope('update_op'):
update_tp = state_ops.scatter_add(
tp_buckets_v, bucket_indices, true_labels, use_locking=use_locking)
update_fp = state_ops.scatter_add(
fp_buckets_v, bucket_indices, false_labels, use_locking=use_locking)
# Set up the cumulative sums to compute the actual metrics.
tp = math_ops.cumsum(tp_buckets_v, reverse=True, name='tp')
fp = math_ops.cumsum(fp_buckets_v, reverse=True, name='fp')
# fn = sum(true_labels) - tp
# = sum(tp_buckets) - tp
# = tp[0] - tp
# Similarly,
# tn = fp[0] - fp
tn = fp[0] - fp
fn = tp[0] - tp
# We use a minimum to prevent division by 0.
epsilon = 1e-7
precision = tp / math_ops.maximum(epsilon, tp + fp)
recall = tp / math_ops.maximum(epsilon, tp + fn)
result = PrecisionRecallData(
tp=tp,
fp=fp,
tn=tn,
fn=fn,
precision=precision,
recall=recall,
thresholds=math_ops.lin_space(0.0, 1.0, num_thresholds))
update_op = control_flow_ops.group(update_tp, update_fp)
return result, update_op
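# A minimal usage sketch for `precision_recall_at_equal_thresholds`, kept as a
# comment so it is not executed on import. It assumes a TF1-style session
# workflow; `eval_labels`, `eval_scores` and `num_eval_batches` are
# hypothetical placeholders.
#
#   data, update_op = precision_recall_at_equal_thresholds(
#       labels=eval_labels, predictions=eval_scores, num_thresholds=201)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     for _ in range(num_eval_batches):
#       sess.run(update_op)
#     precision, recall = sess.run([data.precision, data.recall])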
def streaming_specificity_at_sensitivity(predictions,
labels,
sensitivity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the specificity at a given sensitivity.
The `streaming_specificity_at_sensitivity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the specificity at the given
sensitivity value. The threshold for the given sensitivity value is computed
and used to evaluate the corresponding specificity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`specificity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
sensitivity: A scalar value in range `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
sensitivity.
metrics_collections: An optional list of collections that `specificity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
specificity: A scalar `Tensor` representing the specificity at the given
      `sensitivity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `specificity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`sensitivity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
return metrics.specificity_at_sensitivity(
sensitivity=sensitivity,
num_thresholds=num_thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_sensitivity_at_specificity(predictions,
labels,
specificity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the sensitivity at a given specificity.
The `streaming_sensitivity_at_specificity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the sensitivity at the given
specificity value. The threshold for the given specificity value is computed
and used to evaluate the corresponding sensitivity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
specificity: A scalar value in range `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
specificity.
metrics_collections: An optional list of collections that `sensitivity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
sensitivity: A scalar `Tensor` representing the sensitivity at the given
`specificity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `sensitivity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`specificity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
return metrics.sensitivity_at_specificity(
specificity=specificity,
num_thresholds=num_thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
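# Usage sketch (illustrative only; `scores` and `targets` are hypothetical
# tensors): pin the operating point at 95% specificity and read off the
# achievable sensitivity.
#
#   sensitivity, update_op = streaming_sensitivity_at_specificity(
#       predictions=scores, labels=targets, specificity=0.95,
#       num_thresholds=200)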
@deprecated(
None, 'Please switch to tf.metrics.precision_at_thresholds. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_precision_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision values for different `thresholds` on `predictions`.
The `streaming_precision_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `precision[i]` is defined as the total
weight of values in `predictions` above `thresholds[i]` whose corresponding
entry in `labels` is `True`, divided by the total weight of values in
`predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] +
false_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.precision_at_thresholds(
thresholds=thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None,
'Please switch to tf.metrics.recall_at_thresholds. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_recall_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various recall values for different `thresholds` on `predictions`.
The `streaming_recall_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `recall[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `True`, divided by the total weight of `True` values in `labels`
(`true_positives[i] / (true_positives[i] + false_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `recall`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.recall_at_thresholds(
thresholds=thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_positive_rate_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various fpr values for different `thresholds` on `predictions`.
The `streaming_false_positive_rate_at_thresholds` function creates two
  local variables, `false_positives` and `true_negatives`, for various
  threshold values. `false_positive_rate[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `False`, divided by the total weight of `False` values in `labels`
(`false_positives[i] / (false_positives[i] + true_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_positive_rate`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_positive_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positive_rate: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `false_positives` and
`true_negatives` variables that are used in the computation of
`false_positive_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_positive_rate_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights, includes=('fp', 'tn'))
# Avoid division by zero.
epsilon = _EPSILON
def compute_fpr(fp, tn, name):
return math_ops.div(fp, epsilon + fp + tn, name='fpr_' + name)
fpr = compute_fpr(values['fp'], values['tn'], 'value')
update_op = compute_fpr(update_ops['fp'], update_ops['tn'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fpr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fpr, update_op
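# Usage sketch (illustrative only; `scores` and `targets` are hypothetical
# tensors). With thresholds [0.25, 0.5, 0.75], the returned `fpr` has shape
# [3], one false-positive rate per threshold:
#
#   fpr, update_op = streaming_false_positive_rate_at_thresholds(
#       predictions=scores, labels=targets, thresholds=[0.25, 0.5, 0.75])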
def streaming_false_negative_rate_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various fnr values for different `thresholds` on `predictions`.
  The `streaming_false_negative_rate_at_thresholds` function creates two
  local variables, `false_negatives` and `true_positives`, for various
  threshold values. `false_negative_rate[i]` is defined as the total weight
  of values in `predictions` at or below `thresholds[i]` whose corresponding
  entry in `labels` is `True`, divided by the total weight of `True` values in
  `labels` (`false_negatives[i] / (false_negatives[i] + true_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
  `false_negative_rate`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_negative_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negative_rate: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `false_negatives` and
`true_positives` variables that are used in the computation of
`false_negative_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_negative_rate_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights, includes=('fn', 'tp'))
# Avoid division by zero.
epsilon = _EPSILON
def compute_fnr(fn, tp, name):
return math_ops.div(fn, epsilon + fn + tp, name='fnr_' + name)
fnr = compute_fnr(values['fn'], values['tp'], 'value')
update_op = compute_fnr(update_ops['fn'], update_ops['tp'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fnr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fnr, update_op
def _at_k_name(name, k=None, class_id=None):
if k is not None:
name = '%s_at_%d' % (name, k)
else:
name = '%s_at_k' % (name)
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
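# For reference, `_at_k_name` produces names such as:
#   _at_k_name('recall', 5)                 -> 'recall_at_5'
#   _at_k_name('precision', class_id=3)     -> 'precision_at_k_class3'
#   _at_k_name('precision', 5, class_id=3)  -> 'precision_at_5_class3'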
@deprecated('2016-11-08', 'Please use `streaming_sparse_recall_at_k`, '
'and reshape labels from [batch_size] to [batch_size, 1].')
def streaming_recall_at_k(predictions,
labels,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall@k of the predictions with respect to dense labels.
The `streaming_recall_at_k` function creates two local variables, `total` and
`count`, that are used to compute the recall@k frequency. This frequency is
ultimately returned as `recall_at_<k>`: an idempotent operation that simply
divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, an `in_top_k` operation computes a `Tensor` with
shape [batch_size] whose elements indicate whether or not the corresponding
label is in the top `k` `predictions`. Then `update_op` increments `total`
with the reduced sum of `weights` where `in_top_k` is `True`, and it
increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A float `Tensor` of dimension [batch_size, num_classes].
labels: A `Tensor` of dimension [batch_size] whose type is in `int32`,
`int64`.
k: The number of top elements to look at for computing recall.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall_at_k`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
recall_at_k: A `Tensor` representing the recall@k, the fraction of labels
which fall into the top `k` predictions.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `recall_at_k`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
in_top_k = math_ops.to_float(nn.in_top_k(predictions, labels, k))
return streaming_mean(in_top_k, weights, metrics_collections,
updates_collections, name or _at_k_name('recall', k))
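# Illustrative example (values chosen for exposition only): with
#   predictions = [[0.1, 0.9], [0.8, 0.2]], labels = [1, 1], k = 1,
# `nn.in_top_k` yields [True, False], so with unit weights the streaming mean
# (recall@1) after a single update is 0.5.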
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_recall_at_k(predictions,
labels,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of the predictions with respect to sparse labels.
If `class_id` is not specified, we'll calculate recall as the ratio of true
positives (i.e., correct predictions, items in the top `k` highest
`predictions` that are found in the corresponding row in `labels`) to
actual positives (the full `labels` row).
  If `class_id` is specified, we calculate recall by considering only the rows
  in the batch for which `class_id` is in `labels`, and computing the
  fraction of them for which `class_id` is in the top `k` highest
  `predictions`.
`streaming_sparse_recall_at_k` creates two local variables,
`true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute
the recall_at_k frequency. This frequency is ultimately returned as
`recall_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false negatives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_negative_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`.
Values should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range always count
towards `false_negative_at_<k>`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
return metrics.recall_at_k(
k=k,
class_id=class_id,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
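# Usage sketch (illustrative only; `logits` and `label_ids` are hypothetical
# tensors of shape [batch_size, num_classes] and [batch_size, num_labels]):
#
#   recall, update_op = streaming_sparse_recall_at_k(
#       predictions=logits, labels=label_ids, k=5)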
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_k(predictions,
labels,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of the predictions with respect to sparse labels.
If `class_id` is not specified, we calculate precision as the ratio of true
positives (i.e., correct predictions, items in the top `k` highest
`predictions` that are found in the corresponding row in `labels`) to
positives (all top `k` `predictions`).
If `class_id` is specified, we calculate precision by considering only the
rows in the batch for which `class_id` is in the top `k` highest
`predictions`, and computing the fraction of them for which `class_id` is
in the corresponding row in `labels`.
We expect precision to decrease as `k` increases.
`streaming_sparse_precision_at_k` creates two local variables,
`true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_positive_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
return metrics.precision_at_k(
k=k,
class_id=class_id,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_top_k(top_k_predictions,
labels,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of top-k predictions with respect to sparse labels.
If `class_id` is not specified, we calculate precision as the ratio of
true positives (i.e., correct predictions, items in `top_k_predictions`
that are found in the corresponding row in `labels`) to positives (all
`top_k_predictions`).
If `class_id` is specified, we calculate precision by considering only the
rows in the batch for which `class_id` is in the top `k` highest
`predictions`, and computing the fraction of them for which `class_id` is
in the corresponding row in `labels`.
We expect precision to decrease as `k` increases.
`streaming_sparse_precision_at_top_k` creates two local variables,
`true_positive_at_k` and `false_positive_at_k`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_k`: an idempotent operation that simply divides
`true_positive_at_k` by total (`true_positive_at_k` + `false_positive_at_k`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_k`. Internally, set operations applied to `top_k_predictions`
and `labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_k` and
`false_positive_at_k` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].
The final dimension contains the indices of top-k labels. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`top_k_predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
ValueError: If `top_k_predictions` has rank < 2.
"""
default_name = _at_k_name('precision', class_id=class_id)
with ops.name_scope(name, default_name,
(top_k_predictions, labels, weights)) as name_scope:
return metrics_impl.precision_at_top_k(
labels=labels,
predictions_idx=top_k_predictions,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name_scope)
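# Usage sketch (illustrative only; `logits` and `label_ids` are hypothetical
# tensors). The caller supplies the top-k class indices directly, e.g. computed
# with `nn.top_k`:
#
#   _, top_k_idx = nn.top_k(logits, k=5)
#   precision, update_op = streaming_sparse_precision_at_top_k(
#       top_k_predictions=math_ops.to_int64(top_k_idx), labels=label_ids)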
def sparse_recall_at_top_k(labels,
top_k_predictions,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of top-k predictions with respect to sparse labels.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing
the fraction of them for which `class_id` is in the top-k `predictions`.
  If `class_id` is not specified, we calculate recall as how often, on
  average, a class among the labels of a batch entry is in the top-k
`predictions`.
`sparse_recall_at_top_k` creates two local variables, `true_positive_at_<k>`
and `false_negative_at_<k>`, that are used to compute the recall_at_k
frequency. This frequency is ultimately returned as `recall_at_<k>`: an
idempotent operation that simply divides `true_positive_at_<k>` by total
(`true_positive_at_<k>` + `false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Set operations applied to `top_k` and `labels` calculate the
true positives and false negatives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_negative_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`top_k_predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range always count towards `false_negative_at_<k>`.
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].
The final dimension contains the indices of top-k labels. [D1, ... DN]
must match `labels`.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
default_name = _at_k_name('recall', class_id=class_id)
with ops.name_scope(name, default_name,
(top_k_predictions, labels, weights)) as name_scope:
return metrics_impl.recall_at_top_k(
labels=labels,
predictions_idx=top_k_predictions,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name_scope)
def _compute_recall_at_precision(tp, fp, fn, precision, name):
"""Helper function to compute recall at a given `precision`.
Args:
tp: The number of true positives.
fp: The number of false positives.
fn: The number of false negatives.
precision: The precision for which the recall will be calculated.
name: An optional variable_scope name.
Returns:
    The recall at the given `precision`.
"""
precisions = math_ops.div(tp, tp + fp + _EPSILON)
tf_index = math_ops.argmin(
math_ops.abs(precisions - precision), 0, output_type=dtypes.int32)
# Now, we have the implicit threshold, so compute the recall:
return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,
name)
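# Illustrative example (values chosen for exposition only): with
#   tp = [4., 3., 1.], fp = [2., 1., 0.], fn = [0., 1., 3.]
# the per-threshold precisions are approximately [0.67, 0.75, 1.0]. For a
# target precision of 0.8, argmin selects index 1 (|0.75 - 0.8| is smallest),
# so the returned recall is tp[1] / (tp[1] + fn[1]) = 0.75.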
def recall_at_precision(labels,
predictions,
precision,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes `recall` at `precision`.
  The `recall_at_precision` function creates three local variables,
`tp` (true positives), `fp` (false positives) and `fn` (false negatives)
that are used to compute the `recall` at the given `precision` value. The
threshold for the given `precision` value is computed and used to evaluate the
corresponding `recall`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall`. `update_op` increments the `tp`, `fp` and `fn` counts with the
weight of each case found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
precision: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
`precision`.
metrics_collections: An optional list of collections that `recall`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: A scalar `Tensor` representing the recall at the given
`precision` value.
update_op: An operation that increments the `tp`, `fp` and `fn`
variables appropriately and whose value matches `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`precision` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
if not 0 <= precision <= 1:
raise ValueError('`precision` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'recall_at_precision',
(predictions, labels, weights)):
thresholds = [
i * 1.0 / (num_thresholds - 1) for i in range(1, num_thresholds - 1)
]
thresholds = [0.0 - _EPSILON] + thresholds + [1.0 + _EPSILON]
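    # For example, with num_thresholds = 5 this builds
    # [-_EPSILON, 0.25, 0.5, 0.75, 1.0 + _EPSILON], so every prediction in
    # [0, 1] falls strictly inside the covered range.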
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights)
recall = _compute_recall_at_precision(values['tp'], values['fp'],
values['fn'], precision, 'value')
update_op = _compute_recall_at_precision(update_ops['tp'], update_ops['fp'],
update_ops['fn'], precision,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, recall)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return recall, update_op
def streaming_sparse_average_precision_at_k(predictions,
labels,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
  See `sparse_average_precision_at_k` for details on the formula. `weights` are
  applied to the result of `sparse_average_precision_at_k`.
`streaming_sparse_average_precision_at_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and `predictions` has shape
[batch size, num_classes]. The final dimension contains the logit values
for each class. [D1, ... DN] must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
k: Integer, k for @k metric. This will calculate an average precision for
range `[1,k]`, as documented above.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
"""
return metrics.average_precision_at_k(
k=k,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_sparse_average_precision_at_top_k(top_k_predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
`streaming_sparse_average_precision_at_top_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Set operations applied to `top_k` and `labels` calculate
the true positives and false positives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_positive_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final
dimension must be set and contains the top `k` predicted class indices.
[D1, ... DN] must match `labels`. Values should be in range
[0, num_classes).
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `top_k_predictions`.
Values should be in range [0, num_classes).
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
Raises:
ValueError: if the last dimension of top_k_predictions is not set.
"""
return metrics_impl._streaming_sparse_average_precision_at_top_k( # pylint: disable=protected-access
predictions_idx=top_k_predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.mean.')
def streaming_mean_absolute_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean absolute error between the labels and predictions.
The `streaming_mean_absolute_error` function creates two local variables,
`total` and `count` that are used to compute the mean absolute error. This
average is weighted by `weights`, and it is ultimately returned as
`mean_absolute_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_absolute_error`. Internally, an `absolute_errors` operation computes the
absolute value of the differences between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `absolute_errors`, and it increments `count` with the reduced
  sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_absolute_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_absolute_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_absolute_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_absolute_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_mean_relative_error(predictions,
labels,
normalizer,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean relative error by normalizing with the given values.
The `streaming_mean_relative_error` function creates two local variables,
`total` and `count` that are used to compute the mean relative absolute error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_relative_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
  `mean_relative_error`. Internally, a `relative_errors` operation divides the
absolute value of the differences between `predictions` and `labels` by the
`normalizer`. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `relative_errors`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
normalizer: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_relative_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_relative_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_relative_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_relative_error(
normalizer=normalizer,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_mean_squared_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean squared error between the labels and predictions.
The `streaming_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_squared_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_squared_error`. Internally, a `squared_error` operation computes the
element-wise square of the difference between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_squared_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_squared_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_root_mean_squared_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the root mean squared error between the labels and predictions.
The `streaming_root_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the root mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`root_mean_squared_error`: an idempotent operation that takes the square root
of the division of `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`root_mean_squared_error`. Internally, a `squared_error` operation computes
the element-wise square of the difference between `predictions` and `labels`.
Then `update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`root_mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
root_mean_squared_error: A `Tensor` representing the current mean, the value
of `total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `root_mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.root_mean_squared_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_covariance(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the unbiased sample covariance between `predictions` and `labels`.
The `streaming_covariance` function creates four local variables,
`comoment`, `mean_prediction`, `mean_label`, and `count`, which are used to
compute the sample covariance between predictions and labels across multiple
batches of data. The covariance is ultimately returned as an idempotent
operation that simply divides `comoment` by `count` - 1. We use `count` - 1
in order to get an unbiased estimate.
The algorithm used for this online computation is described in
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance.
Specifically, the formula used to combine two sample comoments is
`C_AB = C_A + C_B + (E[x_A] - E[x_B]) * (E[y_A] - E[y_B]) * n_A * n_B / n_AB`
The comoment for a single batch of data is simply
`sum((x - E[x]) * (y - E[y]))`, optionally weighted.
If `weights` is not None, then it is used to compute weighted comoments,
means, and count. NOTE: these weights are treated as "frequency weights", as
opposed to "reliability weights". See discussion of the difference on
https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
To facilitate the computation of covariance across multiple batches of data,
the function creates an `update_op` operation, which updates underlying
variables and returns the updated covariance.
Args:
predictions: A `Tensor` of arbitrary size.
labels: A `Tensor` of the same size as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
covariance: A `Tensor` representing the current unbiased sample covariance,
`comoment` / (`count` - 1).
update_op: An operation that updates the local variables appropriately.
Raises:
ValueError: If labels and predictions are of different sizes or if either
`metrics_collections` or `updates_collections` are not a list or tuple.
"""
with variable_scope.variable_scope(name, 'covariance',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
mean_prediction = metrics_impl.metric_variable(
[], dtypes.float32, name='mean_prediction')
mean_label = metrics_impl.metric_variable(
[], dtypes.float32, name='mean_label')
comoment = metrics_impl.metric_variable( # C_A in update equation
[], dtypes.float32, name='comoment')
if weights is None:
batch_count = math_ops.to_float(array_ops.size(labels)) # n_B in eqn
weighted_predictions = predictions
weighted_labels = labels
else:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
batch_count = math_ops.reduce_sum(weights) # n_B in eqn
weighted_predictions = math_ops.multiply(predictions, weights)
weighted_labels = math_ops.multiply(labels, weights)
update_count = state_ops.assign_add(count_, batch_count) # n_AB in eqn
prev_count = update_count - batch_count # n_A in update equation
# We update the means by Delta=Error*BatchCount/(BatchCount+PrevCount)
# batch_mean_prediction is E[x_B] in the update equation
batch_mean_prediction = _safe_div(
math_ops.reduce_sum(weighted_predictions), batch_count,
'batch_mean_prediction')
delta_mean_prediction = _safe_div(
(batch_mean_prediction - mean_prediction) * batch_count, update_count,
'delta_mean_prediction')
update_mean_prediction = state_ops.assign_add(mean_prediction,
delta_mean_prediction)
# prev_mean_prediction is E[x_A] in the update equation
prev_mean_prediction = update_mean_prediction - delta_mean_prediction
# batch_mean_label is E[y_B] in the update equation
batch_mean_label = _safe_div(
math_ops.reduce_sum(weighted_labels), batch_count, 'batch_mean_label')
delta_mean_label = _safe_div((batch_mean_label - mean_label) * batch_count,
update_count, 'delta_mean_label')
update_mean_label = state_ops.assign_add(mean_label, delta_mean_label)
# prev_mean_label is E[y_A] in the update equation
prev_mean_label = update_mean_label - delta_mean_label
unweighted_batch_coresiduals = ((predictions - batch_mean_prediction) *
(labels - batch_mean_label))
# batch_comoment is C_B in the update equation
if weights is None:
batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals)
else:
batch_comoment = math_ops.reduce_sum(
unweighted_batch_coresiduals * weights)
# View delta_comoment as = C_AB - C_A in the update equation above.
# Since C_A is stored in a var, by how much do we need to increment that var
# to make the var = C_AB?
delta_comoment = (
batch_comoment + (prev_mean_prediction - batch_mean_prediction) *
(prev_mean_label - batch_mean_label) *
(prev_count * batch_count / update_count))
update_comoment = state_ops.assign_add(comoment, delta_comoment)
covariance = array_ops.where(
math_ops.less_equal(count_, 1.),
float('nan'),
math_ops.truediv(comoment, count_ - 1),
name='covariance')
with ops.control_dependencies([update_comoment]):
update_op = array_ops.where(
math_ops.less_equal(count_, 1.),
float('nan'),
math_ops.truediv(comoment, count_ - 1),
name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, covariance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return covariance, update_op
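# A minimal illustrative sketch (not part of the original API): the assignment
# to `comoment` above implements the pairwise merge
#   C_AB = C_A + C_B + (E[x_A] - E[x_B]) * (E[y_A] - E[y_B]) * n_A * n_B / n_AB
# quoted in the docstring. The helper below only checks that identity with
# plain NumPy; its name and the use of NumPy here are assumptions.
def _comoment_merge_check_sketch(seed=0):
  """Checks the two-batch comoment merge against a single-pass comoment."""
  import numpy as np
  rng = np.random.RandomState(seed)
  x_a, y_a = rng.randn(5), rng.randn(5)
  x_b, y_b = rng.randn(7), rng.randn(7)
  def comoment(x, y):
    return np.sum((x - x.mean()) * (y - y.mean()))
  n_a, n_b = float(len(x_a)), float(len(x_b))
  merged = (comoment(x_a, y_a) + comoment(x_b, y_b) +
            (x_a.mean() - x_b.mean()) * (y_a.mean() - y_b.mean()) *
            n_a * n_b / (n_a + n_b))
  single_pass = comoment(np.concatenate([x_a, x_b]),
                         np.concatenate([y_a, y_b]))
  return np.allclose(merged, single_pass)  # True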
def streaming_pearson_correlation(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes Pearson correlation coefficient between `predictions`, `labels`.
The `streaming_pearson_correlation` function delegates to
`streaming_covariance` the tracking of three [co]variances:
- `streaming_covariance(predictions, labels)`, i.e. covariance
- `streaming_covariance(predictions, predictions)`, i.e. variance
- `streaming_covariance(labels, labels)`, i.e. variance
The product-moment correlation ultimately returned is an idempotent operation
`cov(predictions, labels) / sqrt(var(predictions) * var(labels))`. To
facilitate correlation computation across multiple batches, the function
groups the `update_op`s of the underlying streaming_covariance and returns an
`update_op`.
If `weights` is not None, then it is used to compute a weighted correlation.
NOTE: these weights are treated as "frequency weights", as opposed to
"reliability weights". See discussion of the difference on
https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
Args:
predictions: A `Tensor` of arbitrary size.
labels: A `Tensor` of the same size as predictions.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
pearson_r: A `Tensor` representing the current Pearson product-moment
correlation coefficient, the value of
`cov(predictions, labels) / sqrt(var(predictions) * var(labels))`.
update_op: An operation that updates the underlying variables appropriately.
Raises:
ValueError: If `labels` and `predictions` are of different sizes, or if
`weights` is the wrong size, or if either `metrics_collections` or
`updates_collections` are not a `list` or `tuple`.
"""
with variable_scope.variable_scope(name, 'pearson_r',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# Broadcast weights here to avoid duplicate broadcasting in each call to
# `streaming_covariance`.
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
cov, update_cov = streaming_covariance(
predictions, labels, weights=weights, name='covariance')
var_predictions, update_var_predictions = streaming_covariance(
predictions, predictions, weights=weights, name='variance_predictions')
var_labels, update_var_labels = streaming_covariance(
labels, labels, weights=weights, name='variance_labels')
pearson_r = math_ops.truediv(
cov,
math_ops.multiply(
math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)),
name='pearson_r')
update_op = math_ops.truediv(
update_cov,
math_ops.multiply(
math_ops.sqrt(update_var_predictions),
math_ops.sqrt(update_var_labels)),
name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, pearson_r)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return pearson_r, update_op
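# A minimal illustrative sketch (not part of the original API): the ratio
# formed above is the textbook Pearson coefficient
# cov(x, y) / sqrt(var(x) * var(y)). The helper below compares that form with
# np.corrcoef on unweighted data; its name and the use of NumPy are
# assumptions.
def _pearson_from_covariances_check_sketch(seed=0):
  import numpy as np
  rng = np.random.RandomState(seed)
  x, y = rng.randn(100), rng.randn(100)
  def sample_cov(a, b):
    return np.sum((a - a.mean()) * (b - b.mean())) / (len(a) - 1)
  r = sample_cov(x, y) / np.sqrt(sample_cov(x, x) * sample_cov(y, y))
  return np.allclose(r, np.corrcoef(x, y)[0, 1])  # True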
# TODO(nsilberman): add a 'normalized' flag so that the user can request
# normalization if the inputs are not normalized.
def streaming_mean_cosine_distance(predictions,
labels,
dim,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the cosine distance between the labels and predictions.
The `streaming_mean_cosine_distance` function creates two local variables,
`total` and `count` that are used to compute the average cosine distance
between `predictions` and `labels`. This average is weighted by `weights`,
and it is ultimately returned as `mean_distance`, which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_distance`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of the same shape as `labels`.
labels: A `Tensor` of arbitrary shape.
dim: The dimension along which the cosine distance is computed.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`,
and whose dimension `dim` is 1.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
mean_distance: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.multiply(predictions, labels)
radial_diffs = math_ops.reduce_sum(
radial_diffs, reduction_indices=[
dim,
], keep_dims=True)
mean_distance, update_op = streaming_mean(radial_diffs, weights, None, None,
name or 'mean_cosine_distance')
mean_distance = math_ops.subtract(1.0, mean_distance)
update_op = math_ops.subtract(1.0, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_distance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_distance, update_op
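# A minimal illustrative sketch (not part of the original API): the reduction
# above computes 1 - sum(predictions * labels) along `dim`, which equals the
# cosine distance only when both inputs are unit-normalized along that
# dimension (see the TODO above `streaming_mean_cosine_distance`). The NumPy
# comparison below and its name are assumptions.
def _cosine_distance_check_sketch(seed=0):
  import numpy as np
  rng = np.random.RandomState(seed)
  predictions = rng.randn(4, 3)
  labels = rng.randn(4, 3)
  predictions /= np.linalg.norm(predictions, axis=1, keepdims=True)
  labels /= np.linalg.norm(labels, axis=1, keepdims=True)
  streaming_style = 1.0 - np.sum(predictions * labels, axis=1)
  full_formula = 1.0 - np.sum(predictions * labels, axis=1) / (
      np.linalg.norm(predictions, axis=1) * np.linalg.norm(labels, axis=1))
  return np.allclose(streaming_style, full_formula)  # True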
def streaming_percentage_less(values,
threshold,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the percentage of values less than the given threshold.
The `streaming_percentage_less` function creates two local variables,
`total` and `count` that are used to compute the percentage of `values` that
fall below `threshold`. This rate is weighted by `weights`, and it is
ultimately returned as `percentage` which is an idempotent operation that
simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`percentage`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A numeric `Tensor` of arbitrary size.
threshold: A scalar threshold.
weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
percentage: A `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.percentage_below(
values=values,
threshold=threshold,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_mean_iou(predictions,
labels,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculate per-step mean Intersection-Over-Union (mIOU).
Mean Intersection-Over-Union is a common evaluation metric for
semantic image segmentation, which first computes the IOU for each
semantic class and then computes the average over classes.
IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by `weights`,
and mIOU is then calculated from it.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean_iou`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
      flattened if its rank > 1.
    labels: A `Tensor` of ground truth labels with shape [batch size] and of
      type `int32` or `int64`. The tensor will be flattened if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `mean_iou`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
mean_iou: A `Tensor` representing the mean intersection-over-union.
update_op: An operation that increments the confusion matrix.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_iou(
num_classes=num_classes,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
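# A minimal illustrative sketch (not part of the original API): mean IOU from
# an accumulated confusion matrix `cm`, where cm[i, j] counts examples with
# label i predicted as class j. Per class,
#   IOU = TP / (TP + FP + FN) = cm[i, i] / (row_sum[i] + col_sum[i] - cm[i, i]).
# The NumPy helper below, including averaging only over classes that appear,
# is an assumption and not the implementation behind `metrics.mean_iou`.
def _mean_iou_from_confusion_matrix_sketch(cm):
  import numpy as np
  cm = np.asarray(cm, dtype=np.float64)
  true_pos = np.diag(cm)
  union = cm.sum(axis=0) + cm.sum(axis=1) - true_pos
  present = union > 0
  return np.mean(true_pos[present] / union[present])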
def _next_array_size(required_size, growth_factor=1.5):
"""Calculate the next size for reallocating a dynamic array.
Args:
required_size: number or tf.Tensor specifying required array capacity.
growth_factor: optional number or tf.Tensor specifying the growth factor
between subsequent allocations.
Returns:
tf.Tensor with dtype=int32 giving the next array size.
"""
exponent = math_ops.ceil(
math_ops.log(math_ops.cast(required_size, dtypes.float32)) / math_ops.log(
math_ops.cast(growth_factor, dtypes.float32)))
return math_ops.cast(math_ops.ceil(growth_factor**exponent), dtypes.int32)
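# A minimal illustrative sketch (not part of the original API):
# `_next_array_size` returns the smallest power of `growth_factor` (rounded up)
# that covers `required_size`; e.g. with the default factor of 1.5, a required
# size of 10 yields ceil(1.5 ** 6) = 12. The plain-Python mirror below is an
# assumption, used only to illustrate the arithmetic.
def _next_array_size_sketch(required_size, growth_factor=1.5):
  import math
  exponent = math.ceil(math.log(required_size) / math.log(growth_factor))
  return int(math.ceil(growth_factor ** exponent))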
def streaming_concat(values,
axis=0,
max_size=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Concatenate values along an axis across batches.
The function `streaming_concat` creates two local variables, `array` and
`size`, that are used to store concatenated values. Internally, `array` is
  used as storage for a dynamic array (if `max_size` is `None`), which ensures
that updates can be run in amortized constant time.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that appends the values of a tensor and returns the
length of the concatenated axis.
This op allows for evaluating metrics that cannot be updated incrementally
using the same framework as other streaming metrics.
Args:
values: `Tensor` to concatenate. Rank and the shape along all axes other
than the axis to concatenate along must be statically known.
axis: optional integer axis to concatenate along.
max_size: optional integer maximum size of `value` along the given axis.
Once the maximum size is reached, further updates are no-ops. By default,
there is no maximum size: the array is resized as necessary.
metrics_collections: An optional list of collections that `value`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
value: A `Tensor` representing the concatenated values.
update_op: An operation that concatenates the next values.
Raises:
ValueError: if `values` does not have a statically known rank, `axis` is
not in the valid range or the size of `values` is not statically known
along any axis other than `axis`.
"""
with variable_scope.variable_scope(name, 'streaming_concat', (values,)):
# pylint: disable=invalid-slice-index
values_shape = values.get_shape()
if values_shape.dims is None:
      raise ValueError('`values` must have statically known rank')
ndim = len(values_shape)
if axis < 0:
axis += ndim
if not 0 <= axis < ndim:
raise ValueError('axis = %r not in [0, %r)' % (axis, ndim))
fixed_shape = [dim.value for n, dim in enumerate(values_shape) if n != axis]
if any(value is None for value in fixed_shape):
raise ValueError('all dimensions of `values` other than the dimension to '
'concatenate along must have statically known size')
# We move `axis` to the front of the internal array so assign ops can be
# applied to contiguous slices
init_size = 0 if max_size is None else max_size
init_shape = [init_size] + fixed_shape
array = metrics_impl.metric_variable(
init_shape, values.dtype, validate_shape=False, name='array')
size = metrics_impl.metric_variable([], dtypes.int32, name='size')
perm = [0 if n == axis else n + 1 if n < axis else n for n in range(ndim)]
valid_array = array[:size]
valid_array.set_shape([None] + fixed_shape)
value = array_ops.transpose(valid_array, perm, name='concat')
values_size = array_ops.shape(values)[axis]
if max_size is None:
batch_size = values_size
else:
batch_size = math_ops.minimum(values_size, max_size - size)
perm = [axis] + [n for n in range(ndim) if n != axis]
batch_values = array_ops.transpose(values, perm)[:batch_size]
def reallocate():
next_size = _next_array_size(new_size)
next_shape = array_ops.stack([next_size] + fixed_shape)
new_value = array_ops.zeros(next_shape, dtype=values.dtype)
old_value = array.value()
assign_op = state_ops.assign(array, new_value, validate_shape=False)
with ops.control_dependencies([assign_op]):
copy_op = array[:size].assign(old_value[:size])
# return value needs to be the same dtype as no_op() for cond
with ops.control_dependencies([copy_op]):
return control_flow_ops.no_op()
new_size = size + batch_size
array_size = array_ops.shape_internal(array, optimize=False)[0]
maybe_reallocate_op = control_flow_ops.cond(
new_size > array_size, reallocate, control_flow_ops.no_op)
with ops.control_dependencies([maybe_reallocate_op]):
append_values_op = array[size:new_size].assign(batch_values)
with ops.control_dependencies([append_values_op]):
update_op = size.assign(new_size)
if metrics_collections:
ops.add_to_collections(metrics_collections, value)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return value, update_op
# pylint: enable=invalid-slice-index
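# An illustrative usage sketch for `streaming_concat`, assuming the TF 1.x
# graph and Session APIs (tf.placeholder, tf.Session,
# tf.local_variables_initializer); the variable names below are hypothetical.
def _streaming_concat_usage_sketch():
  import tensorflow as tf
  batch = tf.placeholder(tf.float32, shape=[None, 2])
  value, update_op = streaming_concat(batch, axis=0)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op, feed_dict={batch: [[1., 2.], [3., 4.]]})
    sess.run(update_op, feed_dict={batch: [[5., 6.]]})
    return sess.run(value)  # three rows, concatenated along axis 0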
def aggregate_metrics(*value_update_tuples):
"""Aggregates the metric value tensors and update ops into two lists.
Args:
*value_update_tuples: a variable number of tuples, each of which contain the
pair of (value_tensor, update_op) from a streaming metric.
Returns:
A list of value `Tensor` objects and a list of update ops.
Raises:
ValueError: if `value_update_tuples` is empty.
"""
if not value_update_tuples:
raise ValueError('Expected at least one value_tensor/update_op pair')
value_ops, update_ops = zip(*value_update_tuples)
return list(value_ops), list(update_ops)
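# An illustrative usage sketch for `aggregate_metrics`; the particular metrics
# paired below are arbitrary examples, not a recommendation.
def _aggregate_metrics_usage_sketch(predictions, labels):
  value_ops, update_ops = aggregate_metrics(
      streaming_mean_absolute_error(predictions, labels),
      streaming_root_mean_squared_error(predictions, labels))
  return value_ops, update_ops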
def aggregate_metric_map(names_to_tuples):
"""Aggregates the metric names to tuple dictionary.
This function is useful for pairing metric names with their associated value
and update ops when the list of metrics is long. For example:
```python
metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({
        'Mean Absolute Error': slim.metrics.streaming_mean_absolute_error(
            predictions, labels, weights),
        'Mean Relative Error': slim.metrics.streaming_mean_relative_error(
            predictions, labels, labels, weights),
        'RMSE Linear': slim.metrics.streaming_root_mean_squared_error(
            predictions, labels, weights),
        'RMSE Log': slim.metrics.streaming_root_mean_squared_error(
            predictions, labels, weights),
})
```
Args:
names_to_tuples: a map of metric names to tuples, each of which contain the
pair of (value_tensor, update_op) from a streaming metric.
Returns:
A dictionary from metric names to value ops and a dictionary from metric
names to update ops.
"""
metric_names = names_to_tuples.keys()
value_ops, update_ops = zip(*names_to_tuples.values())
return dict(zip(metric_names, value_ops)), dict(zip(metric_names, update_ops))
def count(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the number of examples, or sum of `weights`.
When evaluating some metric (e.g. mean) on one or more subsets of the data,
this auxiliary metric is useful for keeping track of how many examples there
are in each subset.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
    values: A `Tensor` of arbitrary dimensions. Only its shape is used.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `values`
      dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
count: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the metric from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
with variable_scope.variable_scope(name, 'count', (values, weights)):
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
if weights is None:
num_values = math_ops.to_float(array_ops.size(values))
else:
_, _, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=values,
labels=None,
weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
num_values = math_ops.reduce_sum(weights)
with ops.control_dependencies([values]):
update_op = state_ops.assign_add(count_, num_values)
if metrics_collections:
ops.add_to_collections(metrics_collections, count_)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return count_, update_op
def cohen_kappa(labels,
predictions_idx,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates Cohen's kappa.
[Cohen's kappa](https://en.wikipedia.org/wiki/Cohen's_kappa) is a statistic
that measures inter-annotator agreement.
The `cohen_kappa` function calculates the confusion matrix, and creates three
local variables to compute the Cohen's kappa: `po`, `pe_row`, and `pe_col`,
which refer to the diagonal part, rows and columns totals of the confusion
matrix, respectively. This value is ultimately returned as `kappa`, an
idempotent operation that is calculated by
pe = (pe_row * pe_col) / N
k = (sum(po) - sum(pe)) / (N - sum(pe))
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`kappa`. `update_op` weights each prediction by the corresponding value in
`weights`.
Class labels are expected to start at 0. E.g., if `num_classes`
was three, then the possible labels would be [0, 1, 2].
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  NOTE: Equivalent to `sklearn.metrics.cohen_kappa_score`, but this method
  does not support weighted matrices yet.
Args:
labels: 1-D `Tensor` of real labels for the classification task. Must be
one of the following types: int16, int32, int64.
predictions_idx: 1-D `Tensor` of predicted class indices for a given
classification. Must have the same type as `labels`.
num_classes: The possible number of labels.
weights: Optional `Tensor` whose shape matches `predictions`.
metrics_collections: An optional list of collections that `kappa` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
kappa: Scalar float `Tensor` representing the current Cohen's kappa.
update_op: `Operation` that increments `po`, `pe_row` and `pe_col`
variables appropriately and whose value matches `kappa`.
Raises:
ValueError: If `num_classes` is less than 2, or `predictions` and `labels`
have mismatched shapes, or if `weights` is not `None` and its shape
doesn't match `predictions`, or if either `metrics_collections` or
`updates_collections` are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.in_eager_mode():
    raise RuntimeError('tf.contrib.metrics.cohen_kappa is not supported '
                       'when eager execution is enabled.')
if num_classes < 2:
    raise ValueError('`num_classes` must be >= 2. '
                     'Found: {}'.format(num_classes))
with variable_scope.variable_scope(name, 'cohen_kappa',
(labels, predictions_idx, weights)):
# Convert 2-dim (num, 1) to 1-dim (num,)
labels.get_shape().with_rank_at_most(2)
if labels.get_shape().ndims == 2:
labels = array_ops.squeeze(labels, axis=[-1])
predictions_idx, labels, weights = (
metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions_idx,
labels=labels,
weights=weights))
predictions_idx.get_shape().assert_is_compatible_with(labels.get_shape())
stat_dtype = (
dtypes.int64
if weights is None or weights.dtype.is_integer else dtypes.float32)
po = metrics_impl.metric_variable((num_classes,), stat_dtype, name='po')
pe_row = metrics_impl.metric_variable(
(num_classes,), stat_dtype, name='pe_row')
pe_col = metrics_impl.metric_variable(
(num_classes,), stat_dtype, name='pe_col')
# Table of the counts of agreement:
counts_in_table = confusion_matrix.confusion_matrix(
labels,
predictions_idx,
num_classes=num_classes,
weights=weights,
dtype=stat_dtype,
name='counts_in_table')
po_t = array_ops.diag_part(counts_in_table)
pe_row_t = math_ops.reduce_sum(counts_in_table, axis=0)
pe_col_t = math_ops.reduce_sum(counts_in_table, axis=1)
update_po = state_ops.assign_add(po, po_t)
update_pe_row = state_ops.assign_add(pe_row, pe_row_t)
update_pe_col = state_ops.assign_add(pe_col, pe_col_t)
def _calculate_k(po, pe_row, pe_col, name):
po_sum = math_ops.reduce_sum(po)
total = math_ops.reduce_sum(pe_row)
pe_sum = math_ops.reduce_sum(
metrics_impl._safe_div( # pylint: disable=protected-access
pe_row * pe_col, total, None))
po_sum, pe_sum, total = (math_ops.to_double(po_sum),
math_ops.to_double(pe_sum),
math_ops.to_double(total))
# kappa = (po - pe) / (N - pe)
k = metrics_impl._safe_scalar_div( # pylint: disable=protected-access
po_sum - pe_sum,
total - pe_sum,
name=name)
return k
kappa = _calculate_k(po, pe_row, pe_col, name='value')
update_op = _calculate_k(
update_po, update_pe_row, update_pe_col, name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, kappa)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return kappa, update_op
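# A minimal illustrative sketch (not part of the original API): Cohen's kappa
# from a finished confusion matrix, using the same bookkeeping as above,
#   pe = sum(pe_row * pe_col / N),  kappa = (sum(po) - pe) / (N - pe).
# The NumPy helper below is an assumption; it ignores weighting and the
# streaming update machinery.
def _cohen_kappa_from_confusion_matrix_sketch(cm):
  import numpy as np
  cm = np.asarray(cm, dtype=np.float64)
  n = cm.sum()
  po_sum = np.trace(cm)
  pe_sum = np.sum(cm.sum(axis=0) * cm.sum(axis=1)) / n
  return (po_sum - pe_sum) / (n - pe_sum)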
__all__ = [
'auc_with_confidence_intervals',
'aggregate_metric_map',
'aggregate_metrics',
'cohen_kappa',
'count',
'precision_recall_at_equal_thresholds',
'recall_at_precision',
'sparse_recall_at_top_k',
'streaming_accuracy',
'streaming_auc',
'streaming_curve_points',
'streaming_dynamic_auc',
'streaming_false_negative_rate',
'streaming_false_negative_rate_at_thresholds',
'streaming_false_negatives',
'streaming_false_negatives_at_thresholds',
'streaming_false_positive_rate',
'streaming_false_positive_rate_at_thresholds',
'streaming_false_positives',
'streaming_false_positives_at_thresholds',
'streaming_mean',
'streaming_mean_absolute_error',
'streaming_mean_cosine_distance',
'streaming_mean_iou',
'streaming_mean_relative_error',
'streaming_mean_squared_error',
'streaming_mean_tensor',
'streaming_percentage_less',
'streaming_precision',
'streaming_precision_at_thresholds',
'streaming_recall',
'streaming_recall_at_k',
'streaming_recall_at_thresholds',
'streaming_root_mean_squared_error',
'streaming_sensitivity_at_specificity',
'streaming_sparse_average_precision_at_k',
'streaming_sparse_average_precision_at_top_k',
'streaming_sparse_precision_at_k',
'streaming_sparse_precision_at_top_k',
'streaming_sparse_recall_at_k',
'streaming_specificity_at_sensitivity',
'streaming_true_negatives',
'streaming_true_negatives_at_thresholds',
'streaming_true_positives',
'streaming_true_positives_at_thresholds',
]
| apache-2.0 |
eegroopm/pyLATTICE | resources/Dialogs.py | 1 | 4713 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 21 10:36:10 2014
@author: eegroopm
"""
from PyQt4.QtGui import QDialog, QListWidgetItem
from PyQt4.QtCore import Qt
import sys
sys.path.append("..") #import from parent directory
import gui
###############################################################################
## Various Input Dialogs ##
###############################################################################
class SettingsDialog(QDialog):
def __init__(self,current_settings,parent=None):
QDialog.__init__(self,parent)
gui.loadUi(__file__,self)
c = current_settings
self.maxa.setValue(c['a max'])
self.maxb.setValue(c['b max'])
self.maxc.setValue(c['c max'])
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
class MineralListDialog(QDialog):
def __init__(self,parent=None):
QDialog.__init__(self,parent)
gui.loadUi(__file__,self)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
class NewMineralDialog(QDialog):
def __init__(self,parent=None):
QDialog.__init__(self,parent)
gui.loadUi(__file__,self)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
class ManualConditionsDialog(QDialog):
"""Allows for the user to input special reflection conditions manually.
This is a simple interface where the user can choose if:then conditions for
combinations of h,k,l. The results are then converted to python/pandas syntax
from where they can be parsed by pylattice"""
def __init__(self,conditions= [], parent= None):
QDialog.__init__(self,parent)
gui.loadUi(__file__,self)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
self.manualConds = conditions
#self.manualCondList.addItems(self.manualConds)
for c in self.manualConds:
self.newItem(c)
self.Disable(True)
self.signals_slots()
def newItem(self,text):
"""Make a new item for the item list"""
item = QListWidgetItem(text)
item.setFlags(Qt.ItemIsEditable | Qt.ItemIsEnabled | Qt.ItemIsSelectable)
self.manualCondList.addItem(item)
def Disable(self,bool):
#set items disabled to start
for item in [self.IF2, self.IF3,self.ifVal1,self.ifVal2,self.ifVal3,
self.label_3,self.label_4,self.label_5,self.THEN2,self.THEN3,
self.pm1,self.pm2,self.ifAND1,self.ifAND2]:
item.setDisabled(bool)
def signals_slots(self):
self.addCond.clicked.connect(self.parse)
self.IF1.currentIndexChanged.connect(self.toggleON)
self.deleteButton.clicked.connect(self.deleteCond)
def deleteCond(self):
"""remove selected conditions"""
for item in self.manualCondList.selectedItems():
ind = self.manualCondList.row(item)
d = self.manualCondList.takeItem(ind)
def toggleON(self):
"""Enabled/disables if options based upon first combo box choice."""
if self.IF1.currentIndex() == 0:
self.Disable(True)
else:
self.ifVal1.setEnabled(True)
self.label_3.setEnabled(True)
self.ifAND1.setEnabled(True)
self.ifAND2.setEnabled(True)
def parse(self):
"""Parses current options and converts them into python/pandas syntax"""
cond = 'if '
if1 = self.IF1.currentText()
ifVal1 = self.ifVal1.value()
ifand1 = self.ifAND1.isChecked()
ifand2 = self.ifAND2.isChecked()
#Parse 'if' statements
if if1 == 'True':
cond += (if1 + ': ')
else:
cond += '(%s == %i)' % (if1,ifVal1)
if ifand1:
cond += ' & (%s == %i)' % (self.IF2.currentText(),self.ifVal2.value())
if ifand2:
cond += ' & (%s == %i)' % (self.IF3.currentText(),self.ifVal3.value())
cond += ': '
then1 = self.THEN1.currentText()
N = self.THENN.value()
cond += '(%s' % then1
if self.thenAND1.isChecked():
cond += ' %s %s' % (self.pm1.currentText(),self.THEN2.currentText())
if self.thenAND2.isChecked():
cond += ' %s %s' % (self.pm2.currentText(),self.THEN3.currentText())
cond += (')%' + '%i == 0' % N)
self.newItem(cond)
| gpl-2.0 |
untom/scikit-learn | examples/classification/plot_classifier_comparison.py | 181 | 4699 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LDA(),
QDA()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
coolblaze03/WSNNS3Port | src/flow-monitor/examples/wifi-olsr-flowmon.py | 1 | 7376 | # -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <gjc@inescporto.pt>
import sys
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
import ns.visualizer
import ns.applications
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
def main(argv):
cmd = ns.core.CommandLine()
cmd.NumNodesSide = None
cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
cmd.Results = None
cmd.AddValue("Results", "Write XML results to file")
cmd.Plot = None
cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
cmd.Parse(argv)
wifi = ns.wifi.WifiHelper.Default()
wifiMac = ns.wifi.NqosWifiMacHelper.Default()
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns.wifi.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac.SetType ("ns3::AdhocWifiMac",
"Ssid", ns.wifi.SsidValue(ssid))
internet = ns.internet.InternetStackHelper()
list_routing = ns.internet.Ipv4ListRoutingHelper()
olsr_routing = ns.olsr.OlsrHelper()
static_routing = ns.internet.Ipv4StaticRoutingHelper()
list_routing.Add(static_routing, 0)
list_routing.Add(olsr_routing, 100)
internet.SetRoutingHelper(list_routing)
ipv4Addresses = ns.internet.Ipv4AddressHelper()
ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
port = 9 # Discard port(RFC 863)
onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
onOffHelper.SetAttribute("OnTime", ns.core.RandomVariableValue(ns.core.ConstantVariable(1)))
onOffHelper.SetAttribute("OffTime", ns.core.RandomVariableValue(ns.core.ConstantVariable(0)))
addresses = []
nodes = []
if cmd.NumNodesSide is None:
num_nodes_side = NUM_NODES_SIDE
else:
num_nodes_side = int(cmd.NumNodesSide)
for xi in range(num_nodes_side):
for yi in range(num_nodes_side):
node = ns.network.Node()
nodes.append(node)
internet.Install(ns.network.NodeContainer(node))
mobility = ns.mobility.ConstantPositionMobilityModel()
mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
node.AggregateObject(mobility)
devices = wifi.Install(wifiPhy, wifiMac, node)
ipv4_interfaces = ipv4Addresses.Assign(devices)
addresses.append(ipv4_interfaces.GetAddress(0))
for i, node in enumerate(nodes):
destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
#print i, destaddr
onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
app = onOffHelper.Install(ns.network.NodeContainer(node))
app.Start(ns.core.Seconds(ns.core.UniformVariable(20, 30).GetValue()))
#internet.EnablePcapAll("wifi-olsr")
flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
#flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
monitor = flowmon_helper.InstallAll()
monitor = flowmon_helper.GetMonitor()
monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
ns.core.Simulator.Stop(ns.core.Seconds(44.0))
ns.core.Simulator.Run()
def print_stats(os, st):
print >> os, " Tx Bytes: ", st.txBytes
print >> os, " Rx Bytes: ", st.rxBytes
print >> os, " Tx Packets: ", st.txPackets
print >> os, " Rx Packets: ", st.rxPackets
print >> os, " Lost Packets: ", st.lostPackets
if st.rxPackets > 0:
print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
if 0:
print >> os, "Delay Histogram"
for i in range(st.delayHistogram.GetNBins () ):
print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
print >> os, "Jitter Histogram"
for i in range(st.jitterHistogram.GetNBins () ):
print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
print >> os, "PacketSize Histogram"
for i in range(st.packetSizeHistogram.GetNBins () ):
print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
for reason, drops in enumerate(st.packetsDropped):
print " Packets dropped by reason %i: %i" % (reason, drops)
#for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
monitor.CheckForLostPackets()
classifier = flowmon_helper.GetClassifier()
if cmd.Results is None:
for flow_id, flow_stats in monitor.GetFlowStats():
t = classifier.FindFlow(flow_id)
proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print_stats(sys.stdout, flow_stats)
else:
print monitor.SerializeToXmlFile(cmd.Results, True, True)
if cmd.Plot is not None:
import pylab
delays = []
for flow_id, flow_stats in monitor.GetFlowStats():
tupl = classifier.FindFlow(flow_id)
if tupl.protocol == 17 and tupl.sourcePort == 698:
continue
delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
pylab.hist(delays, 20)
pylab.xlabel("Delay (s)")
pylab.ylabel("Number of Flows")
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-2.0 |
drammock/mne-python | mne/utils/dataframe.py | 14 | 3236 | # -*- coding: utf-8 -*-
"""inst.to_data_frame() helper functions."""
# Authors: Daniel McCloy <dan@mccloy.info>
#
# License: BSD (3-clause)
import numpy as np
from ._logging import logger
from ..defaults import _handle_default
def _set_pandas_dtype(df, columns, dtype):
"""Try to set the right columns to dtype."""
for column in columns:
df[column] = df[column].astype(dtype)
logger.info('Converting "%s" to "%s"...' % (column, dtype))
def _scale_dataframe_data(inst, data, picks, scalings):
ch_types = inst.get_channel_types()
ch_types_used = list()
scalings = _handle_default('scalings', scalings)
for tt in scalings.keys():
if tt in ch_types:
ch_types_used.append(tt)
for tt in ch_types_used:
scaling = scalings[tt]
idx = [ii for ii in range(len(picks)) if ch_types[ii] == tt]
if len(idx):
data[:, idx] *= scaling
return data
def _convert_times(inst, times, time_format):
"""Convert vector of time in seconds to ms, datetime, or timedelta."""
# private function; pandas already checked in calling function
from pandas import to_timedelta
if time_format == 'ms':
times = np.round(times * 1e3).astype(np.int64)
elif time_format == 'timedelta':
times = to_timedelta(times, unit='s')
elif time_format == 'datetime':
times = (to_timedelta(times + inst.first_time, unit='s') +
inst.info['meas_date'])
return times
def _build_data_frame(inst, data, picks, long_format, mindex, index,
default_index, col_names=None, col_kind='channel'):
"""Build DataFrame from MNE-object-derived data array."""
# private function; pandas already checked in calling function
from pandas import DataFrame
from ..source_estimate import _BaseSourceEstimate
# build DataFrame
if col_names is None:
col_names = [inst.ch_names[p] for p in picks]
df = DataFrame(data, columns=col_names)
for i, (k, v) in enumerate(mindex):
df.insert(i, k, v)
# build Index
if long_format:
df.set_index(default_index, inplace=True)
df.columns.name = col_kind
elif index is not None:
df.set_index(index, inplace=True)
if set(index) == set(default_index):
df.columns.name = col_kind
# long format
if long_format:
df = df.stack().reset_index()
df.rename(columns={0: 'value'}, inplace=True)
# add column for channel types (as appropriate)
ch_map = (None if isinstance(inst, _BaseSourceEstimate) else
dict(zip(np.array(inst.ch_names)[picks],
np.array(inst.get_channel_types())[picks])))
if ch_map is not None:
col_index = len(df.columns) - 1
ch_type = df['channel'].map(ch_map)
df.insert(col_index, 'ch_type', ch_type)
# restore index
if index is not None:
df.set_index(index, inplace=True)
# convert channel/vertex/ch_type columns to factors
to_factor = [c for c in df.columns.tolist()
if c not in ('time', 'value')]
_set_pandas_dtype(df, to_factor, 'category')
return df
| bsd-3-clause |
dingocuster/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
guangxingli/python-neo | examples/simple_plot_with_matplotlib.py | 7 | 1057 | # -*- coding: utf-8 -*-
"""
This is an example for plotting neo object with maplotlib.
"""
import urllib
import numpy as np
import quantities as pq
from matplotlib import pyplot
import neo
url = 'https://portal.g-node.org/neo/'
distantfile = url + 'neuroexplorer/File_neuroexplorer_2.nex'
localfile = 'File_neuroexplorer_2.nex'
urllib.urlretrieve(distantfile, localfile)
reader = neo.io.NeuroExplorerIO(filename='File_neuroexplorer_2.nex')
bl = reader.read(cascade=True, lazy=False)[0]
for seg in bl.segments:
fig = pyplot.figure()
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2)
ax1.set_title(seg.file_origin)
mint = 0 * pq.s
maxt = np.inf * pq.s
for i, asig in enumerate(seg.analogsignals):
times = asig.times.rescale('s').magnitude
asig = asig.rescale('mV').magnitude
ax1.plot(times, asig)
trains = [st.rescale('s').magnitude for st in seg.spiketrains]
colors = pyplot.cm.jet(np.linspace(0, 1, len(seg.spiketrains)))
ax2.eventplot(trains, colors=colors)
pyplot.show()
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Geneva_cont_Rot/Geneva_cont_Rot_2/fullgrid/Rest.py | 30 | 9192 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
line = [3,4,15,22,37,53,54,55,57,62,77,88,89,90,92,93]
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty Rest of the Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Dusty_Rest.pdf')
plt.clf()
print "figure saved"
| gpl-2.0 |
faturita/ShinkeyBot | NeoCortex/TelemetryAnalysis.py | 1 | 2558 | #coding: latin-1
#
# STEM - Blinking Counter
# This program is an example of using Python to implement a simple
# blink counter based on an EMG/EEG/EOG signal.
#
# Sampling frequency Fs = 128
import csv
import numpy as np
import scipy.signal as signal
def moving_average(a, n=3) :
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
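# Example (illustrative): the cumulative-sum trick above gives a simple
# moving average, e.g.
#   moving_average(np.array([1., 2., 3., 4., 5.]), n=2) -> array([ 1.5,  2.5,  3.5,  4.5])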
def corrections(a,n=300):
cumlist = []
for source in range(1, len(a)-n,n):
cum = 0
for i in range(source, source+n):
if (a[i-1] + 1==a[i]) or (a[i-1] == 254 and a[i]==0):
pass
else:
cum = cum + 1
cumlist.append(cum)
return cumlist
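# Example (illustrative): corrections counts, per block of n samples, how many
# times the rolling counter fails to advance by exactly one (mod 255), e.g.
#   corrections([1, 2, 3, 5, 6, 7], n=3) -> [1]   (one skipped value in the first block)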
results = []
print 'This program must be run with Python 2.7!'
# This first line opens the 'blinking.dat' file that was recorded
# when the connection to the server was established.
with open('sensor1.dat') as inputfile:
for row in csv.reader(inputfile):
rows = row[0].split(' ')
results.append(rows[0:])
# Convert the file into numpy array of ints.
results = np.asarray(results)
results = results.astype(float)
# Strip from the signal anything you want
# The first column corresponds to the length of the file to consider,
# in terms of samples (1:100 would be the samples), representing time.
# The second column corresponds to: eeg, attention and meditation.
#data = results[0:,0
data = results
mylist = data[0:,2]
N = 300
cumsum, moving_aves = [0], []
for i, x in enumerate(mylist, 1):
cumsum.append(cumsum[i-1] + x)
if i>=N:
moving_ave = (cumsum[i] - cumsum[i-N])/N
#can do stuff with moving_ave here
moving_aves.append(moving_ave)
fps = moving_average(data[0:,1], n=5000)
voltages = moving_average(data[0:,2], n=300)
current = moving_average(data[0:,3],n=300)
freq = moving_average(data[0:,4], n=5000)
s = corrections(data[0:,5],n=5000)
import matplotlib.pyplot as plt
fig = plt.figure()
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
ax2.plot(freq,'r', label='Fps')
#ax1.plot(data[0:,2],'g', label='V')
ax3.plot(current,'b', label='A')
ax1.plot(voltages,'g', label='V')
#ax1.plot(data[0:,4],'y', label='A')
plt.legend(loc='upper left');
plt.show()
fig = plt.figure()
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
ax2.plot(freq,'r', label='Freq')
ax1.plot(fps,'y', label='Fps')
ax3.plot(s, 'b', label='Errors')
plt.legend(loc='upper left');
plt.show()
| mit |
Petr-Kovalev/nupic-win32 | external/linux32/lib/python2.6/site-packages/matplotlib/cbook.py | 69 | 42525 | """
A collection of utility functions and classes. Many (but not all)
from the Python Cookbook -- hence the name cbook
"""
from __future__ import generators
import re, os, errno, sys, StringIO, traceback, locale, threading, types
import time, datetime
import warnings
import numpy as np
import numpy.ma as ma
from weakref import ref
major, minor1, minor2, s, tmp = sys.version_info
# on some systems, locale.getpreferredencoding returns None, which can break unicode
preferredencoding = locale.getpreferredencoding()
def unicode_safe(s):
if preferredencoding is None: return unicode(s)
else: return unicode(s, preferredencoding)
class converter:
"""
Base class for handling string -> python type with support for
missing values
"""
def __init__(self, missing='Null', missingval=None):
self.missing = missing
self.missingval = missingval
def __call__(self, s):
if s==self.missing: return self.missingval
return s
def is_missing(self, s):
return not s.strip() or s==self.missing
class tostr(converter):
'convert to string or None'
def __init__(self, missing='Null', missingval=''):
converter.__init__(self, missing=missing, missingval=missingval)
class todatetime(converter):
'convert to a datetime or None'
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s): return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.datetime(*tup[:6])
class todate(converter):
'convert to a date or None'
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s): return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.date(*tup[:3])
class tofloat(converter):
'convert to a float or None'
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing)
self.missingval = missingval
def __call__(self, s):
if self.is_missing(s): return self.missingval
return float(s)
class toint(converter):
'convert to an int or None'
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing)
def __call__(self, s):
if self.is_missing(s): return self.missingval
return int(s)
class CallbackRegistry:
"""
Handle registering and disconnecting for a set of signals and
callbacks::
signals = 'eat', 'drink', 'be merry'
def oneat(x):
print 'eat', x
def ondrink(x):
print 'drink', x
callbacks = CallbackRegistry(signals)
ideat = callbacks.connect('eat', oneat)
iddrink = callbacks.connect('drink', ondrink)
#tmp = callbacks.connect('drunk', ondrink) # this will raise a ValueError
    callbacks.process('drink', 123)  # will call ondrink
    callbacks.process('eat', 456)    # will call oneat
callbacks.process('be merry', 456) # nothing will be called
callbacks.disconnect(ideat) # disconnect oneat
callbacks.process('eat', 456) # nothing will be called
"""
def __init__(self, signals):
'*signals* is a sequence of valid signals'
self.signals = set(signals)
# callbacks is a dict mapping the signal to a dictionary
# mapping callback id to the callback function
self.callbacks = dict([(s, dict()) for s in signals])
self._cid = 0
def _check_signal(self, s):
'make sure *s* is a valid signal or raise a ValueError'
if s not in self.signals:
signals = list(self.signals)
signals.sort()
raise ValueError('Unknown signal "%s"; valid signals are %s'%(s, signals))
def connect(self, s, func):
"""
register *func* to be called when a signal *s* is generated
func will be called
"""
self._check_signal(s)
self._cid +=1
self.callbacks[s][self._cid] = func
return self._cid
def disconnect(self, cid):
"""
disconnect the callback registered with callback id *cid*
"""
for eventname, callbackd in self.callbacks.items():
try: del callbackd[cid]
except KeyError: continue
else: return
def process(self, s, *args, **kwargs):
"""
process signal *s*. All of the functions registered to receive
callbacks on *s* will be called with *\*args* and *\*\*kwargs*
"""
self._check_signal(s)
for func in self.callbacks[s].values():
func(*args, **kwargs)
class Scheduler(threading.Thread):
"""
Base class for timeout and idle scheduling
"""
idlelock = threading.Lock()
id = 0
def __init__(self):
threading.Thread.__init__(self)
self.id = Scheduler.id
self._stopped = False
Scheduler.id += 1
self._stopevent = threading.Event()
def stop(self):
if self._stopped: return
self._stopevent.set()
self.join()
self._stopped = True
class Timeout(Scheduler):
"""
Schedule recurring events with a wait time in seconds
"""
def __init__(self, wait, func):
Scheduler.__init__(self)
self.wait = wait
self.func = func
def run(self):
while not self._stopevent.isSet():
self._stopevent.wait(self.wait)
Scheduler.idlelock.acquire()
b = self.func(self)
Scheduler.idlelock.release()
if not b: break
class Idle(Scheduler):
"""
Schedule callbacks when scheduler is idle
"""
# the prototype impl is a bit of a poor man's idle handler. It
# just implements a short wait time. But it will provide a
    # placeholder for a proper impl later
waittime = 0.05
def __init__(self, func):
Scheduler.__init__(self)
self.func = func
def run(self):
while not self._stopevent.isSet():
self._stopevent.wait(Idle.waittime)
Scheduler.idlelock.acquire()
b = self.func(self)
Scheduler.idlelock.release()
if not b: break
class silent_list(list):
"""
override repr when returning a list of matplotlib artists to
prevent long, meaningless output. This is meant to be used for a
    homogeneous list of a given type
"""
def __init__(self, type, seq=None):
self.type = type
if seq is not None: self.extend(seq)
def __repr__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def __str__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def strip_math(s):
'remove latex formatting from mathtext'
remove = (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it', '\\', '{', '}')
s = s[1:-1]
for r in remove: s = s.replace(r,'')
return s
class Bunch:
"""
Often we want to just collect a bunch of stuff together, naming each
    item of the bunch; a dictionary's OK for that, but a small do-nothing
class is even handier, and prettier to use. Whenever you want to
group a few variables:
>>> point = Bunch(datum=2, squared=4, coord=12)
>>> point.datum
By: Alex Martelli
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52308
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def unique(x):
'Return a list of unique elements of *x*'
return dict([ (val, 1) for val in x]).keys()
def iterable(obj):
'return true if *obj* is iterable'
try: len(obj)
except: return False
return True
def is_string_like(obj):
'Return True if *obj* looks like a string'
if isinstance(obj, (str, unicode)): return True
# numpy strings are subclass of str, ma strings are not
if ma.isMaskedArray(obj):
if obj.ndim == 0 and obj.dtype.kind in 'SU':
return True
else:
return False
try: obj + ''
except (TypeError, ValueError): return False
return True
def is_sequence_of_strings(obj):
"""
Returns true if *obj* is iterable and contains strings
"""
if not iterable(obj): return False
if is_string_like(obj): return False
for o in obj:
if not is_string_like(o): return False
return True
def is_writable_file_like(obj):
'return true if *obj* looks like a file object with a *write* method'
return hasattr(obj, 'write') and callable(obj.write)
def is_scalar(obj):
'return true if *obj* is not string like and is not iterable'
return not is_string_like(obj) and not iterable(obj)
def is_numlike(obj):
'return true if *obj* looks like a number'
try: obj+1
except TypeError: return False
else: return True
def to_filehandle(fname, flag='r', return_opened=False):
"""
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in .gz. *flag* is a
read/write flag for :func:`file`
"""
if is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, flag)
else:
fh = file(fname, flag)
opened = True
elif hasattr(fname, 'seek'):
fh = fname
opened = False
else:
raise ValueError('fname must be a string or file handle')
if return_opened:
return fh, opened
return fh
def is_scalar_or_string(val):
return is_string_like(val) or not iterable(val)
def flatten(seq, scalarp=is_scalar_or_string):
"""
this generator flattens nested containers such as
>>> l=( ('John', 'Hunter'), (1,23), [[[[42,(5,23)]]]])
so that
>>> for i in flatten(l): print i,
John Hunter 1 23 42 5 23
By: Composite of Holger Krekel and Luther Blissett
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/121294
and Recipe 1.12 in cookbook
"""
for item in seq:
if scalarp(item): yield item
else:
for subitem in flatten(item, scalarp):
yield subitem
class Sorter:
"""
Sort by attribute or item
Example usage::
sort = Sorter()
list = [(1, 2), (4, 8), (0, 3)]
dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
{'a': 9, 'b': 9}]
sort(list) # default sort
sort(list, 1) # sort by index 1
sort(dict, 'a') # sort a list of dicts by key 'a'
"""
def _helper(self, data, aux, inplace):
aux.sort()
result = [data[i] for junk, i in aux]
if inplace: data[:] = result
return result
def byItem(self, data, itemindex=None, inplace=1):
if itemindex is None:
if inplace:
data.sort()
result = data
else:
result = data[:]
result.sort()
return result
else:
aux = [(data[i][itemindex], i) for i in range(len(data))]
return self._helper(data, aux, inplace)
def byAttribute(self, data, attributename, inplace=1):
aux = [(getattr(data[i],attributename),i) for i in range(len(data))]
return self._helper(data, aux, inplace)
# a couple of handy synonyms
sort = byItem
__call__ = byItem
class Xlator(dict):
"""
All-in-one multiple-string-substitution class
Example usage::
text = "Larry Wall is the creator of Perl"
adict = {
"Larry Wall" : "Guido van Rossum",
"creator" : "Benevolent Dictator for Life",
"Perl" : "Python",
}
print multiple_replace(adict, text)
xlat = Xlator(adict)
print xlat.xlat(text)
"""
def _make_regex(self):
""" Build re object based on the keys of the current dictionary """
return re.compile("|".join(map(re.escape, self.keys())))
def __call__(self, match):
""" Handler invoked for each regex *match* """
return self[match.group(0)]
def xlat(self, text):
""" Translate *text*, returns the modified text. """
return self._make_regex().sub(self, text)
def soundex(name, len=4):
""" soundex module conforming to Odell-Russell algorithm """
# digits holds the soundex values for the alphabet
soundex_digits = '01230120022455012623010202'
sndx = ''
fc = ''
# Translate letters in name to soundex digits
for c in name.upper():
if c.isalpha():
if not fc: fc = c # Remember first letter
d = soundex_digits[ord(c)-ord('A')]
# Duplicate consecutive soundex digits are skipped
if not sndx or (d != sndx[-1]):
sndx += d
# Replace first digit with first letter
sndx = fc + sndx[1:]
# Remove all 0s from the soundex code
sndx = sndx.replace('0', '')
# Return soundex code truncated or 0-padded to len characters
return (sndx + (len * '0'))[:len]
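# Example (illustrative):
#   soundex('Robert') -> 'R163'
#   soundex('Rupert') -> 'R163'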
class Null:
""" Null objects always and reliably "do nothing." """
def __init__(self, *args, **kwargs): pass
def __call__(self, *args, **kwargs): return self
def __str__(self): return "Null()"
def __repr__(self): return "Null()"
def __nonzero__(self): return 0
def __getattr__(self, name): return self
def __setattr__(self, name, value): return self
def __delattr__(self, name): return self
def mkdirs(newdir, mode=0777):
"""
make directory *newdir* recursively, and set *mode*. Equivalent to ::
> mkdir -p NEWDIR
> chmod MODE NEWDIR
"""
try:
if not os.path.exists(newdir):
parts = os.path.split(newdir)
for i in range(1, len(parts)+1):
thispart = os.path.join(*parts[:i])
if not os.path.exists(thispart):
os.makedirs(thispart, mode)
except OSError, err:
# Reraise the error unless it's about an already existing directory
if err.errno != errno.EEXIST or not os.path.isdir(newdir):
raise
class GetRealpathAndStat:
def __init__(self):
self._cache = {}
def __call__(self, path):
result = self._cache.get(path)
if result is None:
realpath = os.path.realpath(path)
if sys.platform == 'win32':
stat_key = realpath
else:
stat = os.stat(realpath)
stat_key = (stat.st_ino, stat.st_dev)
result = realpath, stat_key
self._cache[path] = result
return result
get_realpath_and_stat = GetRealpathAndStat()
def dict_delall(d, keys):
'delete all of the *keys* from the :class:`dict` *d*'
for key in keys:
try: del d[key]
except KeyError: pass
class RingBuffer:
""" class that implements a not-yet-full buffer """
def __init__(self,size_max):
self.max = size_max
self.data = []
class __Full:
""" class that implements a full buffer """
def append(self, x):
""" Append an element overwriting the oldest one. """
self.data[self.cur] = x
self.cur = (self.cur+1) % self.max
def get(self):
""" return list of elements in correct order """
return self.data[self.cur:]+self.data[:self.cur]
def append(self,x):
"""append an element at the end of the buffer"""
self.data.append(x)
if len(self.data) == self.max:
self.cur = 0
# Permanently change self's class from non-full to full
self.__class__ = __Full
def get(self):
""" Return a list of elements from the oldest to the newest. """
return self.data
def __get_item__(self, i):
return self.data[i % len(self.data)]
def get_split_ind(seq, N):
"""
*seq* is a list of words. Return the index into seq such that::
len(' '.join(seq[:ind])<=N
"""
sLen = 0
# todo: use Alex's xrange pattern from the cbook for efficiency
for (word, ind) in zip(seq, range(len(seq))):
sLen += len(word) + 1 # +1 to account for the len(' ')
if sLen>=N: return ind
return len(seq)
def wrap(prefix, text, cols):
'wrap *text* with *prefix* at length *cols*'
pad = ' '*len(prefix.expandtabs())
available = cols - len(pad)
seq = text.split(' ')
Nseq = len(seq)
ind = 0
lines = []
while ind<Nseq:
lastInd = ind
ind += get_split_ind(seq[ind:], available)
lines.append(seq[lastInd:ind])
# add the prefix to the first line, pad with spaces otherwise
ret = prefix + ' '.join(lines[0]) + '\n'
for line in lines[1:]:
ret += pad + ' '.join(line) + '\n'
return ret
# A regular expression used to determine the amount of space to
# remove. It looks for the first sequence of spaces immediately
# following the first newline, or at the beginning of the string.
_find_dedent_regex = re.compile("(?:(?:\n\r?)|^)( *)\S")
# A cache to hold the regexs that actually remove the indent.
_dedent_regex = {}
def dedent(s):
"""
Remove excess indentation from docstring *s*.
Discards any leading blank lines, then removes up to n whitespace
characters from each line, where n is the number of leading
whitespace characters in the first line. It differs from
textwrap.dedent in its deletion of leading blank lines and its use
of the first non-blank line to determine the indentation.
It is also faster in most cases.
"""
# This implementation has a somewhat obtuse use of regular
# expressions. However, this function accounted for almost 30% of
# matplotlib startup time, so it is worthy of optimization at all
# costs.
if not s: # includes case of s is None
return ''
match = _find_dedent_regex.match(s)
if match is None:
return s
# This is the number of spaces to remove from the left-hand side.
nshift = match.end(1) - match.start(1)
if nshift == 0:
return s
# Get a regex that will remove *up to* nshift spaces from the
# beginning of each line. If it isn't in the cache, generate it.
unindent = _dedent_regex.get(nshift, None)
if unindent is None:
unindent = re.compile("\n\r? {0,%d}" % nshift)
_dedent_regex[nshift] = unindent
result = unindent.sub("\n", s).strip()
return result
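# Example (illustrative): the first non-blank line sets the indent to strip,
# e.g. dedent("\n    hello\n      world\n") -> "hello\n  world"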
def listFiles(root, patterns='*', recurse=1, return_folders=0):
"""
Recursively list files
from Parmar and Martelli in the Python Cookbook
"""
import os.path, fnmatch
# Expand patterns from semicolon-separated string to list
pattern_list = patterns.split(';')
# Collect input and output arguments into one bunch
class Bunch:
def __init__(self, **kwds): self.__dict__.update(kwds)
arg = Bunch(recurse=recurse, pattern_list=pattern_list,
return_folders=return_folders, results=[])
def visit(arg, dirname, files):
# Append to arg.results all relevant files (and perhaps folders)
for name in files:
fullname = os.path.normpath(os.path.join(dirname, name))
if arg.return_folders or os.path.isfile(fullname):
for pattern in arg.pattern_list:
if fnmatch.fnmatch(name, pattern):
arg.results.append(fullname)
break
# Block recursion if recursion was disallowed
if not arg.recurse: files[:]=[]
os.path.walk(root, visit, arg)
return arg.results
def get_recursive_filelist(args):
"""
    Recurse through all the files and dirs in *args*, ignoring symbolic links,
and return the files as a list of strings
"""
files = []
for arg in args:
if os.path.isfile(arg):
files.append(arg)
continue
if os.path.isdir(arg):
newfiles = listFiles(arg, recurse=1, return_folders=1)
files.extend(newfiles)
return [f for f in files if not os.path.islink(f)]
def pieces(seq, num=2):
"Break up the *seq* into *num* tuples"
start = 0
while 1:
item = seq[start:start+num]
if not len(item): break
yield item
start += num
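# Example (illustrative):
#   list(pieces([1, 2, 3, 4, 5], num=2)) -> [[1, 2], [3, 4], [5]]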
def exception_to_str(s = None):
sh = StringIO.StringIO()
if s is not None: print >>sh, s
traceback.print_exc(file=sh)
return sh.getvalue()
def allequal(seq):
"""
Return *True* if all elements of *seq* compare equal. If *seq* is
0 or 1 length, return *True*
"""
if len(seq)<2: return True
val = seq[0]
for i in xrange(1, len(seq)):
thisval = seq[i]
if thisval != val: return False
return True
def alltrue(seq):
"""
Return *True* if all elements of *seq* evaluate to *True*. If
*seq* is empty, return *False*.
"""
if not len(seq): return False
for val in seq:
if not val: return False
return True
def onetrue(seq):
"""
    Return *True* if one element of *seq* is *True*. If *seq* is
empty, return *False*.
"""
if not len(seq): return False
for val in seq:
if val: return True
return False
def allpairs(x):
"""
return all possible pairs in sequence *x*
Condensed by Alex Martelli from this thread_ on c.l.python
.. _thread: http://groups.google.com/groups?q=all+pairs+group:*python*&hl=en&lr=&ie=UTF-8&selm=mailman.4028.1096403649.5135.python-list%40python.org&rnum=1
"""
return [ (s, f) for i, f in enumerate(x) for s in x[i+1:] ]
# python 2.2 dicts don't have pop--but we don't support 2.2 any more
def popd(d, *args):
"""
Should behave like python2.3 :meth:`dict.pop` method; *d* is a
:class:`dict`::
# returns value for key and deletes item; raises a KeyError if key
# is not in dict
val = popd(d, key)
# returns value for key if key exists, else default. Delete key,
# val item if it exists. Will not raise a KeyError
val = popd(d, key, default)
"""
warnings.warn("Use native python dict.pop method", DeprecationWarning)
# warning added 2008/07/22
if len(args)==1:
key = args[0]
val = d[key]
del d[key]
elif len(args)==2:
key, default = args
val = d.get(key, default)
try: del d[key]
except KeyError: pass
return val
class maxdict(dict):
"""
A dictionary with a maximum size; this doesn't override all the
    relevant methods to constrain size, just setitem, so use with
caution
"""
def __init__(self, maxsize):
dict.__init__(self)
self.maxsize = maxsize
self._killkeys = []
def __setitem__(self, k, v):
if len(self)>=self.maxsize:
del self[self._killkeys[0]]
del self._killkeys[0]
dict.__setitem__(self, k, v)
self._killkeys.append(k)
class Stack:
"""
Implement a stack where elements can be pushed on and you can move
back and forth. But no pop. Should mimic home / back / forward
in a browser
"""
def __init__(self, default=None):
self.clear()
self._default = default
def __call__(self):
'return the current element, or None'
if not len(self._elements): return self._default
else: return self._elements[self._pos]
def forward(self):
'move the position forward and return the current element'
N = len(self._elements)
if self._pos<N-1: self._pos += 1
return self()
def back(self):
'move the position back and return the current element'
if self._pos>0: self._pos -= 1
return self()
def push(self, o):
"""
push object onto stack at current position - all elements
occurring later than the current position are discarded
"""
self._elements = self._elements[:self._pos+1]
self._elements.append(o)
self._pos = len(self._elements)-1
return self()
def home(self):
'push the first element onto the top of the stack'
if not len(self._elements): return
self.push(self._elements[0])
return self()
def empty(self):
return len(self._elements)==0
def clear(self):
'empty the stack'
self._pos = -1
self._elements = []
def bubble(self, o):
"""
raise *o* to the top of the stack and return *o*. *o* must be
in the stack
"""
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
bubbles = []
for thiso in old:
if thiso==o: bubbles.append(thiso)
else: self.push(thiso)
for thiso in bubbles:
self.push(o)
return o
def remove(self, o):
'remove element *o* from the stack'
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
for thiso in old:
if thiso==o: continue
else: self.push(thiso)
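# Example (illustrative) of the browser-style navigation above:
#   s = Stack(); s.push(1); s.push(2)
#   s.back()     # -> 1
#   s.forward()  # -> 2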
def popall(seq):
'empty a list'
for i in xrange(len(seq)): seq.pop()
def finddir(o, match, case=False):
"""
return all attributes of *o* which match string in match. if case
is True require an exact case match.
"""
if case:
names = [(name,name) for name in dir(o) if is_string_like(name)]
else:
names = [(name.lower(), name) for name in dir(o) if is_string_like(name)]
match = match.lower()
return [orig for name, orig in names if name.find(match)>=0]
def reverse_dict(d):
'reverse the dictionary -- may lose data if values are not unique!'
return dict([(v,k) for k,v in d.items()])
def report_memory(i=0): # argument may go away
'return the memory consumed by process'
pid = os.getpid()
if sys.platform=='sunos5':
a2 = os.popen('ps -p %d -o osz' % pid).readlines()
mem = int(a2[-1].strip())
elif sys.platform.startswith('linux'):
a2 = os.popen('ps -p %d -o rss,sz' % pid).readlines()
mem = int(a2[1].split()[1])
elif sys.platform.startswith('darwin'):
a2 = os.popen('ps -p %d -o rss,vsz' % pid).readlines()
mem = int(a2[1].split()[0])
return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'
def safezip(*args):
'make sure *args* are equal len before zipping'
Nx = len(args[0])
for i, arg in enumerate(args[1:]):
if len(arg) != Nx:
raise ValueError(_safezip_msg % (Nx, i+1, len(arg)))
return zip(*args)
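# Example (illustrative):
#   safezip([1, 2], [3, 4]) -> [(1, 3), (2, 4)]
#   safezip([1, 2], [3])    -> raises ValueError (length mismatch)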
def issubclass_safe(x, klass):
'return issubclass(x, klass) and return False on a TypeError'
try:
return issubclass(x, klass)
except TypeError:
return False
class MemoryMonitor:
def __init__(self, nmax=20000):
self._nmax = nmax
self._mem = np.zeros((self._nmax,), np.int32)
self.clear()
def clear(self):
self._n = 0
self._overflow = False
def __call__(self):
mem = report_memory()
if self._n < self._nmax:
self._mem[self._n] = mem
self._n += 1
else:
self._overflow = True
return mem
def report(self, segments=4):
n = self._n
segments = min(n, segments)
dn = int(n/segments)
ii = range(0, n, dn)
ii[-1] = n-1
print
print 'memory report: i, mem, dmem, dmem/nloops'
print 0, self._mem[0]
for i in range(1, len(ii)):
di = ii[i] - ii[i-1]
if di == 0:
continue
dm = self._mem[ii[i]] - self._mem[ii[i-1]]
print '%5d %5d %3d %8.3f' % (ii[i], self._mem[ii[i]],
dm, dm / float(di))
if self._overflow:
print "Warning: array size was too small for the number of calls."
def xy(self, i0=0, isub=1):
x = np.arange(i0, self._n, isub)
return x, self._mem[i0:self._n:isub]
def plot(self, i0=0, isub=1, fig=None):
if fig is None:
from pylab import figure, show
fig = figure()
ax = fig.add_subplot(111)
ax.plot(*self.xy(i0, isub))
fig.canvas.draw()
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
"""
*objects*
A list of objects to find cycles in. It is often useful to
pass in gc.garbage to find the cycles that are preventing some
objects from being garbage collected.
*outstream*
The stream for output.
*show_progress*
If True, print the number of objects reached as they are found.
"""
import gc
from types import FrameType
def print_path(path):
for i, step in enumerate(path):
# next "wraps around"
next = path[(i + 1) % len(path)]
outstream.write(" %s -- " % str(type(step)))
if isinstance(step, dict):
for key, val in step.items():
if val is next:
outstream.write("[%s]" % repr(key))
break
if key is next:
outstream.write("[key] = %s" % repr(val))
break
elif isinstance(step, list):
outstream.write("[%d]" % step.index(next))
elif isinstance(step, tuple):
outstream.write("( tuple )")
else:
outstream.write(repr(step))
outstream.write(" ->\n")
outstream.write("\n")
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write("%d\r" % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we've found our way back to the start, this is
# a cycle, so print it out
if referent is start:
print_path(current_path)
# Don't go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, FrameType):
continue
# We haven't seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + [obj])
for obj in objects:
outstream.write("Examining: %r\n" % (obj,))
recurse(obj, obj, { }, [])
class Grouper(object):
"""
This class provides a lightweight way to group arbitrary objects
together into disjoint sets when a full-blown graph data structure
would be overkill.
Objects can be joined using :meth:`join`, tested for connectedness
    using :meth:`joined`, and all disjoint sets can be retrieved by
using the object as an iterator.
The objects being joined must be hashable.
For example:
>>> g = grouper.Grouper()
>>> g.join('a', 'b')
>>> g.join('b', 'c')
>>> g.join('d', 'e')
>>> list(g)
[['a', 'b', 'c'], ['d', 'e']]
>>> g.joined('a', 'b')
True
>>> g.joined('a', 'c')
True
>>> g.joined('a', 'd')
False
"""
def __init__(self, init=[]):
mapping = self._mapping = {}
for x in init:
mapping[ref(x)] = [ref(x)]
def __contains__(self, item):
return ref(item) in self._mapping
def clean(self):
"""
Clean dead weak references from the dictionary
"""
mapping = self._mapping
for key, val in mapping.items():
if key() is None:
del mapping[key]
val.remove(key)
def join(self, a, *args):
"""
Join given arguments into the same set. Accepts one or more
arguments.
"""
mapping = self._mapping
set_a = mapping.setdefault(ref(a), [ref(a)])
for arg in args:
set_b = mapping.get(ref(arg))
if set_b is None:
set_a.append(ref(arg))
mapping[ref(arg)] = set_a
elif set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.extend(set_b)
for elem in set_b:
mapping[elem] = set_a
self.clean()
def joined(self, a, b):
"""
Returns True if *a* and *b* are members of the same set.
"""
self.clean()
mapping = self._mapping
try:
return mapping[ref(a)] is mapping[ref(b)]
except KeyError:
return False
def __iter__(self):
"""
Iterate over each of the disjoint sets as a list.
The iterator is invalid if interleaved with calls to join().
"""
self.clean()
class Token: pass
token = Token()
# Mark each group as we come across if by appending a token,
# and don't yield it twice
for group in self._mapping.itervalues():
if not group[-1] is token:
yield [x() for x in group]
group.append(token)
# Cleanup the tokens
for group in self._mapping.itervalues():
if group[-1] is token:
del group[-1]
def get_siblings(self, a):
"""
Returns all of the items joined with *a*, including itself.
"""
self.clean()
siblings = self._mapping.get(ref(a), [ref(a)])
return [x() for x in siblings]
def simple_linear_interpolation(a, steps):
steps = np.floor(steps)
new_length = ((len(a) - 1) * steps) + 1
new_shape = list(a.shape)
new_shape[0] = new_length
result = np.zeros(new_shape, a.dtype)
result[0] = a[0]
a0 = a[0:-1]
a1 = a[1: ]
delta = ((a1 - a0) / steps)
for i in range(1, int(steps)):
result[i::steps] = delta * i + a0
result[steps::steps] = a1
return result
def recursive_remove(path):
if os.path.isdir(path):
for fname in glob.glob(os.path.join(path, '*')) + glob.glob(os.path.join(path, '.*')):
if os.path.isdir(fname):
recursive_remove(fname)
os.removedirs(fname)
else:
os.remove(fname)
#os.removedirs(path)
else:
os.remove(path)
def delete_masked_points(*args):
"""
Find all masked and/or non-finite points in a set of arguments,
and return the arguments with only the unmasked points remaining.
Arguments can be in any of 5 categories:
1) 1-D masked arrays
2) 1-D ndarrays
3) ndarrays with more than one dimension
4) other non-string iterables
5) anything else
The first argument must be in one of the first four categories;
any argument with a length differing from that of the first
argument (and hence anything in category 5) then will be
passed through unchanged.
Masks are obtained from all arguments of the correct length
in categories 1, 2, and 4; a point is bad if masked in a masked
array or if it is a nan or inf. No attempt is made to
extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
does not yield a Boolean array.
All input arguments that are not passed unchanged are returned
as ndarrays after removing the points or rows corresponding to
masks in any of the arguments.
A vastly simpler version of this function was originally
written as a helper for Axes.scatter().
"""
if not len(args):
return ()
if (is_string_like(args[0]) or not iterable(args[0])):
raise ValueError("First argument must be a sequence")
nrecs = len(args[0])
margs = []
seqlist = [False] * len(args)
for i, x in enumerate(args):
if (not is_string_like(x)) and iterable(x) and len(x) == nrecs:
seqlist[i] = True
if ma.isMA(x):
if x.ndim > 1:
raise ValueError("Masked arrays must be 1-D")
else:
x = np.asarray(x)
margs.append(x)
masks = [] # list of masks that are True where good
for i, x in enumerate(margs):
if seqlist[i]:
if x.ndim > 1:
continue # Don't try to get nan locations unless 1-D.
if ma.isMA(x):
masks.append(~ma.getmaskarray(x)) # invert the mask
xd = x.data
else:
xd = x
try:
mask = np.isfinite(xd)
if isinstance(mask, np.ndarray):
masks.append(mask)
except: #Fixme: put in tuple of possible exceptions?
pass
if len(masks):
mask = reduce(np.logical_and, masks)
igood = mask.nonzero()[0]
if len(igood) < nrecs:
for i, x in enumerate(margs):
if seqlist[i]:
margs[i] = x.take(igood, axis=0)
for i, x in enumerate(margs):
if seqlist[i] and ma.isMA(x):
margs[i] = x.filled()
return margs
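# Example (illustrative): rows where any argument is masked or non-finite are
# dropped from every array of matching length, e.g.
#   delete_masked_points(np.array([1., np.nan, 3.]), np.array([4, 5, 6]))
#   -> [array([ 1.,  3.]), array([4, 6])]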
def unmasked_index_ranges(mask, compressed = True):
'''
Find index ranges where *mask* is *False*.
*mask* will be flattened if it is not already 1-D.
Returns Nx2 :class:`numpy.ndarray` with each row the start and stop
indices for slices of the compressed :class:`numpy.ndarray`
corresponding to each of *N* uninterrupted runs of unmasked
values. If optional argument *compressed* is *False*, it returns
the start and stop indices into the original :class:`numpy.ndarray`,
not the compressed :class:`numpy.ndarray`. Returns *None* if there
are no unmasked values.
Example::
y = ma.array(np.arange(5), mask = [0,0,1,0,0])
ii = unmasked_index_ranges(ma.getmaskarray(y))
# returns array [[0,2,] [2,4,]]
y.compressed()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)
# returns array [[0, 2], [3, 5]]
y.filled()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
Prior to the transforms refactoring, this was used to support
masked arrays in Line2D.
'''
mask = mask.reshape(mask.size)
m = np.concatenate(((1,), mask, (1,)))
indices = np.arange(len(mask) + 1)
mdif = m[1:] - m[:-1]
i0 = np.compress(mdif == -1, indices)
i1 = np.compress(mdif == 1, indices)
assert len(i0) == len(i1)
if len(i1) == 0:
return None # Maybe this should be np.zeros((0,2), dtype=int)
if not compressed:
return np.concatenate((i0[:, np.newaxis], i1[:, np.newaxis]), axis=1)
seglengths = i1 - i0
breakpoints = np.cumsum(seglengths)
ic0 = np.concatenate(((0,), breakpoints[:-1]))
ic1 = breakpoints
return np.concatenate((ic0[:, np.newaxis], ic1[:, np.newaxis]), axis=1)
# a dict to cross-map linestyle arguments
_linestyles = [('-', 'solid'),
('--', 'dashed'),
('-.', 'dashdot'),
(':', 'dotted')]
ls_mapper = dict(_linestyles)
ls_mapper.update([(ls[1], ls[0]) for ls in _linestyles])
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('less_simple_linear_interpolation has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.less_simple_linear_interpolation( x, y, xi, extrap=extrap )
def isvector(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('isvector has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
    return mlab.isvector( X )
def vector_lengths( X, P=2., axis=None ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('vector_lengths has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.vector_lengths( X, P=2., axis=axis )
def distances_along_curve( X ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('distances_along_curve has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.distances_along_curve( X )
def path_length(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('path_length has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.path_length(X)
def is_closed_polygon(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('is_closed_polygon has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.is_closed_polygon(X)
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('quad2cubic has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y)
if __name__=='__main__':
assert( allequal([1,1,1]) )
assert(not allequal([1,1,0]) )
assert( allequal([]) )
assert( allequal(('a', 'a')))
assert( not allequal(('a', 'b')))
| gpl-3.0 |
eggie5/ipython-notebooks | college_trends/acs.py | 1 | 2358 | import pandas as pd
all_ages = pd.read_csv("all-ages.csv")
all_ages_totals = all_ages.pivot_table(index="Major_category", aggfunc="sum")["Total"]
all_ages_major_categories = dict(all_ages_totals)
print all_ages_totals
print "\n****\n"
recent_grads = pd.read_csv("recent-grads.csv")
recent_totals = recent_grads.pivot_table(index="Major_category", aggfunc="sum")["Total"]
recent_grads_major_categories = dict(recent_totals)
print recent_totals
#Use the Low_wage_jobs and Total columns to calculate the proportion of recent college graduates that worked low wage jobs. Store the resulting Float object of the calculation as low_wage_percent.
recent_grads = pd.read_csv("recent-grads.csv")
low_wage_percent = 0.0
low_wage_sum = float(recent_grads["Low_wage_jobs"].sum())
recent_sum = float(recent_grads["Employed"].sum())
low_wage_percent = low_wage_sum / recent_sum
print low_wage_percent
#Both `all_ages` and `recent_grads` datasets have 173 rows, corresponding to the 173 college major codes. This enables us to do some comparisons between the two datasets and perform some initial calculations to see how similar or different the statistics of recent college graduates are from those of the entire population.
#Instructions
#We want to know the number of majors where recent grads fare better than the overall population. For each major, determine if the Unemployment_rate is lower for `recent_grads` or for `all_ages` and increment either `recent_grads_lower_emp_count` or `all_ages_lower_emp_count` respectively.
# All majors, common to both DataFrames
majors = recent_grads['Major'].value_counts().index
recent_grads_lower_emp=[]
all_ages_lower_emp=[]
for major in majors:
recent_unemply_rate = recent_grads[recent_grads["Major"]==major]["Unemployment_rate"].values[0]
all_time_unemply_rate = all_ages[all_ages["Major"]==major]["Unemployment_rate"].values[0]
diff = recent_unemply_rate - all_time_unemply_rate #comparator
if diff < 0:
recent_grads_lower_emp.append(major)
elif diff >0:
all_ages_lower_emp.append(major)
else:
pass #equal
recent_grads_lower_emp_count = len(recent_grads_lower_emp)
all_ages_lower_emp_count = len(all_ages_lower_emp)
print recent_grads_lower_emp_count
print all_ages_lower_emp_count
print recent_grads_lower_emp
print all_ages_lower_emp
| mit |
Snazz2001/sklearn-pmml | sklearn_pmml/convert/test/test_gradientBoostingConverter.py | 3 | 2043 | from unittest import TestCase
from sklearn.ensemble import GradientBoostingClassifier
import numpy as np
from sklearn_pmml.convert import TransformationContext
from sklearn_pmml.convert.features import *
from sklearn_pmml.convert.gbrt import GradientBoostingConverter
class TestGradientBoostingClassifierConverter(TestCase):
def setUp(self):
np.random.seed(1)
self.est = GradientBoostingClassifier(max_depth=2, n_estimators=10)
self.est.fit([
[0, 0],
[0, 1],
[1, 0],
[1, 1],
], [0, 1, 1, 1])
self.ctx = TransformationContext(
input=[IntegerNumericFeature('x1'), StringCategoricalFeature('x2', ['zero', 'one'])],
derived=[],
model=[IntegerNumericFeature('x1'), StringCategoricalFeature('x2', ['zero', 'one'])],
output=[RealNumericFeature('output')]
)
self.converter = GradientBoostingConverter(
estimator=self.est,
context=self.ctx
)
def test_transform(self):
p = self.converter.pmml()
mm = p.MiningModel[0]
assert mm.MiningSchema is not None, 'Missing mining schema'
assert len(mm.MiningSchema.MiningField) == 3, 'Wrong number of mining fields'
assert mm.Segmentation is not None, 'Missing segmentation root'
def test_transform_with_verification(self):
p = self.converter.pmml([
{'x1': 0, 'x2': 'zero', 'output': self.est.predict_proba([[0, 0]])[0, 1]},
{'x1': 0, 'x2': 'one', 'output': self.est.predict_proba([[0, 1]])[0, 1]},
{'x1': 1, 'x2': 'zero', 'output': self.est.predict_proba([[1, 0]])[0, 1]},
{'x1': 1, 'x2': 'one', 'output': self.est.predict_proba([[1, 1]])[0, 1]},
])
mm = p.MiningModel[0]
assert mm.MiningSchema is not None, 'Missing mining schema'
assert len(mm.MiningSchema.MiningField) == 3, 'Wrong number of mining fields'
assert mm.Segmentation is not None, 'Missing segmentation root'
| mit |
DonBeo/scikit-learn | sklearn/neighbors/graph.py | 19 | 6904 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(
n_neighbors, metric=metric, p=p, metric_params=metric_params
).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
        'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(
radius=radius, metric=metric, p=p,
metric_params=metric_params
).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
fedspendingtransparency/data-act-broker-backend | dataactvalidator/scripts/load_program_activity.py | 1 | 10005 | import os
import logging
import io
import pandas as pd
import numpy as np
import boto3
import datetime
import sys
import json
import re
from dataactcore.config import CONFIG_BROKER
from dataactcore.interfaces.db import GlobalDB
from dataactcore.models.domainModels import ProgramActivity, ExternalDataLoadDate
from dataactcore.models.lookups import EXTERNAL_DATA_TYPE_DICT
from dataactvalidator.health_check import create_app
from dataactvalidator.scripts.loader_utils import clean_data, insert_dataframe
from dataactcore.utils.failure_threshold_exception import FailureThresholdExceededException
logger = logging.getLogger(__name__)
PA_BUCKET = CONFIG_BROKER['data_sources_bucket']
PA_SUB_KEY = 'OMB_Data/'
PA_FILE_NAME = 'DATA Act Program Activity List for Treas.csv'
VALID_HEADERS = {'AGENCY_CODE', 'ALLOCATION_ID', 'ACCOUNT_CODE', 'PA_CODE', 'PA_TITLE', 'FYQ'}
def get_program_activity_file(base_path):
""" Retrieves the program activity file to load
Args:
base_path: directory of domain config files
Returns:
the file path for the pa file either on S3 or locally
"""
if CONFIG_BROKER['use_aws']:
s3 = boto3.resource('s3', region_name=CONFIG_BROKER['aws_region'])
s3_object = s3.Object(PA_BUCKET, PA_SUB_KEY + PA_FILE_NAME)
response = s3_object.get(Key=(PA_SUB_KEY + PA_FILE_NAME))
pa_file = io.BytesIO(response['Body'].read())
else:
pa_file = os.path.join(base_path, PA_FILE_NAME)
return pa_file
def get_date_of_current_pa_upload(base_path):
""" Gets the last time the file was uploaded to S3, or alternatively the last time the local file was modified.
Args:
base_path: directory of domain config files
Returns:
DateTime object
"""
if CONFIG_BROKER['use_aws']:
last_uploaded = boto3.client('s3', region_name=CONFIG_BROKER['aws_region']). \
head_object(Bucket=PA_BUCKET, Key=PA_SUB_KEY + PA_FILE_NAME)['LastModified']
# LastModified is coming back to us in UTC already; just drop the TZ.
last_uploaded = last_uploaded.replace(tzinfo=None)
else:
pa_file = get_program_activity_file(base_path)
last_uploaded = datetime.datetime.utcfromtimestamp(os.path.getmtime(pa_file))
return last_uploaded
def get_stored_pa_last_upload():
""" Gets last recorded timestamp from last time file was processed.
Returns:
Upload date of most recent file we have recorded (Datetime object)
"""
sess = GlobalDB.db().session
last_stored_obj = sess.query(ExternalDataLoadDate).filter_by(
external_data_type_id=EXTERNAL_DATA_TYPE_DICT['program_activity_upload']).one_or_none()
if not last_stored_obj:
# return epoch ts to make sure we load the data the first time through,
# and ideally any time the data might have been wiped
last_stored = datetime.datetime.utcfromtimestamp(0)
else:
last_stored = last_stored_obj.last_load_date
return last_stored
def set_stored_pa_last_upload(load_datetime):
""" Set upload date of most recent file we have recorded (Datetime object)
Args:
Datetime object representing the timestamp associated with the current file
"""
sess = GlobalDB.db().session
last_stored_obj = sess.query(ExternalDataLoadDate).filter_by(
external_data_type_id=EXTERNAL_DATA_TYPE_DICT['program_activity_upload']).one_or_none()
if not last_stored_obj:
last_stored_obj = ExternalDataLoadDate(external_data_type_id=EXTERNAL_DATA_TYPE_DICT['program_activity_upload'],
last_load_date=load_datetime)
sess.add(last_stored_obj)
else:
last_stored_obj.last_load_date = load_datetime
sess.commit()
def load_program_activity_data(base_path):
""" Load program activity lookup table.
Args:
base_path: directory of domain config files
"""
now = datetime.datetime.now()
metrics_json = {
'script_name': 'load_program_activity.py',
'start_time': str(now),
'records_received': 0,
'duplicates_dropped': 0,
'invalid_records_dropped': 0,
'records_deleted': 0,
'records_inserted': 0
}
dropped_count = 0
logger.info('Checking PA upload dates to see if we can skip.')
last_upload = get_date_of_current_pa_upload(base_path)
if not (last_upload > get_stored_pa_last_upload()):
logger.info('Skipping load as it\'s already been done')
else:
        logger.info('Getting the program activity file')
program_activity_file = get_program_activity_file(base_path)
logger.info('Loading program activity: {}'.format(PA_FILE_NAME))
with create_app().app_context():
sess = GlobalDB.db().session
try:
data = pd.read_csv(program_activity_file, dtype=str)
except pd.io.common.EmptyDataError:
log_blank_file()
exit_if_nonlocal(4) # exit code chosen arbitrarily, to indicate distinct failure states
return
headers = set([header.upper() for header in list(data)])
if not VALID_HEADERS.issubset(headers):
logger.error('Missing required headers. Required headers include: %s' % str(VALID_HEADERS))
exit_if_nonlocal(4)
return
try:
dropped_count, data = clean_data(
data,
ProgramActivity,
{'fyq': 'fiscal_year_period', 'agency_code': 'agency_id', 'allocation_id': 'allocation_transfer_id',
'account_code': 'account_number', 'pa_code': 'program_activity_code',
'pa_title': 'program_activity_name'},
{'program_activity_code': {'pad_to_length': 4}, 'agency_id': {'pad_to_length': 3},
'allocation_transfer_id': {'pad_to_length': 3, 'keep_null': True},
'account_number': {'pad_to_length': 4}},
['agency_id', 'program_activity_code', 'account_number', 'program_activity_name'],
True
)
except FailureThresholdExceededException as e:
if e.count == 0:
log_blank_file()
exit_if_nonlocal(4)
return
else:
logger.error('Loading of program activity file failed due to exceeded failure threshold. '
'Application tried to drop {} rows'.format(e.count))
exit_if_nonlocal(5)
return
metrics_json['records_deleted'] = sess.query(ProgramActivity).delete()
metrics_json['invalid_records_dropped'] = dropped_count
# Lowercase Program Activity Name
data['program_activity_name'] = data['program_activity_name'].apply(lambda x: lowercase_or_notify(x))
# Convert FYQ to FYP
data['fiscal_year_period'] = data['fiscal_year_period'].apply(lambda x: convert_fyq_to_fyp(x))
# because we're only loading a subset of program activity info, there will be duplicate records in the
# dataframe. this is ok, but need to de-duped before the db load. We also need to log them.
base_count = len(data.index)
metrics_json['records_received'] = base_count
data.drop_duplicates(inplace=True)
dupe_count = base_count - len(data.index)
logger.info('Dropped {} duplicate rows.'.format(dupe_count))
metrics_json['duplicates_dropped'] = dupe_count
# insert to db
table_name = ProgramActivity.__table__.name
num = insert_dataframe(data, table_name, sess.connection())
sess.commit()
set_stored_pa_last_upload(last_upload)
logger.info('{} records inserted to {}'.format(num, table_name))
metrics_json['records_inserted'] = num
metrics_json['duration'] = str(datetime.datetime.now() - now)
with open('load_program_activity_metrics.json', 'w+') as metrics_file:
json.dump(metrics_json, metrics_file)
if dropped_count > 0:
exit_if_nonlocal(3)
return
def lowercase_or_notify(x):
""" Lowercases the input if it is valid, otherwise logs the error and sets a default value
Args:
            x: String to lowercase
Returns:
Lowercased string if possible, else unmodified string or default value.
"""
try:
return x.lower()
except Exception:
if x and not np.isnan(x):
logger.info('Program activity of {} was unable to be lowercased. Entered as-is.'.format(x))
return x
else:
logger.info('Null value found for program activity name. Entered default value.') # should not happen
return '(not provided)'
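# Example (illustrative):
#   lowercase_or_notify('Salaries and Expenses') -> 'salaries and expenses'
#   lowercase_or_notify(None)                    -> '(not provided)'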
def convert_fyq_to_fyp(fyq):
""" Converts the fyq provided to fyp if it is in fyq format. Do nothing if it is already in fyp format
Args:
            fyq: fiscal year/quarter string to convert, or to leave alone if already in period format
Returns:
FYQ converted to FYP or left the same
"""
# If it's in quarter format, convert to period
if re.match('^FY\d{2}Q\d$', str(fyq).upper().strip()):
# Make sure it's all uppercase and replace the Q with a P
fyq = fyq.upper().strip().replace('Q', 'P')
# take the last character in the string (the quarter), multiply by 3, replace
quarter = fyq[-1]
period = str(int(quarter) * 3).zfill(2)
fyq = fyq[:-1] + period
return fyq
return fyq
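# Example (illustrative): a quarter maps to the final period of that quarter,
#   convert_fyq_to_fyp('FY21Q3')  -> 'FY21P09'
#   convert_fyq_to_fyp('FY21P06') -> 'FY21P06'   (already in period format, unchanged)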
def log_blank_file():
""" Helper function for specific reused log message """
logger.error('File was blank! Not loaded, routine aborted.')
def exit_if_nonlocal(exit_code):
if not CONFIG_BROKER['local']:
sys.exit(exit_code)
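# Summary of the non-local exit codes used above (descriptive only, derived from
# the calls in this module): 4 -> missing required headers or a blank file,
# 5 -> the failure threshold was exceeded while cleaning the data,
# 3 -> the load completed but some invalid rows were dropped.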
| cc0-1.0 |
jjs0sbw/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/backends/backend_qt4.py | 2 | 26724 | from __future__ import division
import math
import os
import sys
import matplotlib
from matplotlib import verbose
from matplotlib.cbook import is_string_like, onetrue
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, IdleEvent, \
cursors, TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.widgets import SubplotTool
try:
import matplotlib.backends.qt4_editor.figureoptions as figureoptions
except ImportError:
figureoptions = None
from qt4_compat import QtCore, QtGui, _getSaveFileName, __version__
backend_version = __version__
def fn_name(): return sys._getframe(1).f_code.co_name
DEBUG = False
cursord = {
cursors.MOVE : QtCore.Qt.SizeAllCursor,
cursors.HAND : QtCore.Qt.PointingHandCursor,
cursors.POINTER : QtCore.Qt.ArrowCursor,
cursors.SELECT_REGION : QtCore.Qt.CrossCursor,
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw_idle()
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one.
"""
if QtGui.QApplication.startingUp():
if DEBUG: print "Starting up QApplication"
global qApp
app = QtGui.QApplication.instance()
if app is None:
qApp = QtGui.QApplication( [" "] )
QtCore.QObject.connect( qApp, QtCore.SIGNAL( "lastWindowClosed()" ),
qApp, QtCore.SLOT( "quit()" ) )
else:
qApp = app
class Show(ShowBase):
def mainloop(self):
QtGui.qApp.exec_()
show = Show()
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
thisFig = Figure( *args, **kwargs )
canvas = FigureCanvasQT( thisFig )
manager = FigureManagerQT( canvas, num )
return manager
class TimerQT(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses Qt4 timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def __init__(self, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
# Create a new timer and connect the timeout() signal to the
# _on_timer method.
self._timer = QtCore.QTimer()
QtCore.QObject.connect(self._timer, QtCore.SIGNAL('timeout()'),
self._on_timer)
def __del__(self):
# Probably not necessary in practice, but is good behavior to disconnect
TimerBase.__del__(self)
QtCore.QObject.disconnect(self._timer , QtCore.SIGNAL('timeout()'),
self._on_timer)
def _timer_set_single_shot(self):
self._timer.setSingleShot(self._single)
def _timer_set_interval(self):
self._timer.setInterval(self._interval)
def _timer_start(self):
self._timer.start()
def _timer_stop(self):
self._timer.stop()
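# Illustrative sketch (assumed usage, not part of this module): a TimerQT is
# normally obtained through FigureCanvasQT.new_timer defined below, e.g.
#     timer = canvas.new_timer(interval=500)
#     timer.add_callback(canvas.draw_idle)
#     timer.start()
# where add_callback/start are inherited from backend_bases.TimerBase.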
class FigureCanvasQT( QtGui.QWidget, FigureCanvasBase ):
keyvald = { QtCore.Qt.Key_Control : 'control',
QtCore.Qt.Key_Shift : 'shift',
QtCore.Qt.Key_Alt : 'alt',
QtCore.Qt.Key_Return : 'enter'
}
# map Qt button codes to MouseEvent's ones:
buttond = {QtCore.Qt.LeftButton : 1,
QtCore.Qt.MidButton : 2,
QtCore.Qt.RightButton : 3,
# QtCore.Qt.XButton1 : None,
# QtCore.Qt.XButton2 : None,
}
def __init__( self, figure ):
if DEBUG: print 'FigureCanvasQt: ', figure
_create_qApp()
QtGui.QWidget.__init__( self )
FigureCanvasBase.__init__( self, figure )
self.figure = figure
self.setMouseTracking( True )
self._idle = True
# hide until we can test and fix
#self.startTimer(backend_IdleEvent.milliseconds)
w,h = self.get_width_height()
self.resize( w, h )
# JDH: Note the commented out code below does not work as
        # expected because, according to Pierre Raybaut, PyQt fails
        # (silently) to call a method of this object
        # just before destroying it. Using a lambda function will work,
# exactly the same as using a function (which is not bound to
# the object to be destroyed).
#
#QtCore.QObject.connect(self, QtCore.SIGNAL('destroyed()'),
# self.close_event)
QtCore.QObject.connect(self, QtCore.SIGNAL('destroyed()'),
lambda: self.close_event())
def __timerEvent(self, event):
# hide until we can test and fix
self.mpl_idle_event(event)
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, event)
def leaveEvent(self, event):
QtGui.QApplication.restoreOverrideCursor()
FigureCanvasBase.leave_notify_event(self, event)
def mousePressEvent( self, event ):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond.get(event.button())
if button is not None: # only three buttons supported by MouseEvent
FigureCanvasBase.button_press_event( self, x, y, button )
if DEBUG: print('button pressed:', event.button())
def mouseMoveEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
FigureCanvasBase.motion_notify_event( self, x, y )
#if DEBUG: print 'mouse move'
def mouseReleaseEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
button = self.buttond.get(event.button())
if button is not None: # only three buttons supported by MouseEvent
FigureCanvasBase.button_release_event( self, x, y, button )
if DEBUG: print('button released')
def wheelEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
# from QWheelEvent::delta doc
steps = event.delta()/120
if (event.orientation() == QtCore.Qt.Vertical):
FigureCanvasBase.scroll_event( self, x, y, steps)
if DEBUG: print 'scroll event : delta = %i, steps = %i ' % (event.delta(),steps)
def keyPressEvent( self, event ):
key = self._get_key( event )
if key is None:
return
FigureCanvasBase.key_press_event( self, key )
if DEBUG: print 'key press', key
def keyReleaseEvent( self, event ):
key = self._get_key(event)
if key is None:
return
FigureCanvasBase.key_release_event( self, key )
if DEBUG: print 'key release', key
def resizeEvent( self, event ):
if DEBUG: print 'resize (%d x %d)' % (event.size().width(), event.size().height())
w = event.size().width()
h = event.size().height()
if DEBUG: print "FigureCanvasQtAgg.resizeEvent(", w, ",", h, ")"
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches( winch, hinch )
self.draw()
self.update()
QtGui.QWidget.resizeEvent(self, event)
def sizeHint( self ):
w, h = self.get_width_height()
return QtCore.QSize( w, h )
    def minimumSizeHint( self ):
return QtCore.QSize( 10, 10 )
def _get_key( self, event ):
if event.isAutoRepeat():
return None
if event.key() < 256:
key = str(event.text())
elif event.key() in self.keyvald:
key = self.keyvald[ event.key() ]
else:
key = None
return key
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerQT(*args, **kwargs)
def flush_events(self):
QtGui.qApp.processEvents()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
def draw_idle(self):
'update drawing area only if idle'
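        # Coalesce repeated draw requests: only the first call while idle
        # schedules a zero-delay single-shot QTimer; idle_draw then performs
        # the actual draw and resets the _idle flag.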
d = self._idle
self._idle = False
def idle_draw(*args):
self.draw()
self._idle = True
if d: QtCore.QTimer.singleShot(0, idle_draw)
class FigureManagerQT( FigureManagerBase ):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The qt.QToolBar
window : The qt.QMainWindow
"""
def __init__( self, canvas, num ):
if DEBUG: print 'FigureManagerQT.%s' % fn_name()
FigureManagerBase.__init__( self, canvas, num )
self.canvas = canvas
self.window = QtGui.QMainWindow()
self.window.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.window.setWindowTitle("Figure %d" % num)
image = os.path.join( matplotlib.rcParams['datapath'],'images','matplotlib.png' )
self.window.setWindowIcon(QtGui.QIcon( image ))
# Give the keyboard focus to the figure instead of the manager
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
QtCore.QObject.connect( self.window, QtCore.SIGNAL( 'destroyed()' ),
self._widgetclosed )
self.window._destroying = False
self.toolbar = self._get_toolbar(self.canvas, self.window)
if self.toolbar is not None:
self.window.addToolBar(self.toolbar)
QtCore.QObject.connect(self.toolbar, QtCore.SIGNAL("message"),
self._show_message)
tbs_height = self.toolbar.sizeHint().height()
else:
tbs_height = 0
# resize the main window so it will display the canvas with the
# requested size:
cs = canvas.sizeHint()
sbs = self.window.statusBar().sizeHint()
self._status_and_tool_height = tbs_height+sbs.height()
height = cs.height() + self._status_and_tool_height
self.window.resize(cs.width(), height)
self.window.setCentralWidget(self.canvas)
if matplotlib.is_interactive():
self.window.show()
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.window.show()
def notify_axes_change( fig ):
# This will be called whenever the current axes is changed
if self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver( notify_axes_change )
@QtCore.Slot()
def _show_message(self,s):
# Fixes a PySide segfault.
self.window.statusBar().showMessage(s)
def _widgetclosed( self ):
if self.window._destroying: return
self.window._destroying = True
try:
Gcf.destroy(self.num)
except AttributeError:
pass
# It seems that when the python session is killed,
# Gcf can get destroyed before the Gcf.destroy
# line is run, leading to a useless AttributeError.
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'classic':
print "Classic toolbar is not supported"
elif matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent, False)
else:
toolbar = None
return toolbar
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height + self._status_and_tool_height)
def show(self):
self.window.show()
def destroy( self, *args ):
# check for qApp first, as PySide deletes it in its atexit handler
if QtGui.QApplication.instance() is None: return
if self.window._destroying: return
self.window._destroying = True
QtCore.QObject.disconnect( self.window, QtCore.SIGNAL( 'destroyed()' ),
self._widgetclosed )
if self.toolbar: self.toolbar.destroy()
if DEBUG: print "destroy figure manager"
self.window.close()
def set_window_title(self, title):
self.window.setWindowTitle(title)
class NavigationToolbar2QT( NavigationToolbar2, QtGui.QToolBar ):
def __init__(self, canvas, parent, coordinates=True):
""" coordinates: should we show the coordinates on the right? """
self.canvas = canvas
self.coordinates = coordinates
QtGui.QToolBar.__init__( self, parent )
NavigationToolbar2.__init__( self, canvas )
def _icon(self, name):
return QtGui.QIcon(os.path.join(self.basedir, name))
def _init_toolbar(self):
self.basedir = os.path.join(matplotlib.rcParams[ 'datapath' ],'images')
a = self.addAction(self._icon('home.png'), 'Home', self.home)
a.setToolTip('Reset original view')
a = self.addAction(self._icon('back.png'), 'Back', self.back)
a.setToolTip('Back to previous view')
a = self.addAction(self._icon('forward.png'), 'Forward', self.forward)
a.setToolTip('Forward to next view')
self.addSeparator()
a = self.addAction(self._icon('move.png'), 'Pan', self.pan)
a.setToolTip('Pan axes with left mouse, zoom with right')
a = self.addAction(self._icon('zoom_to_rect.png'), 'Zoom', self.zoom)
a.setToolTip('Zoom to rectangle')
self.addSeparator()
a = self.addAction(self._icon('subplots.png'), 'Subplots',
self.configure_subplots)
a.setToolTip('Configure subplots')
if figureoptions is not None:
a = self.addAction(self._icon("qt4_editor_options.png"),
'Customize', self.edit_parameters)
a.setToolTip('Edit curves line and axes parameters')
a = self.addAction(self._icon('filesave.png'), 'Save',
self.save_figure)
a.setToolTip('Save the figure')
self.buttons = {}
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
if self.coordinates:
self.locLabel = QtGui.QLabel( "", self )
self.locLabel.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTop )
self.locLabel.setSizePolicy(
QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Ignored))
labelAction = self.addWidget(self.locLabel)
labelAction.setVisible(True)
# reference holder for subplots_adjust window
self.adj_window = None
if figureoptions is not None:
def edit_parameters(self):
allaxes = self.canvas.figure.get_axes()
if len(allaxes) == 1:
axes = allaxes[0]
else:
titles = []
for axes in allaxes:
title = axes.get_title()
ylabel = axes.get_ylabel()
if title:
fmt = "%(title)s"
if ylabel:
fmt += ": %(ylabel)s"
fmt += " (%(axes_repr)s)"
elif ylabel:
fmt = "%(axes_repr)s (%(ylabel)s)"
else:
fmt = "%(axes_repr)s"
titles.append(fmt % dict(title = title,
ylabel = ylabel,
axes_repr = repr(axes)))
item, ok = QtGui.QInputDialog.getItem(self, 'Customize',
'Select axes:', titles,
0, False)
if ok:
axes = allaxes[titles.index(unicode(item))]
else:
return
figureoptions.figure_edit(axes, self)
def dynamic_update( self ):
self.canvas.draw()
def set_message( self, s ):
self.emit(QtCore.SIGNAL("message"), s)
if self.coordinates:
self.locLabel.setText(s.replace(', ', '\n'))
def set_cursor( self, cursor ):
if DEBUG: print 'Set cursor' , cursor
QtGui.QApplication.restoreOverrideCursor()
QtGui.QApplication.setOverrideCursor( QtGui.QCursor( cursord[cursor] ) )
def draw_rubberband( self, event, x0, y0, x1, y1 ):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
self.canvas.drawRectangle( rect )
def configure_subplots(self):
self.adj_window = QtGui.QMainWindow()
win = self.adj_window
win.setAttribute(QtCore.Qt.WA_DeleteOnClose)
win.setWindowTitle("Subplot Configuration Tool")
image = os.path.join( matplotlib.rcParams['datapath'],'images','matplotlib.png' )
win.setWindowIcon(QtGui.QIcon( image ))
tool = SubplotToolQt(self.canvas.figure, win)
win.setCentralWidget(tool)
win.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
win.show()
def _get_canvas(self, fig):
return FigureCanvasQT(fig)
def save_figure(self, *args):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
default_filetype = self.canvas.get_default_filetype()
start = "image." + default_filetype
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname = _getSaveFileName(self, "Choose a filename to save to",
start, filters, selectedFilter)
if fname:
try:
self.canvas.print_figure( unicode(fname) )
except Exception, e:
QtGui.QMessageBox.critical(
self, "Error saving file", str(e),
QtGui.QMessageBox.Ok, QtGui.QMessageBox.NoButton)
class SubplotToolQt( SubplotTool, QtGui.QWidget ):
def __init__(self, targetfig, parent):
QtGui.QWidget.__init__(self, None)
self.targetfig = targetfig
self.parent = parent
self.sliderleft = QtGui.QSlider(QtCore.Qt.Horizontal)
self.sliderbottom = QtGui.QSlider(QtCore.Qt.Vertical)
self.sliderright = QtGui.QSlider(QtCore.Qt.Horizontal)
self.slidertop = QtGui.QSlider(QtCore.Qt.Vertical)
self.sliderwspace = QtGui.QSlider(QtCore.Qt.Horizontal)
self.sliderhspace = QtGui.QSlider(QtCore.Qt.Vertical)
# constraints
QtCore.QObject.connect( self.sliderleft,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderright.setMinimum )
QtCore.QObject.connect( self.sliderright,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderleft.setMaximum )
QtCore.QObject.connect( self.sliderbottom,
QtCore.SIGNAL( "valueChanged(int)" ),
self.slidertop.setMinimum )
QtCore.QObject.connect( self.slidertop,
QtCore.SIGNAL( "valueChanged(int)" ),
self.sliderbottom.setMaximum )
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace, )
adjustments = ('left:', 'bottom:', 'right:', 'top:', 'wspace:', 'hspace:')
for slider, adjustment in zip(sliders, adjustments):
slider.setMinimum(0)
slider.setMaximum(1000)
slider.setSingleStep(5)
layout = QtGui.QGridLayout()
leftlabel = QtGui.QLabel('left')
layout.addWidget(leftlabel, 2, 0)
layout.addWidget(self.sliderleft, 2, 1)
toplabel = QtGui.QLabel('top')
layout.addWidget(toplabel, 0, 2)
layout.addWidget(self.slidertop, 1, 2)
layout.setAlignment(self.slidertop, QtCore.Qt.AlignHCenter)
bottomlabel = QtGui.QLabel('bottom')
        layout.addWidget(bottomlabel, 4, 2)
layout.addWidget(self.sliderbottom, 3, 2)
layout.setAlignment(self.sliderbottom, QtCore.Qt.AlignHCenter)
rightlabel = QtGui.QLabel('right')
layout.addWidget(rightlabel, 2, 4)
layout.addWidget(self.sliderright, 2, 3)
hspacelabel = QtGui.QLabel('hspace')
layout.addWidget(hspacelabel, 0, 6)
layout.setAlignment(hspacelabel, QtCore.Qt.AlignHCenter)
layout.addWidget(self.sliderhspace, 1, 6)
layout.setAlignment(self.sliderhspace, QtCore.Qt.AlignHCenter)
wspacelabel = QtGui.QLabel('wspace')
layout.addWidget(wspacelabel, 4, 6)
layout.setAlignment(wspacelabel, QtCore.Qt.AlignHCenter)
layout.addWidget(self.sliderwspace, 3, 6)
layout.setAlignment(self.sliderwspace, QtCore.Qt.AlignBottom)
layout.setRowStretch(1,1)
layout.setRowStretch(3,1)
layout.setColumnStretch(1,1)
layout.setColumnStretch(3,1)
layout.setColumnStretch(6,1)
self.setLayout(layout)
self.sliderleft.setSliderPosition(int(targetfig.subplotpars.left*1000))
self.sliderbottom.setSliderPosition(\
int(targetfig.subplotpars.bottom*1000))
self.sliderright.setSliderPosition(\
int(targetfig.subplotpars.right*1000))
self.slidertop.setSliderPosition(int(targetfig.subplotpars.top*1000))
self.sliderwspace.setSliderPosition(\
int(targetfig.subplotpars.wspace*1000))
self.sliderhspace.setSliderPosition(\
int(targetfig.subplotpars.hspace*1000))
QtCore.QObject.connect( self.sliderleft,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcleft )
QtCore.QObject.connect( self.sliderbottom,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcbottom )
QtCore.QObject.connect( self.sliderright,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcright )
QtCore.QObject.connect( self.slidertop,
QtCore.SIGNAL( "valueChanged(int)" ),
self.functop )
QtCore.QObject.connect( self.sliderwspace,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funcwspace )
QtCore.QObject.connect( self.sliderhspace,
QtCore.SIGNAL( "valueChanged(int)" ),
self.funchspace )
def funcleft(self, val):
if val == self.sliderright.value():
val -= 1
self.targetfig.subplots_adjust(left=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcright(self, val):
if val == self.sliderleft.value():
val += 1
self.targetfig.subplots_adjust(right=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcbottom(self, val):
if val == self.slidertop.value():
val -= 1
self.targetfig.subplots_adjust(bottom=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def functop(self, val):
if val == self.sliderbottom.value():
val += 1
self.targetfig.subplots_adjust(top=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val/1000.)
if self.drawon: self.targetfig.canvas.draw()
def error_msg_qt( msg, parent=None ):
if not is_string_like( msg ):
msg = ','.join( map( str,msg ) )
QtGui.QMessageBox.warning( None, "Matplotlib", msg, QtGui.QMessageBox.Ok )
def exception_handler( type, value, tb ):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename != None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror != None:
msg += value.strerror
else:
msg += str(value)
if len( msg ) : error_msg_qt( msg )
FigureManager = FigureManagerQT
| gpl-3.0 |
michalkurka/h2o-3 | h2o-py/tests/testdir_algos/isoforextended/pyunit_isoforextended_extension_level_smoke.py | 2 | 3390 | from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from sklearn.datasets import make_blobs
from tests import pyunit_utils
from h2o.estimators.extended_isolation_forest import H2OExtendedIsolationForestEstimator
def extended_isolation_forest_extension_level_smoke():
"""
    Test the extension_level parameter of Extended Isolation Forest.
    Setting extension_level=0 reproduces Isolation Forest's behaviour. This test exercises Isolation Forest's
    well-known 'ghost clusters' artifact, which Extended Isolation Forest mitigates. The overall variance in the
    anomaly score of EIF should be lower than that of IF, and the anomaly score inside the 'ghost clusters' should be lower for IF.
For more information see source paper https://arxiv.org/pdf/1811.02141.pdf (Figure 2).
"""
seed = 0xBEEF
double_blob = make_blobs(centers=[[10, 0], [0, 10]], cluster_std=[1, 1], random_state=seed,
n_samples=500, n_features=2)[0]
train = h2o.H2OFrame(double_blob)
anomalies = h2o.H2OFrame([[0, 0], [10, 10]]) # Points in the ghost clusters
eif_model = H2OExtendedIsolationForestEstimator(ntrees=100, seed=seed, sample_size=255, extension_level=1)
eif_model.train(training_frame=train)
eif_overall_anomaly_score = eif_model.predict(train)
eif_overall_anomaly = eif_overall_anomaly_score['anomaly_score'].as_data_frame(use_pandas=True)["anomaly_score"]
if_model = H2OExtendedIsolationForestEstimator(ntrees=100, seed=0xBEEF, sample_size=255, extension_level=0)
if_model.train(training_frame=train)
if_overall_anomaly_score = if_model.predict(train)
if_overall_anomaly = if_overall_anomaly_score['anomaly_score'].as_data_frame(use_pandas=True)["anomaly_score"]
eif_anomaly_score = eif_model.predict(anomalies)['anomaly_score'].as_data_frame(use_pandas=True)["anomaly_score"]
if_anomaly_score = if_model.predict(anomalies)['anomaly_score'].as_data_frame(use_pandas=True)["anomaly_score"]
assert if_anomaly_score[0] < eif_anomaly_score[0], \
"The anomaly score of simulated Isolation Forest's should be significantly lower than score of " \
"Extended Isolation Forest because this point is in 'Ghost cluster'. " + str(if_anomaly_score[0]) + " < " \
+ str(eif_anomaly_score[0])
assert if_anomaly_score[1] < eif_anomaly_score[1], \
"The anomaly score of simulated Isolation Forest's should be significantly lower than score of " \
"Extended Isolation Forest because this point is in 'Ghost cluster'. " + str(if_anomaly_score[1]) + " < " \
+ str(eif_anomaly_score[1])
assert 0.0015 < eif_overall_anomaly.var() < 0.0020 < if_overall_anomaly.var() < 0.0023, \
"Not expected output: Variance in anomaly score of Extended Isolation Forest is suspiciously different from " \
"Isolation Forest (EIF with extension_level=0). In general, the overall variance in anomaly score of EIF " \
"should be lower than variance in score of IF. It could be potential bug in extension_level parameter " \
"handling because " + str(eif_overall_anomaly.var()) + " should be lower than " + str(if_overall_anomaly.var())
if __name__ == "__main__":
pyunit_utils.standalone_test(extended_isolation_forest_extension_level_smoke)
else:
extended_isolation_forest_extension_level_smoke()
| apache-2.0 |
mne-tools/mne-tools.github.io | 0.12/_downloads/plot_find_ecg_artifacts.py | 14 | 1313 | """
==================
Find ECG artifacts
==================
Locate QRS component of ECG.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
event_id = 999
ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, event_id,
ch_name='MEG 1531')
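# Note (descriptive only): find_ecg_events also returns the ECG channel used
# and the average pulse; both are discarded here via the `_, _` unpacking.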
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
include=['MEG 1531'], exclude='bads')
tmin, tmax = -0.1, 0.1
epochs = mne.Epochs(raw, ecg_events, event_id, tmin, tmax, picks=picks,
proj=False)
data = epochs.get_data()
print("Number of detected ECG artifacts : %d" % len(data))
###############################################################################
# Plot ECG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('ECG')
plt.show()
| bsd-3-clause |
aabadie/scikit-learn | examples/neural_networks/plot_mlp_training_curves.py | 58 | 3692 | """
========================================================
Compare Stochastic learning strategies for MLPClassifier
========================================================
This example visualizes some training loss curves for different stochastic
learning strategies, including SGD and Adam. Because of time-constraints, we
use several small datasets, for which L-BFGS might be more suitable. The
general trend shown in these examples seems to carry over to larger datasets,
however.
Note that those results can be highly dependent on the value of
``learning_rate_init``.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
# different learning rate schedules and momentum parameters
params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0,
'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0,
'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'solver': 'adam', 'learning_rate_init': 0.01}]
labels = ["constant learning-rate", "constant with momentum",
"constant with Nesterov's momentum",
"inv-scaling learning-rate", "inv-scaling with momentum",
"inv-scaling with Nesterov's momentum", "adam"]
plot_args = [{'c': 'red', 'linestyle': '-'},
{'c': 'green', 'linestyle': '-'},
{'c': 'blue', 'linestyle': '-'},
{'c': 'red', 'linestyle': '--'},
{'c': 'green', 'linestyle': '--'},
{'c': 'blue', 'linestyle': '--'},
{'c': 'black', 'linestyle': '-'}]
def plot_on_dataset(X, y, ax, name):
# for each dataset, plot learning for each learning strategy
print("\nlearning on dataset %s" % name)
ax.set_title(name)
X = MinMaxScaler().fit_transform(X)
mlps = []
if name == "digits":
# digits is larger but converges fairly quickly
max_iter = 15
else:
max_iter = 400
for label, param in zip(labels, params):
print("training: %s" % label)
mlp = MLPClassifier(verbose=0, random_state=0,
max_iter=max_iter, **param)
mlp.fit(X, y)
mlps.append(mlp)
print("Training set score: %f" % mlp.score(X, y))
print("Training set loss: %f" % mlp.loss_)
for mlp, label, args in zip(mlps, labels, plot_args):
ax.plot(mlp.loss_curve_, label=label, **args)
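# Note (descriptive only): MLPClassifier records the training loss at each
# iteration in its ``loss_curve_`` attribute when fitted with the sgd or adam
# solver, which is what the curves plotted above are drawn from.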
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
# load / generate some toy datasets
iris = datasets.load_iris()
digits = datasets.load_digits()
data_sets = [(iris.data, iris.target),
(digits.data, digits.target),
datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
datasets.make_moons(noise=0.3, random_state=0)]
for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits',
'circles', 'moons']):
plot_on_dataset(*data, ax=ax, name=name)
fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center")
plt.show()
| bsd-3-clause |
buguen/pylayers | pylayers/measures/parker/plot_scanner.py | 3 | 1084 | import matplotlib.pyplot as plt
import numpy as np
import matplotlib.patches as patches
from matplotlib.path import Path
import matplotlib.lines as mlines
from matplotlib.collections import PatchCollection
pA = np.array([0,0])
pB = np.array([0,1.2])
pC = np.array([0.7,0])
pD = np.array([0.7,1.2])
pO = (pA+pB+pC+pD)/4
verts = [
(pA[0], pA[1]), # left, bottom
(pB[0], pB[1]), # left, top
(pD[0], pD[1]), # right, top
(pC[0],pC[1]), # right, bottom
(pA[0],pA[1]), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
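# Descriptive note: the Path pairs each vertex with a drawing code; the final
# vertex is ignored by CLOSEPOLY, which simply closes the polygon back to the
# MOVETO point.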
patch = patches.PathPatch(path,facecolor='blue',lw=2,alpha=0.3)
ax = plt.gca()
circle = patches.Circle(pO,0.1,fc='orange',ec=None)
ax.add_patch(circle)
ax.add_patch(patch)
a1 = ax.arrow(pA[0],pA[1],(pB[0]-pA[0]),(pB[1]-pA[1]),head_width=0.05,head_length=0.1,fc='k',ec='k')
a2 = ax.arrow(pC[0],pC[1],(pD[0]-pC[0]),(pD[1]-pC[1]),head_width=0.05,head_length=0.1,fc='k',ec='k')
ax.axis('equal')
plt.show()
| lgpl-3.0 |
phobson/statsmodels | statsmodels/discrete/discrete_margins.py | 1 | 25391 | #Splitting out marginal effects to see if they can be generalized
from statsmodels.compat.python import lzip, callable, range
import numpy as np
from scipy.stats import norm
from statsmodels.tools.decorators import cache_readonly, resettable_cache
#### margeff helper functions ####
#NOTE: todo marginal effects for group 2
# group 2 oprobit, ologit, gologit, mlogit, biprobit
def _check_margeff_args(at, method):
"""
Checks valid options for margeff
"""
if at not in ['overall','mean','median','zero','all']:
raise ValueError("%s not a valid option for `at`." % at)
if method not in ['dydx','eyex','dyex','eydx']:
raise ValueError("method is not understood. Got %s" % method)
def _check_discrete_args(at, method):
"""
Checks the arguments for margeff if the exogenous variables are discrete.
"""
if method in ['dyex','eyex']:
raise ValueError("%s not allowed for discrete variables" % method)
if at in ['median', 'zero']:
raise ValueError("%s not allowed for discrete variables" % at)
def _get_const_index(exog):
"""
Returns a boolean array of non-constant column indices in exog and
    a scalar array giving the location of the constant, or None
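    Examples
    --------
    Illustrative only; a constant first column and one varying column:
    >>> exog = np.column_stack((np.ones(5), np.arange(5.)))
    >>> effects_idx, const_idx = _get_const_index(exog)
    >>> const_idx
    array([0])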
"""
effects_idx = exog.var(0) != 0
if np.any(~effects_idx):
const_idx = np.where(~effects_idx)[0]
else:
const_idx = None
return effects_idx, const_idx
def _isdummy(X):
"""
Given an array X, returns the column indices for the dummy variables.
Parameters
----------
X : array-like
A 1d or 2d array of numbers
Examples
--------
>>> X = np.random.randint(0, 2, size=(15,5)).astype(float)
>>> X[:,1:3] = np.random.randn(15,2)
>>> ind = _isdummy(X)
>>> ind
array([0, 3, 4])
"""
X = np.asarray(X)
if X.ndim > 1:
ind = np.zeros(X.shape[1]).astype(bool)
max = (np.max(X, axis=0) == 1)
min = (np.min(X, axis=0) == 0)
remainder = np.all(X % 1. == 0, axis=0)
ind = min & max & remainder
if X.ndim == 1:
ind = np.asarray([ind])
return np.where(ind)[0]
def _get_dummy_index(X, const_idx):
dummy_ind = _isdummy(X)
dummy = True
# adjust back for a constant because effects doesn't have one
if const_idx is not None:
dummy_ind[dummy_ind > const_idx] -= 1
if dummy_ind.size == 0: # don't waste your time
dummy = False
dummy_ind = None # this gets passed to stand err func
return dummy_ind, dummy
def _iscount(X):
"""
Given an array X, returns the column indices for count variables.
Parameters
----------
X : array-like
A 1d or 2d array of numbers
Examples
--------
>>> X = np.random.randint(0, 10, size=(15,5)).astype(float)
>>> X[:,1:3] = np.random.randn(15,2)
>>> ind = _iscount(X)
>>> ind
array([0, 3, 4])
"""
X = np.asarray(X)
remainder = np.logical_and(np.logical_and(np.all(X % 1. == 0, axis = 0),
X.var(0) != 0), np.all(X >= 0, axis=0))
dummy = _isdummy(X)
remainder = np.where(remainder)[0].tolist()
for idx in dummy:
remainder.remove(idx)
return np.array(remainder)
def _get_count_index(X, const_idx):
count_ind = _iscount(X)
count = True
# adjust back for a constant because effects doesn't have one
if const_idx is not None:
count_ind[count_ind > const_idx] -= 1
if count_ind.size == 0: # don't waste your time
count = False
count_ind = None # for stand err func
return count_ind, count
def _get_margeff_exog(exog, at, atexog, ind):
if atexog is not None: # user supplied
if isinstance(atexog, dict):
# assumes values are singular or of len(exog)
for key in atexog:
exog[:,key] = atexog[key]
elif isinstance(atexog, np.ndarray): #TODO: handle DataFrames
if atexog.ndim == 1:
k_vars = len(atexog)
else:
k_vars = atexog.shape[1]
            if k_vars != exog.shape[1]:
                raise ValueError("atexog does not have the same number "
                                 "of variables as exog")
exog = atexog
#NOTE: we should fill in atexog after we process at
if at == 'mean':
exog = np.atleast_2d(exog.mean(0))
elif at == 'median':
exog = np.atleast_2d(np.median(exog, axis=0))
elif at == 'zero':
exog = np.zeros((1,exog.shape[1]))
exog[0,~ind] = 1
return exog
def _get_count_effects(effects, exog, count_ind, method, model, params):
"""
If there's a count variable, the predicted difference is taken by
subtracting one and adding one to exog then averaging the difference
"""
# this is the index for the effect and the index for count col in exog
for i in count_ind:
exog0 = exog.copy()
exog0[:, i] -= 1
effect0 = model.predict(params, exog0)
exog0[:, i] += 2
effect1 = model.predict(params, exog0)
#NOTE: done by analogy with dummy effects but untested bc
# stata doesn't handle both count and eydx anywhere
if 'ey' in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[:, i] = ((effect1 - effect0)/2)
return effects
def _get_dummy_effects(effects, exog, dummy_ind, method, model, params):
"""
If there's a dummy variable, the predicted difference is taken at
0 and 1
"""
# this is the index for the effect and the index for dummy col in exog
for i in dummy_ind:
exog0 = exog.copy() # only copy once, can we avoid a copy?
exog0[:,i] = 0
effect0 = model.predict(params, exog0)
#fittedvalues0 = np.dot(exog0,params)
exog0[:,i] = 1
effect1 = model.predict(params, exog0)
if 'ey' in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[:, i] = (effect1 - effect0)
return effects
def _effects_at(effects, at):
if at == 'all':
effects = effects
elif at == 'overall':
effects = effects.mean(0)
else:
effects = effects[0,:]
return effects
def _margeff_cov_params_dummy(model, cov_margins, params, exog, dummy_ind,
method, J):
"""
Returns the Jacobian for discrete regressors for use in margeff_cov_params.
For discrete regressors the marginal effect is
\Delta F = F(XB) | d = 1 - F(XB) | d = 0
The row of the Jacobian for this variable is given by
f(XB)*X | d = 1 - f(XB)*X | d = 0
Where F is the default prediction of the model.
"""
for i in dummy_ind:
exog0 = exog.copy()
exog1 = exog.copy()
exog0[:,i] = 0
exog1[:,i] = 1
dfdb0 = model._derivative_predict(params, exog0, method)
dfdb1 = model._derivative_predict(params, exog1, method)
dfdb = (dfdb1 - dfdb0)
if dfdb.ndim >= 2: # for overall
dfdb = dfdb.mean(0)
if J > 1:
K = dfdb.shape[1] // (J-1)
cov_margins[i::K, :] = dfdb
else:
cov_margins[i, :] = dfdb # how each F changes with change in B
return cov_margins
def _margeff_cov_params_count(model, cov_margins, params, exog, count_ind,
method, J):
"""
Returns the Jacobian for discrete regressors for use in margeff_cov_params.
For discrete regressors the marginal effect is
\Delta F = F(XB) | d += 1 - F(XB) | d -= 1
The row of the Jacobian for this variable is given by
(f(XB)*X | d += 1 - f(XB)*X | d -= 1) / 2
where F is the default prediction for the model.
"""
for i in count_ind:
exog0 = exog.copy()
exog0[:,i] -= 1
dfdb0 = model._derivative_predict(params, exog0, method)
exog0[:,i] += 2
dfdb1 = model._derivative_predict(params, exog0, method)
        dfdb = (dfdb1 - dfdb0) / 2.
        if dfdb.ndim >= 2: # for overall
            dfdb = dfdb.mean(0)
        if J > 1:
            K = dfdb.shape[1] // (J-1)
cov_margins[i::K, :] = dfdb
else:
cov_margins[i, :] = dfdb # how each F changes with change in B
return cov_margins
def margeff_cov_params(model, params, exog, cov_params, at, derivative,
dummy_ind, count_ind, method, J):
"""
Computes the variance-covariance of marginal effects by the delta method.
Parameters
----------
model : model instance
The model that returned the fitted results. Its pdf method is used
for computing the Jacobian of discrete variables in dummy_ind and
count_ind
params : array-like
estimated model parameters
exog : array-like
exogenous variables at which to calculate the derivative
cov_params : array-like
The variance-covariance of the parameters
at : str
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation.
        Only 'overall' has any effect here.
derivative : function or array-like
If a function, it returns the marginal effects of the model with
respect to the exogenous variables evaluated at exog. Expected to be
called derivative(params, exog). This will be numerically
differentiated. Otherwise, it can be the Jacobian of the marginal
effects with respect to the parameters.
dummy_ind : array-like
Indices of the columns of exog that contain dummy variables
count_ind : array-like
Indices of the columns of exog that contain count variables
Notes
-----
For continuous regressors, the variance-covariance is given by
Asy. Var[MargEff] = [d margeff / d params] V [d margeff / d params]'
where V is the parameter variance-covariance.
The outer Jacobians are computed via numerical differentiation if
derivative is a function.
"""
if callable(derivative):
from statsmodels.tools.numdiff import approx_fprime_cs
params = params.ravel('F') # for Multinomial
try:
jacobian_mat = approx_fprime_cs(params, derivative,
args=(exog,method))
except TypeError: # norm.cdf doesn't take complex values
from statsmodels.tools.numdiff import approx_fprime
jacobian_mat = approx_fprime(params, derivative,
args=(exog,method))
if at == 'overall':
jacobian_mat = np.mean(jacobian_mat, axis=1)
else:
jacobian_mat = jacobian_mat.squeeze() # exog was 2d row vector
if dummy_ind is not None:
jacobian_mat = _margeff_cov_params_dummy(model, jacobian_mat,
params, exog, dummy_ind, method, J)
if count_ind is not None:
jacobian_mat = _margeff_cov_params_count(model, jacobian_mat,
params, exog, count_ind, method, J)
else:
jacobian_mat = derivative
#NOTE: this won't go through for at == 'all'
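    # Delta method: Asy. Var[margeff] = J V J', with J the Jacobian of the
    # marginal effects w.r.t. the parameters and V = cov_params.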
return np.dot(np.dot(jacobian_mat, cov_params), jacobian_mat.T)
def margeff_cov_with_se(model, params, exog, cov_params, at, derivative,
dummy_ind, count_ind, method, J):
"""
See margeff_cov_params.
Same function but returns both the covariance of the marginal effects
and their standard errors.
"""
cov_me = margeff_cov_params(model, params, exog, cov_params, at,
derivative, dummy_ind,
count_ind, method, J)
return cov_me, np.sqrt(np.diag(cov_me))
def margeff():
pass
def _check_at_is_all(method):
if method['at'] == 'all':
raise NotImplementedError("Only margeff are available when `at` is "
"all. Please input specific points if you would like to "
"do inference.")
_transform_names = dict(dydx='dy/dx',
eyex='d(lny)/d(lnx)',
dyex='dy/d(lnx)',
eydx='d(lny)/dx')
class Margins(object):
"""
Mostly a do nothing class. Lays out the methods expected of a sub-class.
This is just a sketch of what we may want out of a general margins class.
I (SS) need to look at details of other models.
"""
def __init__(self, results, get_margeff, derivative, dist=None,
margeff_args=()):
self._cache = resettable_cache()
self.results = results
self.dist = dist
self.get_margeff(margeff_args)
def _reset(self):
self._cache = resettable_cache()
def get_margeff(self, *args, **kwargs):
self._reset()
self.margeff = self.get_margeff(*args)
@cache_readonly
def tvalues(self):
raise NotImplementedError
@cache_readonly
def cov_margins(self):
raise NotImplementedError
@cache_readonly
def margins_se(self):
raise NotImplementedError
def summary_frame(self):
raise NotImplementedError
@cache_readonly
def pvalues(self):
raise NotImplementedError
def conf_int(self, alpha=.05):
raise NotImplementedError
def summary(self, alpha=.05):
raise NotImplementedError
#class DiscreteMargins(Margins):
class DiscreteMargins(object):
"""Get marginal effects of a Discrete Choice model.
Parameters
----------
results : DiscreteResults instance
The results instance of a fitted discrete choice model
args : tuple
Args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
kwargs : dict
Keyword args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
"""
def __init__(self, results, args, kwargs={}):
self._cache = resettable_cache()
self.results = results
self.get_margeff(*args, **kwargs)
def _reset(self):
self._cache = resettable_cache()
@cache_readonly
def tvalues(self):
_check_at_is_all(self.margeff_options)
return self.margeff / self.margeff_se
def summary_frame(self, alpha=.05):
"""
Returns a DataFrame summarizing the marginal effects.
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
frame : DataFrames
A DataFrame summarizing the marginal effects.
"""
_check_at_is_all(self.margeff_options)
from pandas import DataFrame
names = [_transform_names[self.margeff_options['method']],
'Std. Err.', 'z', 'Pr(>|z|)',
                 'Conf. Int. Low', 'Conf. Int. Hi.']
ind = self.results.model.exog.var(0) != 0 # True if not a constant
exog_names = self.results.model.exog_names
var_names = [name for i,name in enumerate(exog_names) if ind[i]]
table = np.column_stack((self.margeff, self.margeff_se, self.tvalues,
self.pvalues, self.conf_int(alpha)))
return DataFrame(table, columns=names, index=var_names)
@cache_readonly
def pvalues(self):
_check_at_is_all(self.margeff_options)
return norm.sf(np.abs(self.tvalues)) * 2
def conf_int(self, alpha=.05):
"""
Returns the confidence intervals of the marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
conf_int : ndarray
An array with lower, upper confidence intervals for the marginal
effects.
"""
_check_at_is_all(self.margeff_options)
me_se = self.margeff_se
q = norm.ppf(1 - alpha / 2)
lower = self.margeff - q * me_se
upper = self.margeff + q * me_se
return np.asarray(lzip(lower, upper))
def summary(self, alpha=.05):
"""
Returns a summary table for marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
Summary : SummaryTable
A SummaryTable instance
"""
_check_at_is_all(self.margeff_options)
results = self.results
model = results.model
title = model.__class__.__name__ + " Marginal Effects"
method = self.margeff_options['method']
top_left = [('Dep. Variable:', [model.endog_names]),
('Method:', [method]),
('At:', [self.margeff_options['at']]),]
from statsmodels.iolib.summary import (Summary, summary_params,
table_extend)
exog_names = model.exog_names[:] # copy
smry = Summary()
# sigh, we really need to hold on to this in _data...
_, const_idx = _get_const_index(model.exog)
if const_idx is not None:
exog_names.pop(const_idx)
J = int(getattr(model, "J", 1))
if J > 1:
yname, yname_list = results._get_endog_name(model.endog_names,
None, all=True)
else:
yname = model.endog_names
yname_list = [yname]
smry.add_table_2cols(self, gleft=top_left, gright=[],
yname=yname, xname=exog_names, title=title)
#NOTE: add_table_params is not general enough yet for margeff
# could use a refactor with getattr instead of hard-coded params
# tvalues etc.
table = []
conf_int = self.conf_int(alpha)
margeff = self.margeff
margeff_se = self.margeff_se
tvalues = self.tvalues
pvalues = self.pvalues
if J > 1:
for eq in range(J):
restup = (results, margeff[:,eq], margeff_se[:,eq],
tvalues[:,eq], pvalues[:,eq], conf_int[:,:,eq])
tble = summary_params(restup, yname=yname_list[eq],
xname=exog_names, alpha=alpha, use_t=False,
skip_header=True)
tble.title = yname_list[eq]
# overwrite coef with method name
header = ['', _transform_names[method], 'std err', 'z',
'P>|z|', '[%3.1f%% Conf. Int.]' % (100-alpha*100)]
tble.insert_header_row(0, header)
#from IPython.core.debugger import Pdb; Pdb().set_trace()
table.append(tble)
table = table_extend(table, keep_headers=True)
else:
restup = (results, margeff, margeff_se, tvalues, pvalues, conf_int)
table = summary_params(restup, yname=yname, xname=exog_names,
alpha=alpha, use_t=False, skip_header=True)
header = ['', _transform_names[method], 'std err', 'z',
'P>|z|', '[%3.1f%% Conf. Int.]' % (100-alpha*100)]
table.insert_header_row(0, header)
smry.tables.append(table)
return smry
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is all
only margeff will be available.
Note that if `exog` is specified, then marginal effects for all
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semielasticity -- dy/d(lnx)
            - 'eydx' - estimate semielasticity -- d(lny)/dx
            Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables.
atexog : array-like, optional
Optionally, you can provide the exogenous variables over which to
            get the marginal effects. This should be a dictionary with the
            zero-indexed column number as the key and the value at which to
            hold that variable as the dictionary value.
Default is None for all independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
effects : ndarray
the marginal effect corresponding to the input options
Notes
-----
When using after Poisson, returns the expected number of events
per period, assuming that the model is loglinear.
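        Examples
        --------
        A hypothetical sketch (dataset and model choice are illustrative only):
        >>> import statsmodels.api as sm
        >>> data = sm.datasets.spector.load_pandas()
        >>> exog = sm.add_constant(data.exog)
        >>> res = sm.Logit(data.endog, exog).fit(disp=0)
        >>> margins = res.get_margeff(at='mean', method='dydx')
        >>> frame = margins.summary_frame()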
"""
self._reset() # always reset the cache when this is called
#TODO: if at is not all or overall, we can also put atexog values
# in summary table head
method = method.lower()
at = at.lower()
_check_margeff_args(at, method)
self.margeff_options = dict(method=method, at=at)
results = self.results
model = results.model
params = results.params
exog = model.exog.copy() # copy because values are changed
effects_idx, const_idx = _get_const_index(exog)
if dummy:
_check_discrete_args(at, method)
dummy_idx, dummy = _get_dummy_index(exog, const_idx)
else:
dummy_idx = None
if count:
_check_discrete_args(at, method)
count_idx, count = _get_count_index(exog, const_idx)
else:
count_idx = None
# get the exogenous variables
exog = _get_margeff_exog(exog, at, atexog, effects_idx)
# get base marginal effects, handled by sub-classes
effects = model._derivative_exog(params, exog, method,
dummy_idx, count_idx)
J = getattr(model, 'J', 1)
effects_idx = np.tile(effects_idx, J) # adjust for multi-equation.
effects = _effects_at(effects, at)
if at == 'all':
if J > 1:
K = model.K - np.any(~effects_idx) # subtract constant
self.margeff = effects[:, effects_idx].reshape(-1, K, J,
order='F')
else:
self.margeff = effects[:, effects_idx]
else:
# Set standard error of the marginal effects by Delta method.
margeff_cov, margeff_se = margeff_cov_with_se(model, params, exog,
results.cov_params(), at,
model._derivative_exog,
dummy_idx, count_idx,
method, J)
# reshape for multi-equation
if J > 1:
K = model.K - np.any(~effects_idx) # subtract constant
self.margeff = effects[effects_idx].reshape(K, J, order='F')
self.margeff_se = margeff_se[effects_idx].reshape(K, J,
order='F')
self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
else:
# don't care about at constant
self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
self.margeff_se = margeff_se[effects_idx]
self.margeff = effects[effects_idx]
| bsd-3-clause |
BiaDarkia/scikit-learn | examples/applications/plot_out_of_core_classification.py | 19 | 13684 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves.urllib.request import urlretrieve
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
# --------------------------------
#
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
sys.stdout.write(
'\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb))
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
sys.stdout.write('\r')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
# ----
#
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
alternate_sign=False)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(max_iter=5),
'Perceptron': Perceptron(tol=1e-3),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(tol=1e-3),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
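# Illustrative sketch (not executed here): the generator above is consumed as
#     for X_text, y in iter_minibatches(stream_reuters_documents(), 1000):
#         X = vectorizer.transform(X_text)
# which is essentially what the main loop below does with `minibatch_iterators`.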
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, n_test_documents)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
# Pull one more batch the size of the test set from the stream and discard it;
# these documents are used neither for training nor for testing.
get_minibatch(data_stream, n_test_documents)
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the iterator that yields mini-batches of documents drawn from the
# data_stream of parsed Reuters SGML files.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
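        # `classes` must list every possible label: incremental learners need
        # the full label set on the first call to partial_fit, and a given
        # mini-batch may not contain examples of both classes.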
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
# ------------
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = ['b', 'g', 'r', 'c', 'm', 'y']
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
Srisai85/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
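    Examples
    --------
    Illustrative only; the exact path depends on your environment::
        >>> path = get_data_home()  # doctest: +SKIP
        >>> path.endswith('scikit_learn_data')  # doctest: +SKIP
        True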
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
    structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
    Similar feature extractors should be built for other kinds of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
    description : string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
    decode_error : {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
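    Examples
    --------
    A minimal sketch, assuming a folder laid out like the container_folder
    example above::
        >>> dataset = load_files('container_folder', encoding='utf-8')  # doctest: +SKIP
        >>> sorted(dataset.target_names)  # doctest: +SKIP
        ['category_1_folder', 'category_2_folder']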
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
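        # The first CSV row encodes the dataset layout: n_samples and
        # n_features, followed by the class (target) names.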
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
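    # Reshape a view of the flat 64-pixel rows into 8x8 images; no copy of the
    # data is made.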
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
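    Examples
    --------
    A quick check of the dataset shape, matching the table above::
        >>> from sklearn.datasets import load_diabetes
        >>> diabetes = load_diabetes()
        >>> diabetes.data.shape
        (442, 10)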
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
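    Examples
    --------
    A quick check of the shapes (20 samples, 3 columns each, as stated above)::
        >>> from sklearn.datasets import load_linnerud
        >>> linnerud = load_linnerud()
        >>> linnerud.data.shape
        (20, 3)
        >>> linnerud.target.shape
        (20, 3)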
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
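        # The first row stores n_samples and n_features; the second row holds
        # the column names, with the target (house price) as the last column.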
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
    Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
    ----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
    --------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |