repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
boland1992/seissuite_iran | build/lib.linux-x86_64-2.7/seissuite/azimuth/heatinterpolate.py | 8 | 3647 | #!/usr/bin/env python
# combining density estimation and delaunay interpolation for confidence-weighted value mapping
# Dan Stowell, April 2013
import numpy as np
from numpy import random
from math import exp, log
from scipy import stats, mgrid, c_, reshape, rot90
import matplotlib.delaunay
import matplotlib.tri as tri
import matplotlib.delaunay.interpolate
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from colorsys import hls_to_rgb
#############################
# user settings
n = 100
gridsize = 100
fontsize = 'xx-small'
#############################
# first generate some random [x,y,z] data -- random locations concentrated near the middle, and random z-values
data = random.randn(3, n) * 100.
# we will add some correlation to the z-values
data[2,:] += data[1,:]
data[2,:] += data[0,:]
# scale the z-values to 0--1 for convenience
zmin = np.min(data[2,:])
zmax = np.max(data[2,:])
data[2,:] = (data[2,:] - zmin) / (zmax - zmin)
xmin = np.min(data[0,:])
xmax = np.max(data[0,:])
ymin = np.min(data[1,:])
ymax = np.max(data[1,:])
zmin = np.min(data[2,:])
zmax = np.max(data[2,:])
##################################################
# plot it simply
plt.figure()
fig = plt.subplot(2,2,1)
for datum in data.T:
plt.plot(datum[0], datum[1], 'x', color=str(1.0 - datum[2]))
plt.title("scatter", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
##################################################
# now make a KDE of it and plot that
fig = plt.subplot(2,2,2)
kdeX, kdeY = mgrid[xmin:xmax:gridsize*1j, ymin:ymax:gridsize*1j]
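# (note: the complex step `gridsize*1j` makes mgrid return exactly `gridsize`
#  evenly spaced samples per axis, endpoints included)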
positions = c_[kdeX.ravel(), kdeY.ravel()]
values = c_[data[0,:], data[1,:]]
kernel = stats.kde.gaussian_kde(values.T)
kdeZ = reshape(kernel(positions.T).T, kdeX.T.shape)
plt.imshow(rot90(kdeZ), cmap=cm.binary, aspect='auto')
plt.title("density of points", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
##################################################
# now make a delaunay triangulation of it and plot that
fig = plt.subplot(2,2,3)
tt = matplotlib.delaunay.triangulate.Triangulation(data[0,:], data[1,:])
#triang = tri.Triangulation(data[0,:], data[1,:])
#plt.triplot(triang, 'bo-') # this plots the actual triangles of the triangulation. I'm more interested in their interpolated values
#extrap = tt.linear_extrapolator(data[2,:])
extrap = tt.nn_extrapolator(data[2,:])
interped = extrap[xmin:xmax:gridsize*1j, ymin:ymax:gridsize*1j]
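# Note: matplotlib.delaunay was removed in later matplotlib releases. A rough,
# untested sketch of the same interpolation step using the supported
# matplotlib.tri API (linear interpolation inside the convex hull only, no
# extrapolation) would be:
#   triang = tri.Triangulation(data[0,:], data[1,:])
#   interp = tri.LinearTriInterpolator(triang, data[2,:])
#   gx, gy = np.meshgrid(np.linspace(xmin, xmax, gridsize),
#                        np.linspace(ymin, ymax, gridsize))
#   interped = interp(gx, gy)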
plt.imshow(rot90(interped), cmap=cm.gist_earth_r, aspect='auto')
plt.title("interpolated values", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
##################################################
# now combine delaunay with KDE
fig = plt.subplot(2,2,4)
colours = np.zeros((gridsize, gridsize, 4))
kdeZmin = np.min(kdeZ)
kdeZmax = np.max(kdeZ)
confdepth = 0.45
for x in range(gridsize):
for y in range(gridsize):
conf = (kdeZ[x,y] - kdeZmin) / (kdeZmax - kdeZmin)
val = min(1., max(0., interped[x,y]))
colour = list(cm.gist_earth_r(val))
# now fade it out to white according to conf
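        # (i.e. a linear blend toward white: new = conf*colour + (1-conf)*1.0 per channel)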
for index in [0,1,2]:
colour[index] = (colour[index] * conf) + (1.0 * (1. -conf))
colours[x,y,:] = colour
#colours[x,y,:] = np.hstack((hls_to_rgb(val, 0.5 + confdepth - (confdepth * conf), 1.0), 1.0))
#colours[x,y,:] = [conf, conf, 1.0-conf, val]
plt.imshow(rot90(colours), cmap=cm.gist_earth_r, aspect='auto')
plt.title("interpolated & confidence-shaded", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
############################################
plt.savefig("output/plot_heati_simple.pdf", papertype='A4', format='pdf')
| gpl-3.0 |
palandatarxcom/sklearn_tutorial_cn | notebooks/fig_code/svm_gui.py | 47 | 11549 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by pointing and clicking and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
    """The Model which holds the data. It implements the
    observable in the observer pattern and notifies the
    registered observers on change events.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
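        # evaluate the decision function on a regular grid (1-unit spacing)
        # covering the visible plot area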
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
    """The matplotlib/Tk view: plots data points, support vectors and the decision surface."""
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('key_press_event', self.onkeypress)
canvas.mpl_connect('key_release_event', self.onkeyrelease)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.shift_down = False
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
        self.ax.text(-50, -60, "Linear: $u^T v$")
        self.ax.text(-20, -60, r"RBF: $\exp (-\gamma \| u-v \|^2)$")
        self.ax.text(10, -60, r"Poly: $(\gamma \, u^T v + r)^d$")
def onkeypress(self, event):
if event.key == "shift":
self.shift_down = True
def onkeyrelease(self, event):
if event.key == "shift":
self.shift_down = False
def onclick(self, event):
if event.xdata and event.ydata:
if self.shift_down or event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
elif event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
    def plot_support_vectors(self, support_vectors):
        """Plot the support vectors by placing circles over the
        corresponding data points and adding the circle collection
        to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
samidabeast/VideoGeni | full_stuffs.py | 1 | 2835 | import sys
import scipy as sp
import scipy.misc as pil
import matplotlib.pyplot as plot
filepath = sys.argv[1]
im = pil.imread(filepath)
RMAX = 255
RMIN = 140
GMAX = 140
GMIN = 30
#green*3 >= red
BMAX = 80
BMIN = 0
def query(mx, mn):
return (mn, mx-mn)
crange = [query(RMAX,RMIN), query(GMAX,GMIN), query(BMAX,BMIN)]
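#each crange entry is (lower bound, span); inColorRange() below checks
#lower <= channel average <= lower + span for each of R, G, B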
#determine size of each block
#should this be based on size of image?
d = 25
def block(box):
    """Return the per-channel (R, G, B) average colour of the given block."""
avg = []
for i in range(3):
avg.append(sp.average(box[...,i])) #sp.around , decimals=0
#right now this is truncating, might want to think about edge cases more
return sp.array(avg, dtype='uint8') #return avg, but as an array
def inColorRange(c):
    """Check whether an average block colour is inside the "fire" colour range."""
for i in range(3):
if not (crange[i][0] <= c[i] <= crange[i][0]+crange[i][1]):
return False
return True
def isFire(img):
    """Main function: search the image for a fire-coloured block."""
    # Find middle of image (initial block center)
    xmax = img.shape[0]
    ymax = img.shape[1]
x = xmax//2
y = ymax//2
fedex = sp.array([],dtype='uint8')
c = 0 #count, how many i's are there?
i = 0 #iteration, which i are we on?
while True:
xd = x+d
yd = y+d
#Clipping code (needs refactoring)
if xd > xmax:
if x < xmax:
xd = xmax
else:
return False
if x < 0:
if x+d > 0:
x = 0
else:
return False
if yd > ymax:
if y < ymax:
yd = ymax
else:
return False
if y < 0:
if y+d > 0:
y = 0
else:
return False
        box = img[x:xd, y:yd, :]
avg = block(box)
box[:] = avg[None,None,:] #sets box to average color #sp.newaxis
if inColorRange(avg):
plot.imshow(box)
plot.show()
return True
else:
if i==0:
i = c//2 + 1
c+=1
if c%2 == 0:
x = x+d if c%4 == 0 else x-d
else:
y = y+d if c%4 == 3 else y-d
i -= 1
#change x,y to point to next block
#have end case when we've checked whole image?
plot.imshow(im)
plot.show()
if isFire(im):
print("fire colors were detected")
else:
print("no fire colors were detected")
plot.imshow(im)
plot.show()
# numpy.empty_like (b)
#how to make shapes with pixelated data...
#color range needs to be fixed
#slope>1
#instead of returning True, add it to an array (index array)
#read page on numpy advanced indexing
#paging sytem: for example 50 chunks per array
| mit |
aktech/sympy | sympy/plotting/plot_implicit.py | 83 | 14400 | """Implicit plotting module for SymPy
The module implements a data series called ImplicitSeries which is used by
``Plot`` class to plot implicit plots for different backends. The module,
by default, implements plotting using interval arithmetic. It switches to a
fall back algorithm if the expression cannot be plotted using interval arithmetic.
It is also possible to specify to use the fall back algorithm for all plots.
Boolean combinations of expressions cannot be plotted by the fall back
algorithm.
See Also
========
sympy.plotting.plot
References
==========
- Jeffrey Allen Tupper. Reliable Two-Dimensional Graphing Methods for
Mathematical Formulae with Two Free Variables.
- Jeffrey Allen Tupper. Graphing Equations with Generalized Interval
Arithmetic. Master's thesis. University of Toronto, 1996
"""
from __future__ import print_function, division
from .plot import BaseSeries, Plot
from .experimental_lambdify import experimental_lambdify, vectorized_lambdify
from .intervalmath import interval
from sympy.core.relational import (Equality, GreaterThan, LessThan,
Relational, StrictLessThan, StrictGreaterThan)
from sympy import Eq, Tuple, sympify, Symbol, Dummy
from sympy.external import import_module
from sympy.logic.boolalg import BooleanFunction
from sympy.polys.polyutils import _sort_gens
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import flatten
import warnings
class ImplicitSeries(BaseSeries):
""" Representation for Implicit plot """
is_implicit = True
def __init__(self, expr, var_start_end_x, var_start_end_y,
has_equality, use_interval_math, depth, nb_of_points,
line_color):
super(ImplicitSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_raster
self.has_equality = has_equality # If the expression has equality, i.e.
#Eq, Greaterthan, LessThan.
self.nb_of_points = nb_of_points
self.use_interval_math = use_interval_math
self.depth = 4 + depth
self.line_color = line_color
def __str__(self):
return ('Implicit equation: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_raster(self):
func = experimental_lambdify((self.var_x, self.var_y), self.expr,
use_interval=True)
xinterval = interval(self.start_x, self.end_x)
yinterval = interval(self.start_y, self.end_y)
try:
temp = func(xinterval, yinterval)
except AttributeError:
if self.use_interval_math:
warnings.warn("Adaptive meshing could not be applied to the"
" expression. Using uniform meshing.")
self.use_interval_math = False
if self.use_interval_math:
return self._get_raster_interval(func)
else:
return self._get_meshes_grid()
def _get_raster_interval(self, func):
""" Uses interval math to adaptively mesh and obtain the plot"""
k = self.depth
interval_list = []
#Create initial 32 divisions
np = import_module('numpy')
xsample = np.linspace(self.start_x, self.end_x, 33)
ysample = np.linspace(self.start_y, self.end_y, 33)
#Add a small jitter so that there are no false positives for equality.
# Ex: y==x becomes True for x interval(1, 2) and y interval(1, 2)
#which will draw a rectangle.
jitterx = (np.random.rand(
len(xsample)) * 2 - 1) * (self.end_x - self.start_x) / 2**20
jittery = (np.random.rand(
len(ysample)) * 2 - 1) * (self.end_y - self.start_y) / 2**20
xsample += jitterx
ysample += jittery
xinter = [interval(x1, x2) for x1, x2 in zip(xsample[:-1],
xsample[1:])]
yinter = [interval(y1, y2) for y1, y2 in zip(ysample[:-1],
ysample[1:])]
interval_list = [[x, y] for x in xinter for y in yinter]
plot_list = []
#recursive call refinepixels which subdivides the intervals which are
#neither True nor False according to the expression.
def refine_pixels(interval_list):
""" Evaluates the intervals and subdivides the interval if the
expression is partially satisfied."""
temp_interval_list = []
plot_list = []
for intervals in interval_list:
#Convert the array indices to x and y values
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
                #If the expression is False over the whole interval, discard it.
                #If it is True throughout, keep the interval for plotting;
                #if it is undecided (None), subdivide it further below.
if func_eval[1] is False or func_eval[0] is False:
pass
elif func_eval == (True, True):
plot_list.append([intervalx, intervaly])
elif func_eval[1] is None or func_eval[0] is None:
#Subdivide
avgx = intervalx.mid
avgy = intervaly.mid
a = interval(intervalx.start, avgx)
b = interval(avgx, intervalx.end)
c = interval(intervaly.start, avgy)
d = interval(avgy, intervaly.end)
temp_interval_list.append([a, c])
temp_interval_list.append([a, d])
temp_interval_list.append([b, c])
temp_interval_list.append([b, d])
return temp_interval_list, plot_list
while k >= 0 and len(interval_list):
interval_list, plot_list_temp = refine_pixels(interval_list)
plot_list.extend(plot_list_temp)
k = k - 1
#Check whether the expression represents an equality
#If it represents an equality, then none of the intervals
#would have satisfied the expression due to floating point
#differences. Add all the undecided values to the plot.
if self.has_equality:
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
if func_eval[1] and func_eval[0] is not False:
plot_list.append([intervalx, intervaly])
return plot_list, 'fill'
def _get_meshes_grid(self):
"""Generates the mesh for generating a contour.
In the case of equality, ``contour`` function of matplotlib can
be used. In other cases, matplotlib's ``contourf`` is used.
"""
equal = False
if isinstance(self.expr, Equality):
expr = self.expr.lhs - self.expr.rhs
equal = True
elif isinstance(self.expr, (GreaterThan, StrictGreaterThan)):
expr = self.expr.lhs - self.expr.rhs
elif isinstance(self.expr, (LessThan, StrictLessThan)):
expr = self.expr.rhs - self.expr.lhs
else:
raise NotImplementedError("The expression is not supported for "
"plotting in uniform meshed plot.")
np = import_module('numpy')
xarray = np.linspace(self.start_x, self.end_x, self.nb_of_points)
yarray = np.linspace(self.start_y, self.end_y, self.nb_of_points)
x_grid, y_grid = np.meshgrid(xarray, yarray)
func = vectorized_lambdify((self.var_x, self.var_y), expr)
z_grid = func(x_grid, y_grid)
z_grid[np.ma.where(z_grid < 0)] = -1
z_grid[np.ma.where(z_grid > 0)] = 1
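        #Collapse the signed values to -1/+1 so that the sign change (the zero
        #level) marks the boundary of the plotted region.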
if equal:
return xarray, yarray, z_grid, 'contour'
else:
return xarray, yarray, z_grid, 'contourf'
@doctest_depends_on(modules=('matplotlib',))
def plot_implicit(expr, x_var=None, y_var=None, **kwargs):
"""A plot function to plot implicit equations / inequalities.
Arguments
=========
- ``expr`` : The equation / inequality that is to be plotted.
- ``x_var`` (optional) : symbol to plot on x-axis or tuple giving symbol
and range as ``(symbol, xmin, xmax)``
- ``y_var`` (optional) : symbol to plot on y-axis or tuple giving symbol
and range as ``(symbol, ymin, ymax)``
If neither ``x_var`` nor ``y_var`` are given then the free symbols in the
expression will be assigned in the order they are sorted.
The following keyword arguments can also be used:
- ``adaptive``. Boolean. The default value is set to True. It has to be
set to False if you want to use a mesh grid.
- ``depth`` integer. The depth of recursion for adaptive mesh grid.
Default value is 0. Takes value in the range (0, 4).
- ``points`` integer. The number of points if adaptive mesh grid is not
      used. Default value is 300.
- ``title`` string .The title for the plot.
- ``xlabel`` string. The label for the x-axis
- ``ylabel`` string. The label for the y-axis
Aesthetics options:
- ``line_color``: float or string. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
plot_implicit, by default, uses interval arithmetic to plot functions. If
    the expression cannot be plotted using interval arithmetic, it defaults to
    generating a contour using a mesh grid with a fixed number of points. By
setting adaptive to False, you can force plot_implicit to use the mesh
grid. The mesh grid method can be effective when adaptive plotting using
    interval arithmetic fails to plot with small line width.
Examples
========
Plot expressions:
>>> from sympy import plot_implicit, cos, sin, symbols, Eq, And
>>> x, y = symbols('x y')
Without any ranges for the symbols in the expression
>>> p1 = plot_implicit(Eq(x**2 + y**2, 5))
With the range for the symbols
>>> p2 = plot_implicit(Eq(x**2 + y**2, 3),
... (x, -3, 3), (y, -3, 3))
With depth of recursion as argument.
>>> p3 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -4, 4), (y, -4, 4), depth = 2)
Using mesh grid and not using adaptive meshing.
>>> p4 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2), adaptive=False)
Using mesh grid with number of points as input.
>>> p5 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2),
... adaptive=False, points=400)
Plotting regions.
>>> p6 = plot_implicit(y > x**2)
Plotting Using boolean conjunctions.
>>> p7 = plot_implicit(And(y > x, y > -x))
When plotting an expression with a single variable (y - 1, for example),
specify the x or the y variable explicitly:
>>> p8 = plot_implicit(y - 1, y_var=y)
>>> p9 = plot_implicit(x - 1, x_var=x)
"""
has_equality = False # Represents whether the expression contains an Equality,
#GreaterThan or LessThan
def arg_expand(bool_expr):
"""
        Recursively expands the arguments of a Boolean Function
"""
for arg in bool_expr.args:
if isinstance(arg, BooleanFunction):
arg_expand(arg)
elif isinstance(arg, Relational):
arg_list.append(arg)
arg_list = []
if isinstance(expr, BooleanFunction):
arg_expand(expr)
#Check whether there is an equality in the expression provided.
if any(isinstance(e, (Equality, GreaterThan, LessThan))
for e in arg_list):
has_equality = True
elif not isinstance(expr, Relational):
expr = Eq(expr, 0)
has_equality = True
elif isinstance(expr, (Equality, GreaterThan, LessThan)):
has_equality = True
xyvar = [i for i in (x_var, y_var) if i is not None]
free_symbols = expr.free_symbols
range_symbols = Tuple(*flatten(xyvar)).free_symbols
undeclared = free_symbols - range_symbols
if len(free_symbols & range_symbols) > 2:
raise NotImplementedError("Implicit plotting is not implemented for "
"more than 2 variables")
#Create default ranges if the range is not provided.
default_range = Tuple(-5, 5)
def _range_tuple(s):
if isinstance(s, Symbol):
return Tuple(s) + default_range
if len(s) == 3:
return Tuple(*s)
raise ValueError('symbol or `(symbol, min, max)` expected but got %s' % s)
if len(xyvar) == 0:
xyvar = list(_sort_gens(free_symbols))
var_start_end_x = _range_tuple(xyvar[0])
x = var_start_end_x[0]
if len(xyvar) != 2:
if x in undeclared or not undeclared:
xyvar.append(Dummy('f(%s)' % x.name))
else:
xyvar.append(undeclared.pop())
var_start_end_y = _range_tuple(xyvar[1])
use_interval = kwargs.pop('adaptive', True)
nb_of_points = kwargs.pop('points', 300)
depth = kwargs.pop('depth', 0)
line_color = kwargs.pop('line_color', "blue")
#Check whether the depth is greater than 4 or less than 0.
if depth > 4:
depth = 4
elif depth < 0:
depth = 0
series_argument = ImplicitSeries(expr, var_start_end_x, var_start_end_y,
has_equality, use_interval, depth,
nb_of_points, line_color)
show = kwargs.pop('show', True)
#set the x and y limits
kwargs['xlim'] = tuple(float(x) for x in var_start_end_x[1:])
kwargs['ylim'] = tuple(float(y) for y in var_start_end_y[1:])
# set the x and y labels
kwargs.setdefault('xlabel', var_start_end_x[0].name)
kwargs.setdefault('ylabel', var_start_end_y[0].name)
p = Plot(series_argument, **kwargs)
if show:
p.show()
return p
| bsd-3-clause |
kbarbary/extinction | docs/conf.py | 1 | 3734 | # -*- coding: utf-8 -*-
#
# build requires sphinx_rtd_theme and numpydoc.
import sys
import os
import sphinx_rtd_theme
import matplotlib.sphinxext.plot_directive
import extinction
# ensure that plot helper is on the path
sys.path.insert(0, os.path.abspath(__file__))
# generate api directory if it doesn't already exist
if not os.path.exists('api'):
os.mkdir('api')
# -- General configuration ------------------------------------------------
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None)}
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'numpydoc',
matplotlib.sphinxext.plot_directive.__name__]
numpydoc_show_class_members = False
autosummary_generate = True
autoclass_content = "class"
autodoc_default_flags = ["members", "no-special-members"]
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'extinction'
copyright = u'2016, Kyle Barbary and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(extinction.__version__.split('.')[0:2])
# The full version, including alpha/beta/rc tags.
release = extinction.__version__
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'obj'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Output file base name for HTML help builder.
htmlhelp_basename = 'extinctiondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'extinction.tex', u'extinction Documentation',
u'Kyle Barbary', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'extinction', u'extinction Documentation',
[u'Kyle Barbary'], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'extinction', u'extinction Documentation',
u'Kyle Barbary', 'extinction', 'One line description of project.',
'Miscellaneous'),
]
| mit |
jayflo/scikit-learn | sklearn/metrics/tests/test_ranking.py | 75 | 40883 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
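    # e.g. (illustration only) y_true=[0, 0, 1, 1] and y_score=[.1, .4, .35, .8]
    # give 3 correctly ordered (pos, neg) pairs out of 4, i.e. an AUC of 0.75.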
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at
    # 1, even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
    # Test that average_precision_score and roc_auc_score are invariant under
    # scaling or shifting of the probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small example that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works when there are no ties
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
        for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account,
        # e.g. two labels tied for rank 1 are both assigned rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
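        # Illustrative example: for scores [0.5, 0.5, 0.25], inv_rank is
        # [1, 1, 0], the raw rank is [1, 1, 2], corr_rank is
        # cumsum(bincount) = [0, 2, 3], and the corrected rank becomes
        # [2, 2, 3]: both tied top scores share rank 2.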
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the number of relevant labels with a rank at least as
            # good (i.e. a smaller or equal rank), including the label itself.
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
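# Illustrative sanity check: this reference implementation reproduces the toy
# values above, e.g.
# _my_lrap([[1, 0, 1]], [[0.25, 0.5, 0.5]]) == (2 / 3 + 1 / 2) / 2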
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
return_indicator=True,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
trohit/stackedBarChart | stackedBarChart.py | 1 | 4808 | #!/usr/bin/env python
# a stacked bar plot with errorbars
import numpy as np
import matplotlib.pyplot as plt
from operator import add
from pprint import pprint
#
# LEGEND
# Read cndl as Candle
# Read lbl as Label
#
# In normal config
# cndlsMasterLbl sits parallel to X Axis
# valuesMasterLbl sits parallel to Y Axis
#
# TITLE
# ^
# |
# Y | +-+
# | | |
# | +-+ | |
# | +-+ | | | |
# | | | | | | |
# +--+-+---+-+---+-+------->
# X Axis cndlsMasterLbl
# Legend
# lyrColors -> lyrNames
#
def stackedBarChart(Title, cndlsMasterLbl, valuesMasterLbl, cndlThickness,
minValue, maxValue, cndlCount, cndlLbls, arrVals, lyrNames,
lyrColors):
#draw bar chart
print("cndlCount:"+str(cndlCount))
assert( len(arrVals) == len(lyrColors))
# XXX: Perhaps cndlCount can be derived from cndlLbls
assert( (cndlCount) == len(cndlLbls))
minReading = minValue
# just so that the maximum value doesn't get missed
maxReading = maxValue + 1
# gradual gradient markers to help read the values
stepSize = (maxReading - minReading) /10
ind = np.arange(cndlCount) # the x locations for the groups
emptyArr = [0] * cndlCount
sumArr = emptyArr
containerArr = []
graphBottom = None
p = [] # list of bar properties
plt.subplots_adjust(bottom=0.2) #make room for the legend
    for i in range(len(arrVals)):
selectedColor = lyrColors[i]
print("Adding color " + selectedColor)
tmpContainer = plt.bar(ind, arrVals[i], cndlThickness, color=selectedColor, bottom=graphBottom)
containerArr.append(tmpContainer)
tmpArr = arrVals[i]
sumArr = [tmpArr[j] + sumArr[j] for j in range(cndlCount)]
graphBottom = sumArr
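        # Note: graphBottom accumulates the running column totals, so the next
        # layer's bars start at the top of the bars already stacked below them.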
pprint(graphBottom)
plt.ylabel(valuesMasterLbl)
plt.xlabel(cndlsMasterLbl)
plt.title(Title)
#plt.xticks(ind+cndlThickness/2., cndlLbls)
spacing = ind+cndlThickness/2.
pprint(spacing)
plt.xticks(spacing, cndlLbls)
legArr = []
print(type(legArr))
plt.yticks(np.arange(minReading, maxReading, stepSize))
#plt.legend( (p1[0], p2[0], p3[0], p4[0]), lyrNames)
#TODO: solve the part of passing variable length args to a function
print("len(containerArr):" + str(len(containerArr)))
for zz in range(0, len(containerArr)):
legArr.append(containerArr[zz])
    # it's called the splat operator
# http://stackoverflow.com/questions/7745952/python-expand-list-to-function-arguments
# http://stackoverflow.com/questions/12720450/unpacking-arguments-only-named-arguments-may-follow-expression
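    # e.g. (illustration) plt.legend(*[h1, h2]) is equivalent to
    # plt.legend(h1, h2): the list is unpacked into positional arguments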
plt.legend(
bbox_to_anchor=(0.5, -0.3),
loc='lower center',
ncol=7,labels=(lyrNames),*legArr)
plt.grid()
plt.show()
if __name__ == "__main__":
Title = 'Visitors by block and day'
cndlsMasterLbl = 'Date'
valuesMasterLbl = 'No. of Visitors'
    cndlThickness = 0.30 # the width of the bars: can also be a len(x) sequence
# Mo, Tu, We, Th, Fr, Sa, Su
blkCntAr = (10, 30, 30, 35, 27, 67, 21)
blkCntBr = (20, 32, 34, 20, 25, 76, 21)
blkCntEl = (30, 34, 34, 20, 25, 76, 21)
blkCntGl = (40, 36, 34, 20, 25, 76, 21)
blkCntHl = (25, 32, 34, 20, 25, 76, 21)
blkCntJg = (25, 32, 34, 20, 25, 76, 21)
blkCntJl = (25, 32, 34, 20, 25, 76, 21)
blkCntZr = (25, 32, 34, 20, 25, 76, 21)
blkCntMB = (25, 32, 34, 20, 25, 76, 21)
blkCntMR = (25, 32, 34, 20, 25, 76, 21)
blkCntMT = (25, 32, 34, 20, 25, 76, 21)
blkCntRs = (25, 32, 34, 20, 25, 76, 21)
blkCntOh = (25, 32, 34, 20, 25, 76, 21)
minValue = 0
maxValue = 1000
# XXX: Round off to nearest bracket ..multiple of 10..50..100..500..1000
maxObsValue = max(map(sum,zip(blkCntAr,blkCntBr,blkCntEl,blkCntGl,blkCntHl,blkCntJg,blkCntJl,blkCntZr,blkCntMB,blkCntMR,blkCntMT,blkCntRs,blkCntOh)))
assert(maxValue > maxObsValue),"Max val " + str(maxValue) + " < max observed val " + str(maxObsValue)
cndlCount = len(blkCntAr) # count of elements in each category
blocks=["Ar","Br","El","Gl","Hl","Jl","Jg","Zr","MB","MR","MT","Rs","Oh"]
lyrNames = blocks
cndlLbls = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
arrVals = blkCntAr,blkCntBr,blkCntEl,blkCntGl,blkCntHl,blkCntJg,blkCntJl,blkCntZr,blkCntMB,blkCntMR,blkCntMT,blkCntRs,blkCntOh
lyrColors = ('r','g','b','c','m','y','k','Brown', 'Crimson','w','BlueViolet', 'DarkOrange','DeepPink')
# days -> cndls -> X
# blocks -> lyrs -> Y
# len(arrVals) = len(lyrColors)
stackedBarChart(Title, cndlsMasterLbl, valuesMasterLbl, cndlThickness, minValue, maxValue, cndlCount, cndlLbls, arrVals, lyrNames, lyrColors)
| mit |
arahuja/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 26 | 1523 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased, the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
clf_1 = DecisionTreeRegressor(max_depth=4)
clf_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
clf_1.fit(X, y)
clf_2.fit(X, y)
# Predict
y_1 = clf_1.predict(X)
y_2 = clf_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
lidalei/IR-Project | src/CoAuthor/CoAuthors.py | 1 | 4315 | '''
Created on Oct 15, 2015
@author: Dalei
'''
import json, nltk
import networkx as nx
import matplotlib.pyplot as plt
# This module is used to extract co-authors of articles
## we only need authors information
# articles_authors = {}
#
# with open('../Dataset/metadata.json') as f:
# metadata = json.load(f)
# for year in metadata:
# one_year_articles = metadata[year]
# for article_ID in one_year_articles:
# # convert authors string to list
# articles_authors[article_ID] = one_year_articles[article_ID]['authors'].split(';')
# f.close()
## end get authors information only
## begin get articles_authors
# with open('../../Dataset/Local/articles_authors.json', 'r') as f:
# articles_authors = json.load(f)
# f.close()
## end get articles_authors
## begin get single authors from file
# with open('../../Dataset/Local/single_authors.json', 'r') as f:
# authors = json.load(f)
# f.close()
## end get single authors from file
## begin single authors
# how to get single authors from articles_authors
#
# authors = {}
#
# for article_ID in articles_authors:
# for author in articles_authors[article_ID]:
# if author not in authors:
# authors[author] = 1
# else:
# authors[author] += 1
#
# with open('../Dataset/Local/single_authors.json', 'w') as f:
# json.dump(authors, f)
# f.close()
## end single authors
## begin building graph
# authors_graph = {}
#
# for author in authors:
# authors_graph[author] = {}
#
# for article_ID in articles_authors:
#
# article_authors = articles_authors[article_ID]
#
# # author exists in the article
# if author in article_authors:
# for author_ in article_authors:
# if author_ != author:
# if author_ not in authors_graph[author]:
# authors_graph[author][author_] = 1
# else:
# authors_graph[author][author_] += 1
# remove weaker relations (fewer than 2 co-authored papers)
# notably_authors_graph = {}
#
# for author in authors_graph:
# notably_authors_graph[author] = {}
# for adjacent_author in authors_graph[author]:
# if authors_graph[author][adjacent_author] >= 2:
# notably_authors_graph[author][adjacent_author] = authors_graph[author][adjacent_author]
#
# for author in authors_graph:
# if len(notably_authors_graph[author]) < 1:
# del notably_authors_graph[author]
#
#
# with open('authors_graph.json', 'w') as f:
# json.dump(authors_graph, f)
# f.close()
## end building graph
## begin building a visual graph from the graph file
with open('../../Dataset/Local/authors_graph.json', 'r') as f:
authors_graph = json.load(f)
f.close()
coauthor_network = nx.Graph()
coauthor_network.add_nodes_from(authors_graph.keys())
weighted_edges = [(node, adjacent_node, {'weight':authors_graph[node][adjacent_node]}) for node in authors_graph for adjacent_node in authors_graph[node]]
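# Each entry looks like ('Author A', 'Author B', {'weight': 3}); the author
# names here are only illustrative placeholders.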
coauthor_network.add_edges_from(weighted_edges)
nx.write_gml(coauthor_network, 'coauthor_network.graph')
edges = [(node, adjacent_node) for node in authors_graph for adjacent_node in authors_graph[node]]
position = nx.spring_layout(coauthor_network)
nx.draw_networkx_nodes(coauthor_network, position, node_size = 2)
nx.draw_networkx_edges(coauthor_network, position, edge_list = edges, edge_color = 'r')
# plt.show()
plt.savefig('authors_graph.png', dpi = 1200)
## end building a visual graph from the graph file
# with open('single_authors.json', 'r') as f:
# authors = json.load(f)
# f.close()
#
# authors_support = []
#
# for author_index, author in zip(range(len(authors)), authors):
# authors_support.append([author, 0])
# for article_ID in articles_authors:
# for author_ in articles_authors[article_ID]:
# if author == author_:
# authors_support[author_index][1] += 1
# break
#
# authors_support = sorted(authors_support, reverse = True, key=lambda author: author[1])
#
# with open('authors_support.json', 'w') as f:
# json.dump(authors_support, f)
# f.close()
# associative rule mining
if __name__ == '__main__':
pass | gpl-2.0 |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/computation/ops.py | 9 | 15234 | """Operator classes for eval.
"""
import operator as op
from functools import partial
from datetime import datetime
import numpy as np
import pandas as pd
from pandas.compat import PY3, string_types, text_type
import pandas.core.common as com
from pandas.core.base import StringMixin
from pandas.computation.common import _ensure_decoded, _result_type_many
from pandas.computation.scope import _DEFAULT_GLOBALS
_reductions = 'sum', 'prod'
_unary_math_ops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p',
'sqrt', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos',
'arctan', 'arccosh', 'arcsinh', 'arctanh', 'abs')
_binary_math_ops = ('arctan2',)
_mathops = _unary_math_ops + _binary_math_ops
_LOCAL_TAG = '__pd_eval_local_'
class UndefinedVariableError(NameError):
"""NameError subclass for local variables."""
def __init__(self, name, is_local):
if is_local:
msg = 'local variable {0!r} is not defined'
else:
msg = 'name {0!r} is not defined'
super(UndefinedVariableError, self).__init__(msg.format(name))
class Term(StringMixin):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = super(Term, klass).__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
self._name = name
self.env = env
self.side = side
tname = text_type(name)
self.is_local = (tname.startswith(_LOCAL_TAG) or
tname in _DEFAULT_GLOBALS)
self._value = self._resolve_name()
self.encoding = encoding
@property
def local_name(self):
return self.name.replace(_LOCAL_TAG, '')
def __unicode__(self):
return com.pprint_thing(self.name)
def __call__(self, *args, **kwargs):
return self.value
def evaluate(self, *args, **kwargs):
return self
def _resolve_name(self):
res = self.env.resolve(self.local_name, is_local=self.is_local)
self.update(res)
if hasattr(res, 'ndim') and res.ndim > 2:
raise NotImplementedError("N-dimensional objects, where N > 2,"
" are not supported with eval")
return res
def update(self, value):
"""
search order for local (i.e., @variable) variables:
scope, key_variable
[('locals', 'local_name'),
('globals', 'local_name'),
('locals', 'key'),
('globals', 'key')]
"""
key = self.name
# if it's a variable name (otherwise a constant)
if isinstance(key, string_types):
self.env.swapkey(self.local_name, key, new_value=value)
self.value = value
@property
def isscalar(self):
return np.isscalar(self._value)
@property
def type(self):
try:
# potentially very slow for large, mixed dtype frames
return self._value.values.dtype
except AttributeError:
try:
# ndarray
return self._value.dtype
except AttributeError:
# scalar
return type(self._value)
return_type = type
@property
def raw(self):
return com.pprint_thing('{0}(name={1!r}, type={2})'
''.format(self.__class__.__name__, self.name,
self.type))
@property
def is_datetime(self):
try:
t = self.type.type
except AttributeError:
t = self.type
return issubclass(t, (datetime, np.datetime64))
@property
def value(self):
return self._value
@value.setter
def value(self, new_value):
self._value = new_value
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
self._name = new_name
@property
def ndim(self):
return self._value.ndim
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super(Constant, self).__init__(value, env, side=side,
encoding=encoding)
def _resolve_name(self):
return self._name
@property
def name(self):
return self.value
_bool_op_map = {'not': '~', 'and': '&', 'or': '|'}
class Op(StringMixin):
"""Hold an operator of arbitrary arity
"""
def __init__(self, op, operands, *args, **kwargs):
self.op = _bool_op_map.get(op, op)
self.operands = operands
self.encoding = kwargs.get('encoding', None)
def __iter__(self):
return iter(self.operands)
def __unicode__(self):
"""Print a generic n-ary operator and its operands using infix
notation"""
# recurse over the operands
parened = ('({0})'.format(com.pprint_thing(opr))
for opr in self.operands)
return com.pprint_thing(' {0} '.format(self.op).join(parened))
@property
def return_type(self):
# clobber types to bool if the op is a boolean operator
if self.op in (_cmp_ops_syms + _bool_ops_syms):
return np.bool_
return _result_type_many(*(term.type for term in com.flatten(self)))
@property
def has_invalid_return_type(self):
types = self.operand_types
obj_dtype_set = frozenset([np.dtype('object')])
return self.return_type == object and types - obj_dtype_set
@property
def operand_types(self):
return frozenset(term.type for term in com.flatten(self))
@property
def isscalar(self):
return all(operand.isscalar for operand in self.operands)
@property
def is_datetime(self):
try:
t = self.return_type.type
except AttributeError:
t = self.return_type
return issubclass(t, (datetime, np.datetime64))
def _in(x, y):
"""Compute the vectorized membership of ``x in y`` if possible, otherwise
use Python.
"""
try:
return x.isin(y)
except AttributeError:
if com.is_list_like(x):
try:
return y.isin(x)
except AttributeError:
pass
return x in y
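# Illustrative examples:
# _in(pd.Series([1, 2]), [2, 3]) returns a boolean Series([False, True]), while
# _in(1, [1, 2]) falls back to plain Python membership and returns True.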
def _not_in(x, y):
"""Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if com.is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y
_cmp_ops_syms = '>', '<', '>=', '<=', '==', '!=', 'in', 'not in'
_cmp_ops_funcs = op.gt, op.lt, op.ge, op.le, op.eq, op.ne, _in, _not_in
_cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs))
_bool_ops_syms = '&', '|', 'and', 'or'
_bool_ops_funcs = op.and_, op.or_, op.and_, op.or_
_bool_ops_dict = dict(zip(_bool_ops_syms, _bool_ops_funcs))
_arith_ops_syms = '+', '-', '*', '/', '**', '//', '%'
_arith_ops_funcs = (op.add, op.sub, op.mul, op.truediv if PY3 else op.div,
op.pow, op.floordiv, op.mod)
_arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs))
_special_case_arith_ops_syms = '**', '//', '%'
_special_case_arith_ops_funcs = op.pow, op.floordiv, op.mod
_special_case_arith_ops_dict = dict(zip(_special_case_arith_ops_syms,
_special_case_arith_ops_funcs))
_binary_ops_dict = {}
for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):
_binary_ops_dict.update(d)
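# Illustrative lookups:
# _binary_ops_dict['>'](2, 1) -> True and _binary_ops_dict['+'](1, 2) -> 3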
def _cast_inplace(terms, dtype):
"""Cast an expression inplace.
Parameters
----------
terms : Op
        The expression that should be cast.
dtype : str or numpy.dtype
The dtype to cast to.
"""
dt = np.dtype(dtype)
for term in terms:
try:
new_value = term.value.astype(dt)
except AttributeError:
new_value = dt.type(term.value)
term.update(new_value)
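# Usage sketch (illustrative): for terms whose values are integer arrays,
# _cast_inplace(terms, np.float64) replaces each term's value with
# value.astype(np.float64); scalar values without .astype() fall back to
# dt.type(value).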
def is_term(obj):
return isinstance(obj, Term)
class BinOp(Op):
"""Hold a binary operator and its operands
Parameters
----------
op : str
left : Term or Op
right : Term or Op
"""
def __init__(self, op, lhs, rhs, **kwargs):
super(BinOp, self).__init__(op, (lhs, rhs))
self.lhs = lhs
self.rhs = rhs
self._disallow_scalar_only_bool_ops()
self.convert_values()
try:
self.func = _binary_ops_dict[op]
except KeyError:
# has to be made a list for python3
keys = list(_binary_ops_dict.keys())
raise ValueError('Invalid binary operator {0!r}, valid'
' operators are {1}'.format(op, keys))
def __call__(self, env):
"""Recursively evaluate an expression in Python space.
Parameters
----------
env : Scope
Returns
-------
object
The result of an evaluated expression.
"""
# handle truediv
if self.op == '/' and env.scope['truediv']:
self.func = op.truediv
# recurse over the left/right nodes
left = self.lhs(env)
right = self.rhs(env)
return self.func(left, right)
def evaluate(self, env, engine, parser, term_type, eval_in_python):
"""Evaluate a binary operation *before* being passed to the engine.
Parameters
----------
env : Scope
engine : str
parser : str
term_type : type
eval_in_python : list
Returns
-------
term_type
The "pre-evaluated" expression as an instance of ``term_type``
"""
if engine == 'python':
res = self(env)
else:
# recurse over the left/right nodes
left = self.lhs.evaluate(env, engine=engine, parser=parser,
term_type=term_type,
eval_in_python=eval_in_python)
right = self.rhs.evaluate(env, engine=engine, parser=parser,
term_type=term_type,
eval_in_python=eval_in_python)
# base cases
if self.op in eval_in_python:
res = self.func(left.value, right.value)
else:
res = pd.eval(self, local_dict=env, engine=engine,
parser=parser)
name = env.add_tmp(res)
return term_type(name, env=env)
def convert_values(self):
"""Convert datetimes to a comparable value in an expression.
"""
def stringify(value):
if self.encoding is not None:
encoder = partial(com.pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = com.pprint_thing
return encoder(value)
lhs, rhs = self.lhs, self.rhs
if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.isscalar:
v = rhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = pd.Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert('UTC')
self.rhs.update(v)
if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.isscalar:
v = lhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = pd.Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert('UTC')
self.lhs.update(v)
def _disallow_scalar_only_bool_ops(self):
if ((self.lhs.isscalar or self.rhs.isscalar) and
self.op in _bool_ops_dict and
(not (issubclass(self.rhs.return_type, (bool, np.bool_)) and
issubclass(self.lhs.return_type, (bool, np.bool_))))):
raise NotImplementedError("cannot evaluate scalar only bool ops")
def isnumeric(dtype):
return issubclass(np.dtype(dtype).type, np.number)
class Div(BinOp):
"""Div operator to special case casting.
Parameters
----------
lhs, rhs : Term or Op
The Terms or Ops in the ``/`` expression.
truediv : bool
Whether or not to use true division. With Python 3 this happens
regardless of the value of ``truediv``.
"""
def __init__(self, lhs, rhs, truediv, *args, **kwargs):
super(Div, self).__init__('/', lhs, rhs, *args, **kwargs)
if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type):
raise TypeError("unsupported operand type(s) for {0}:"
" '{1}' and '{2}'".format(self.op,
lhs.return_type,
rhs.return_type))
if truediv or PY3:
_cast_inplace(com.flatten(self), np.float_)
_unary_ops_syms = '+', '-', '~', 'not'
_unary_ops_funcs = op.pos, op.neg, op.invert, op.invert
_unary_ops_dict = dict(zip(_unary_ops_syms, _unary_ops_funcs))
class UnaryOp(Op):
"""Hold a unary operator and its operands
Parameters
----------
op : str
The token used to represent the operator.
operand : Term or Op
The Term or Op operand to the operator.
Raises
------
ValueError
* If no function associated with the passed operator token is found.
"""
def __init__(self, op, operand):
super(UnaryOp, self).__init__(op, (operand,))
self.operand = operand
try:
self.func = _unary_ops_dict[op]
except KeyError:
raise ValueError('Invalid unary operator {0!r}, valid operators '
'are {1}'.format(op, _unary_ops_syms))
def __call__(self, env):
operand = self.operand(env)
return self.func(operand)
def __unicode__(self):
return com.pprint_thing('{0}({1})'.format(self.op, self.operand))
@property
def return_type(self):
operand = self.operand
if operand.return_type == np.dtype('bool'):
return np.dtype('bool')
if (isinstance(operand, Op) and
(operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict)):
return np.dtype('bool')
return np.dtype('int')
class MathCall(Op):
def __init__(self, func, args):
super(MathCall, self).__init__(func.name, args)
self.func = func
def __call__(self, env):
operands = [op(env) for op in self.operands]
return self.func.func(*operands)
def __unicode__(self):
operands = map(str, self.operands)
return com.pprint_thing('{0}({1})'.format(self.op, ','.join(operands)))
class FuncNode(object):
def __init__(self, name):
if name not in _mathops:
raise ValueError("\"{0}\" is not a supported function".format(name))
self.name = name
self.func = getattr(np, name)
def __call__(self, *args):
return MathCall(self, args)
| mit |
sumspr/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subsetting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
typically return probabilities closer to 0 or 1.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |
aktech/sympy | sympy/physics/quantum/tests/test_circuitplot.py | 93 | 2065 | from sympy.physics.quantum.circuitplot import labeller, render_label, Mz, CreateOneQubitGate,\
CreateCGate
from sympy.physics.quantum.gate import CNOT, H, SWAP, CGate, S, T
from sympy.external import import_module
from sympy.utilities.pytest import skip
mpl = import_module('matplotlib')
def test_render_label():
assert render_label('q0') == r'$|q0\rangle$'
assert render_label('q0', {'q0': '0'}) == r'$|q0\rangle=|0\rangle$'
def test_Mz():
assert str(Mz(0)) == 'Mz(0)'
def test_create1():
Qgate = CreateOneQubitGate('Q')
assert str(Qgate(0)) == 'Q(0)'
def test_createc():
Qgate = CreateCGate('Q')
assert str(Qgate([1],0)) == 'C((1),Q(0))'
def test_labeller():
"""Test the labeller utility"""
assert labeller(2) == ['q_1', 'q_0']
assert labeller(3,'j') == ['j_2', 'j_1', 'j_0']
def test_cnot():
"""Test a simple cnot circuit. Right now this only makes sure the code doesn't
raise an exception, and some simple properties
"""
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(CNOT(1,0),2,labels=labeller(2))
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == ['q_1', 'q_0']
c = CircuitPlot(CNOT(1,0),2)
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == []
def test_ex1():
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(CNOT(1,0)*H(1),2,labels=labeller(2))
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == ['q_1', 'q_0']
def test_ex4():
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(SWAP(0,2)*H(0)* CGate((0,),S(1)) *H(1)*CGate((0,),T(2))\
*CGate((1,),S(2))*H(2),3,labels=labeller(3,'j'))
assert c.ngates == 7
assert c.nqubits == 3
assert c.labels == ['j_2', 'j_1', 'j_0']
| bsd-3-clause |
keflavich/mpld3 | examples/linked_brush.py | 21 | 1136 | """
Linked Brushing Example
=======================
This example uses the standard Iris dataset and plots it with a linked brushing
tool for dynamically exploring the data. The paintbrush button at the bottom
left can be used to enable and disable the behavior.
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
import mpld3
from mpld3 import plugins, utils
data = load_iris()
X = data.data
y = data.target
# dither the data for clearer plotting
X += 0.1 * np.random.random(X.shape)
fig, ax = plt.subplots(4, 4, sharex="col", sharey="row", figsize=(8, 8))
fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95,
hspace=0.1, wspace=0.1)
for i in range(4):
for j in range(4):
points = ax[3 - i, j].scatter(X[:, j], X[:, i],
c=y, s=40, alpha=0.6)
# remove tick labels
for axi in ax.flat:
for axis in [axi.xaxis, axi.yaxis]:
axis.set_major_formatter(plt.NullFormatter())
# Here we connect the linked brush plugin
plugins.connect(fig, plugins.LinkedBrush(points))
mpld3.show()
| bsd-3-clause |
moonbury/pythonanywhere | github/MasteringMatplotlib/eros.py | 2 | 2549 | """
Sample Python library for working with the Landsat data from EROS/USGS.
"""
import glob
import os.path
import typecheck
import numpy as np
import matplotlib.pyplot as plt
from skimage import io, exposure
import skimage as ski
BAND_COASTAL_AEROSOL = 1
BAND_BLUE = 2
BAND_GREEN = 3
BAND_RED = 4
BAND_NEAR_IR = 5
BAND_SW_IR_1 = 6
BAND_SW_IR_2 = 7
BAND_PANCHROM = 8
BAND_CIRRUS = 9
BAND_LW_IR_1 = 10
BAND_LW_IR_2 = 11
def inclusive(min, max):
return lambda x: x in range(min, max + 1)
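# e.g. inclusive(1, 11)(5) -> True and inclusive(1, 11)(12) -> False; the
# typecheck decorator below uses this predicate to validate the band number n.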
@typecheck.typecheck
def read_band(path, scene_id, n: inclusive(1, 11)):
"""Load Landsat 8 band
Input: path - full path to the scene data directory
scene_id - Landsat scene ID
n - integer in the range 1-11
Output: img - 2D array of uint16 type"""
ext = ".TIF"
band_name = "_B" + str(n) + ext
if path.startswith("http"):
filename = os.path.join(path, scene_id + band_name)
else:
filename = os.path.join(path, scene_id, scene_id + band_name)
return ski.io.imread(filename)
def extract_rgb(path, scene_id):
red = read_band(path, scene_id, BAND_RED)
green = read_band(path, scene_id, BAND_GREEN)
blue = read_band(path, scene_id, BAND_BLUE)
return np.dstack((red, green, blue))
def extract_swir2nirg(path, scene_id):
red = read_band(path, scene_id, BAND_SW_IR_2)
green = read_band(path, scene_id, BAND_NEAR_IR)
blue = read_band(path, scene_id, BAND_COASTAL_AEROSOL)
return np.dstack((red, green, blue))
def show_image(img, title="", filename="", **kwargs):
"""Show image
Input: img - 3D array of uint16 type
title - string"""
fig = plt.figure(**kwargs)
fig.set_facecolor('white')
plt.imshow(img / 65535)
plt.title(title)
if filename:
plt.savefig(filename)
else:
plt.show()
def show_color_hist(rgb_image, xlim=None, ylim=None, **kwargs):
(fig, axes) = plt.subplots(**kwargs)
fig.set_facecolor('white')
for color, channel in zip('rgb', np.rollaxis(rgb_image, axis=-1)):
counts, centers = ski.exposure.histogram(channel)
plt.plot(centers[1::], counts[1::], color=color)
if xlim:
axes.set_xlim(xlim)
if ylim:
axes.set_ylim(ylim)
plt.show()
def update_image(image, r_limits, g_limits, b_limits):
image_he = np.empty(image.shape, dtype='uint16')
for channel, lim in enumerate([r_limits, g_limits, b_limits]):
image_he[:, :, channel] = ski.exposure.rescale_intensity(
image[:, :, channel], lim)
return image_he | gpl-3.0 |
prheenan/Research | Perkins/AnalysisUtil/ForceExtensionAnalysis/FEC_Plot.py | 1 | 9081 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
from Research.Perkins.AnalysisUtil.ForceExtensionAnalysis import FEC_Util
import GeneralUtil.python.PlotUtilities as PlotUtilities
from GeneralUtil.python.IgorUtil import SavitskyFilter
import copy
def_conversion_opts =dict(ConvertX = lambda x: x*1e9,
ConvertY = lambda y: y*1e12)
def _fec_base_plot(x,y,n_filter_points=None,label="",
style_data=dict(color='k',alpha=0.3),
style_filtered=None):
"""
base function; plots x and y (and their filtered versions)
Args:
x/y: the x and y to use for plotting
        n_filter_points: how many points for the Savitzky-Golay filter
style_<data/filtered>: plt.plot options for the raw and filtered data.
defaults to filtered just being alpha=1 (not transparent)
Returns:
x and y, filtered versions
"""
if (style_filtered is None):
style_filtered = dict(**style_data)
style_filtered['alpha'] = 1
style_filtered['label'] = label
if (n_filter_points is None):
n_filter_points = int(np.ceil(x.size * FEC_Util.default_filter_pct))
x_filtered = SavitskyFilter(x,nSmooth=n_filter_points)
y_filtered = SavitskyFilter(y,nSmooth=n_filter_points)
plt.plot(x,y,**style_data)
plt.plot(x_filtered,y_filtered,**style_filtered)
return x_filtered,y_filtered
def _ApproachRetractCurve(Appr,Retr,NFilterPoints=100,
x_func = lambda x: x.Separation,
y_func = lambda y: y.Force,
ApproachLabel="Approach",
RetractLabel="Retract"):
"""
    Most of the brains for the approach/retract curve. Does *not* show anything.
    Args:
        Appr/Retr: the approach and retract TimeSepForce objects to plot
NFilterPoints: how many points to filter down
ApproachLabel: label to put on the approach
RetractLabel: label to put on the retract
"""
# plot the separation and force, with their filtered counterparts
_fec_base_plot(x_func(Appr),y_func(Appr),n_filter_points=NFilterPoints,
style_data=dict(color='r',alpha=0.3),label=ApproachLabel)
_fec_base_plot(x_func(Retr),y_func(Retr),n_filter_points=NFilterPoints,
style_data=dict(color='b',alpha=0.3),label=RetractLabel)
def FEC_AlreadySplit(Appr,Retr,
XLabel = "Separation (nm)",
YLabel = "Force (pN)",
ConversionOpts=def_conversion_opts,
PlotLabelOpts=dict(),
PreProcess=False,
NFilterPoints=50,
LegendOpts=dict(loc='best'),
**kwargs):
"""
Args:
XLabel: label for x axis
YLabel: label for y axis
ConversionOpts: see FEC_Util.SplitAndProcess
PlotLabelOpts: see arguments after filtering of ApproachRetractCurve
PreProcess: if true, pre-processes the approach and retract separately
(ie: to zero and flip the y axis).
        NFilterPoints: see FEC_Util.SplitAndProcess, for Savitzky-Golay filtering
        **kwargs: passed to FEC_Util.PreProcessApproachAndRetract
"""
ApprCopy = FEC_Util.UnitConvert(Appr,**ConversionOpts)
RetrCopy = FEC_Util.UnitConvert(Retr,**ConversionOpts)
if (PreProcess):
ApprCopy,RetrCopy = FEC_Util.PreProcessApproachAndRetract(ApprCopy,
RetrCopy,
**kwargs)
_ApproachRetractCurve(ApprCopy,RetrCopy,
NFilterPoints=NFilterPoints,**PlotLabelOpts)
PlotUtilities.lazyLabel(XLabel,YLabel,"")
PlotUtilities.legend(**LegendOpts)
def z_sensor_vs_time(time_sep_force,**kwargs):
"""
plots z sensor versus time. See force_versus_time
"""
plot_labels = dict(x_func=lambda x : x.Time,
y_func=lambda x : x.ZSnsr)
FEC(time_sep_force,
PlotLabelOpts=plot_labels,
XLabel="Time (s)",
YLabel="ZSnsr (nm)",**kwargs)
def force_versus_time(time_sep_force,**kwargs):
"""
Plots force versus time
Args:
**kwargs: see FEC
"""
plot_labels = dict(x_func=lambda x : x.Time,
y_func=lambda x: x.Force)
FEC(time_sep_force,
PlotLabelOpts=plot_labels,
XLabel="Time (s)",
YLabel="Force (pN)",**kwargs)
def FEC(TimeSepForceObj,NFilterPoints=50,
PreProcessDict=dict(),
**kwargs):
"""
Plots a force extension curve. Splits the curve into approach and
Retract and pre-processes by default
Args:
TimeSepForceObj: 'Raw' TimeSepForce Object
PreProcessDict: passed directly to FEC_Util.PreProcessFEC
**kwargs: passed directly to FEC_Plot.FEC_AlreadySplit
"""
Appr,Retr= FEC_Util.PreProcessFEC(TimeSepForceObj,
NFilterPoints=NFilterPoints,
**PreProcessDict)
# plot the approach and retract with the appropriate units
FEC_AlreadySplit(Appr,Retr,NFilterPoints=NFilterPoints,**kwargs)
def heat_map_fec(time_sep_force_objects,num_bins=(100,100),
separation_max = None,n_filter_func=None,use_colorbar=True,
ConversionOpts=def_conversion_opts,cmap='afmhot'):
"""
    Plots a two-dimensional heat map (2D histogram) built from a set of
    force extension curves
Args:
time_sep_force_objects: list of (zeroed, but SI) TimeSepForce Object
num_bins: tuple of <x,y> bins. Passed to hist2d
        n_filter_func: if not None, histograms the Savitzky-Golay *filtered*
        version of the objects given, with n_filter_func being a function
taking in the TimeSepForce object and returning an integer number of
points
use_colorbar: if true, add a color bar
separation_max: if not None, only histogram up to and including this
separation. should be in units *after* conversion (default: nanometers)
ConversionOpts: passed to UnitConvert. Default converts x to nano<X>
and y to pico<Y>
"""
# convert everything...
objs = [FEC_Util.UnitConvert(r,**ConversionOpts)
for r in time_sep_force_objects]
if n_filter_func is not None:
objs = [FEC_Util.GetFilteredForce(o,n_filter_func(o))
for o in objs]
filtered_data = [(retr.Separation,retr.Force) for retr in objs]
separations = np.concatenate([r[0] for r in filtered_data])
forces = np.concatenate([r[1] for r in filtered_data])
if (separation_max is not None):
idx_use = np.where(separations < separation_max)
else:
# use everything
idx_use = slice(0,None,1)
separations = separations[idx_use]
forces = forces[idx_use]
# make a heat map, essentially
counts, xedges, yedges, Image = plt.hist2d(separations, forces,
bins=num_bins,cmap=cmap)
PlotUtilities.lazyLabel("Separation (nm)",
"Force (pN)",
"Force-Extension Heatmap")
if (use_colorbar):
cbar = plt.colorbar()
label = '# of points in (Force,Separation) Bin'
cbar.set_label(label,labelpad=10,rotation=270)
def _n_rows_and_cols(processed,n_cols=3):
n_rows = int(np.ceil(len(processed)/n_cols))
return n_rows,n_cols
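# e.g. 7 processed curves with the default n_cols=3 give a (3, 3) subplot grid.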
def gallery_fec(processed,xlim_nm,ylim_pN,NFilterPoints=100,n_cols=3,
x_label="Separation (nm)",y_label="Force (pN)",
approach_label="Approach",
retract_label="Retract"):
n_rows,n_cols = _n_rows_and_cols(processed,n_cols)
for i,r in enumerate(processed):
plt.subplot(n_rows,n_cols,(i+1))
appr,retr = r
is_labelled = i == 0
is_first = (i % n_cols == 0)
is_bottom = ((i + (n_cols)) >= len(processed))
XLabel = x_label if is_bottom else ""
YLabel = y_label if is_first else ""
ApproachLabel = approach_label if is_labelled else ""
RetractLabel = retract_label if is_labelled else ""
PlotLabelOpts = dict(ApproachLabel=ApproachLabel,
RetractLabel=RetractLabel)
LegendOpts = dict(loc='upper left',frameon=True)
FEC_AlreadySplit(appr,retr,XLabel=XLabel,YLabel=YLabel,
LegendOpts=LegendOpts,
PlotLabelOpts=PlotLabelOpts,
NFilterPoints=NFilterPoints)
plt.xlim(xlim_nm)
plt.ylim(ylim_pN)
ax = plt.gca()
if (not is_bottom):
ax.tick_params(labelbottom='off')
if (not is_first):
ax.tick_params(labelleft='off')
| gpl-3.0 |
rodrigoduranna/frogsounds | plots.py | 1 | 2507 | import glob
import os
import librosa
import numpy as np
import time
import matplotlib.pyplot as plt
import librosa.display
from matplotlib.pyplot import specgram
plt.style.use('ggplot')
# Plot size. If the images are too large or too small, change it here
H_SIZE = 10
V_SIZE = 22
DDPI = 96
# Font parameters for the plots
plt.rcParams['font.size'] = 12
plt.rcParams['axes.labelsize'] = 11
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titlesize'] = 14
plt.rcParams['xtick.labelsize'] = 10
plt.rcParams['ytick.labelsize'] = 10
plt.rcParams['legend.fontsize'] = 11
plt.rcParams['figure.titlesize'] = 13
# load the sound files
def load_sound_files(file_paths):
raw_sounds = []
for fp in file_paths:
X,sr = librosa.load(fp)
raw_sounds.append(X)
return raw_sounds
# plot the waveform chart
def plot_waves(sound_names,raw_sounds):
i = 1
fig = plt.figure(figsize=(H_SIZE,V_SIZE), dpi = DDPI)
for n,f in zip(sound_names,raw_sounds):
plt.subplot(10,1,i)
librosa.display.waveplot(np.array(f),sr=22050)
plt.title(n.title())
i += 1
    plt.suptitle('Figure 1: Waveform',x=0.5, y=0.915,fontsize=12)
plt.show()
# plot the spectrogram chart
def plot_specgram(sound_names,raw_sounds):
i = 1
fig = plt.figure(figsize=(H_SIZE,V_SIZE), dpi = DDPI)
for n,f in zip(sound_names,raw_sounds):
plt.subplot(10,1,i)
specgram(np.array(f), Fs=22050)
plt.title(n.title())
i += 1
    plt.suptitle('Figure 2: Spectrogram',x=0.5, y=0.915,fontsize=12)
plt.show()
# plot the log power spectrogram chart
def plot_log_power_specgram(sound_names,raw_sounds):
i = 1
fig = plt.figure(figsize=(H_SIZE,V_SIZE), dpi = DDPI)
for n,f in zip(sound_names,raw_sounds):
plt.subplot(10,1,i)
D = librosa.logamplitude(np.abs(librosa.stft(f))**2, ref_power=np.max)
librosa.display.specshow(D,x_axis='time' ,y_axis='log')
plt.title(n.title())
i += 1
    plt.suptitle('Figure 3: Log Power Spectrogram',x=0.5, y=0.915,fontsize=12)
plt.show()
# sound files for which to create the plots
sound_file_paths = ["Perereca-grande (Hypsiboas raniceps).wav"]
sound_names = ["Perereca-grande (Hypsiboas raniceps)"]
# load the sound files
raw_sounds = load_sound_files(sound_file_paths)
# plot the charts
plot_waves(sound_names,raw_sounds)
plot_specgram(sound_names,raw_sounds)
plot_log_power_specgram(sound_names,raw_sounds) | mit |
carlvlewis/bokeh | bokeh/charts/builder/donut_builder.py | 31 | 8206 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Donut class, which lets you build your Donut charts by just passing
the arguments to the Chart class and calling the proper functions.
It also adds a new chained stacked method.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division
from math import pi
import pandas as pd
from ..utils import cycle_colors, polar_to_cartesian
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, GlyphRenderer, Range1d
from ...models.glyphs import AnnularWedge, Text, Wedge
from ...properties import Any, Bool, Either, List
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Donut(values, cat=None, width=800, height=800, xgrid=False, ygrid=False, **kws):
""" Creates a Donut chart using :class:`DonutBuilder <bokeh.charts.builder.donut_builder.DonutBuilder>`
to render the geometry from values and cat.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
        cat (list or bool, optional): list of strings representing the categories.
Defaults to None.
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.charts import Donut, output_file, show
# dict, OrderedDict, lists, arrays and DataFrames are valid inputs
xyvalues = [[2., 5., 3.], [4., 1., 4.], [6., 4., 3.]]
donut = Donut(xyvalues, ['cpu1', 'cpu2', 'cpu3'])
output_file('donut.html')
show(donut)
"""
return create_and_build(
DonutBuilder, values, cat=cat, width=width, height=height,
xgrid=xgrid, ygrid=ygrid, **kws
)
class DonutBuilder(Builder):
"""This is the Donut class and it is in charge of plotting
    Donut charts in an easy and intuitive way.
Essentially, it provides a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the donut slices and angles.
And finally add the needed glyphs (Wedges and AnnularWedges) taking
the references from the source.
"""
cat = Either(Bool, List(Any), help="""
    List of strings representing the categories. (Defaults to None.)
""")
def _process_data(self):
"""Take the chart data from self._values.
It calculates the chart properties accordingly (start/end angles).
Then build a dict containing references to all the calculated
points to be used by the Wedge glyph inside the ``_yield_renderers`` method.
"""
dd = dict(zip(self._values.keys(), self._values.values()))
self._df = df = pd.DataFrame(dd)
self._groups = df.index = self.cat
df.columns = self._values.keys()
# Get the sum per category
aggregated = df.T.sum()
# Get the total (sum of all categories)
self._total_units = total = aggregated.sum()
radians = lambda x: 2*pi*(x/total)
angles = aggregated.map(radians).cumsum()
end_angles = angles.tolist()
start_angles = [0] + end_angles[:-1]
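        # Illustrative example (hypothetical values): if the per-category sums
        # were [1, 1, 2] (total 4), radians() maps them to [pi/2, pi/2, pi], so
        # end_angles = [pi/2, pi, 2*pi] and start_angles = [0, pi/2, pi].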
colors = cycle_colors(self.cat, self.palette)
self.set_and_get("", "colors", colors)
self.set_and_get("", "end", end_angles)
self.set_and_get("", "start", start_angles)
def _set_sources(self):
"""Push the Donut data into the ColumnDataSource and calculate
the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = Range1d(start=-2, end=2)
self.y_range = Range1d(start=-2, end=2)
def draw_central_wedge(self):
"""Draw the central part of the donut wedge from donut.source and
its calculated start and end angles.
"""
glyph = Wedge(
x=0, y=0, radius=1, start_angle="start", end_angle="end",
line_color="white", line_width=2, fill_color="colors"
)
yield GlyphRenderer(data_source=self._source, glyph=glyph)
def draw_central_descriptions(self):
"""Draw the descriptions to be placed on the central part of the
donut wedge
"""
text = ["%s" % cat for cat in self.cat]
x, y = polar_to_cartesian(0.7, self._data["start"], self._data["end"])
text_source = ColumnDataSource(dict(text=text, x=x, y=y))
glyph = Text(
x="x", y="y", text="text",
text_align="center", text_baseline="middle"
)
yield GlyphRenderer(data_source=text_source, glyph=glyph)
def draw_external_ring(self, colors=None):
"""Draw the external part of the donut wedge from donut.source
and its related descriptions
"""
if colors is None:
colors = cycle_colors(self.cat, self.palette)
first = True
for i, (cat, start_angle, end_angle) in enumerate(zip(
self.cat, self._data['start'], self._data['end'])):
details = self._df.ix[i]
radians = lambda x: 2*pi*(x/self._total_units)
angles = details.map(radians).cumsum() + start_angle
end = angles.tolist() + [end_angle]
start = [start_angle] + end[:-1]
base_color = colors[i]
#fill = [ base_color.lighten(i*0.05) for i in range(len(details) + 1) ]
fill = [base_color for i in range(len(details) + 1)]
text = [rowlabel for rowlabel in details.index]
x, y = polar_to_cartesian(1.25, start, end)
source = ColumnDataSource(dict(start=start, end=end, fill=fill))
glyph = AnnularWedge(
x=0, y=0, inner_radius=1, outer_radius=1.5,
start_angle="start", end_angle="end",
line_color="white", line_width=2,
fill_color="fill"
)
yield GlyphRenderer(data_source=source, glyph=glyph)
text_angle = [(start[i]+end[i])/2 for i in range(len(start))]
text_angle = [angle + pi if pi/2 < angle < 3*pi/2 else angle
for angle in text_angle]
if first and text:
text.insert(0, '')
offset = pi / 48
text_angle.insert(0, text_angle[0] - offset)
start.insert(0, start[0] - offset)
end.insert(0, end[0] - offset)
x, y = polar_to_cartesian(1.25, start, end)
first = False
data = dict(text=text, x=x, y=y, angle=text_angle)
text_source = ColumnDataSource(data)
glyph = Text(
x="x", y="y", text="text", angle="angle",
text_align="center", text_baseline="middle"
)
yield GlyphRenderer(data_source=text_source, glyph=glyph)
def _yield_renderers(self):
"""Use the AnnularWedge and Wedge glyphs to display the wedges.
Takes reference points from data loaded at the ColumnDataSurce.
"""
# build the central round area of the donut
renderers = []
renderers += self.draw_central_wedge()
# write central descriptions
renderers += self.draw_central_descriptions()
# build external donut ring
renderers += self.draw_external_ring()
return renderers
| bsd-3-clause |
PaulGrimal/peach | tutorial/neural-networks/self-organizing-maps.py | 6 | 2858 | ################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/self-organizing-maps.py
# Extended example on self-organizing maps
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace. We will
# also need the random module:
from numpy import *
import random
import peach as p
# A self-organizing map has the ability to automatically recognize and classify
# patterns. This tutorial shows graphically how this happens. We have a set of
# points in the cartesian plane, each coordinate obtained from a central point
# plus a random (gaussian, average 0, small variance) shift in some direction.
# We use this set to build the network.
# First, we create the training set:
train_size = 300
centers = [ array([ 1.0, 0.0 ], dtype=float),
array([ 1.0, 1.0 ], dtype=float),
array([ 0.0, 1.0 ], dtype=float),
array([-1.0, 1.0 ], dtype=float),
array([-1.0, 0.0 ], dtype=float) ]
xs = [ ]
for i in range(train_size):
x1 = random.gauss(0.0, 0.1)
x2 = random.gauss(0.0, 0.1)
xs.append(centers[i%5] + array([ x1, x2 ], dtype=float))
# Since we are working on the plane, each example and each neuron will have two
# coordinates. We will use five neurons (since we have five centers). The
# self-organizing map is created by the line below. Our goal is to show how the
# weights converge to the mass center of the point clouds, so we initialize the
# weights to show it:
nn = p.SOM((5, 2))
for i in range(5):
nn.weights[i, 0] = 0.3 * cos(i*pi/4)
nn.weights[i, 1] = 0.3 * sin(i*pi/4)
# We use these lists to track the variation of each neuron:
wlog = [ [ nn.weights[0] ],
[ nn.weights[1] ],
[ nn.weights[2] ],
[ nn.weights[3] ],
[ nn.weights[4] ] ]
# Here we feed and update the network. We could use the ``train`` method, but
# we want to track the weights:
for x in xs:
y = nn(x)
nn.learn(x)
wlog[y].append(array(nn.weights[y]))
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``self-organizing-maps.png``.
try:
from matplotlib import *
from matplotlib.pylab import *
figure(1).set_size_inches(8, 4)
a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
a1.hold(True)
for x in xs:
plot( [x[0]], [x[1]], 'ko')
for w in wlog:
w = array(w[1:])
plot( w[:, 0], w[:, 1], '-x')
savefig("self-organizing-maps.png")
except ImportError:
print "After %d iterations:" % (train_size,)
print nn.weights | lgpl-2.1 |
openconnectome/m2g | MR-OCP/mrcap/lcc.py | 2 | 11171 |
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Created on Mar 12, 2012
@author: dsussman
'''
import pyximport;
pyximport.install()
import numpy as np
from scipy import sparse as sp
import mrcap.roi as roi
import mrcap.fibergraph as fibergraph
import zindex
from scipy.io import loadmat, savemat
from collections import Counter
#from mayavi import mlab # DM - commented out
import itertools as itt
# from matplotlib import pyplot as plt # DM - commented out
import mrcap.fa as fa
#import mprage # DM - commented out
import argparse
import os
class ConnectedComponent(object):
vertexCC = None
ccsize = None
ncc = 0
n = 0
def __init__(self,G=None, fn=None):
if G is not None:
self.get_from_fiber_graph(G)
elif fn is not None:
self.load_from_file(fn)
def get_from_fiber_graph(self,G):
self.ncc,vertexCC = sp.cs_graph_components(G+G.transpose())
self.n = vertexCC.shape[0]
noniso = np.nonzero(np.not_equal(vertexCC,-2))[0]
cccounter = Counter(vertexCC[noniso])
cc_badLabel,_ = zip(*cccounter.most_common())
cc_dict = dict(zip(cc_badLabel, np.arange(self.ncc)+1))
cc_dict[-2] = 0
self.vertexCC = np.array([cc_dict[v] for v in vertexCC])
self.ccsize = Counter(vertexCC)
def save(self,fn, suffix=True):
if suffix:
np.save(fn+'_concomp.npy',sp.lil_matrix(self.vertexCC))
else:
np.save(fn,sp.lil_matrix(self.vertexCC))
def load_from_file(self,fn):
self.vertexCC = np.load(fn).item().toarray()
self.n = self.vertexCC.shape[1]
self.vertexCC = self.vertexCC.reshape(self.n)
def induced_subgraph(self, G, cc=1):
incc = np.equal(self.vertexCC,cc).nonzero()[0]
return G[:,incc][incc,:]
def __getitem__(self,key):
if type(key) is int:
return self.get_cc(key)
elif type(key) is tuple:
return self.get_coord_cc(key)
def get_cc(self,v):
return self.vertexCC[v]
def get_coord_cc(self,xyz):
return self.get_cc(zindex.XYZMorton(xyz))
def get_3d_cc(self,shape):
"""Takes a shape which is the shape of the new 3d image and 'colors' the image by connected component
Input
=====
shape -- 3-tuple
Output
======
        cc3d -- array with shape=shape, colored so that cc3d[x,y,z]=vcc[i] where x,y,z are the XYZ coordinates for Morton index i
"""
cc3d = np.NaN*np.zeros(shape)
allCoord = itt.product(*[xrange(sz) for sz in shape])
[cc3d.itemset((xyz), self.vertexCC[zindex.XYZMorton(xyz)])
for xyz in allCoord if not self.vertexCC[zindex.XYZMorton(xyz)]==0];
return cc3d
def get_coords_for_lccs(self, ncc):
"""Computes coordinates for each voxel in the top ncc connected components"""
inlcc = (np.less_equal(self.vertexCC,ncc)*np.greater(self.vertexCC,0)).nonzero()[0]
coord = np.array([zindex.MortonXYZ(v) for v in inlcc])
return np.concatenate((coord,self.vertexCC[inlcc][np.newaxis].T),axis=1)
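# A minimal usage sketch (hypothetical variable and file names, not part of the
# original pipeline): build a ConnectedComponent from a sparse fibergraph
# adjacency matrix, save the per-vertex labels, and pull out the subgraph
# induced by the largest connected component. Never called by this module.
def _example_connected_component_usage(G, out_prefix='example_brain'):
    """Illustrative only: G is assumed to be a scipy.sparse adjacency matrix."""
    vcc = ConnectedComponent(G=G)              # label every vertex by component
    vcc.save(out_prefix)                       # writes <out_prefix>_concomp.npy
    lcc_graph = vcc.induced_subgraph(G, cc=1)  # adjacency of the largest CC
    return lcc_graph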
def _load_fibergraph(roi_fn, mat_fn):
"""Load fibergraph from roi_fn and mat_fn"""
roix = roi.ROIXML(roi_fn+'.xml')
rois = roi.ROIData(roi_fn+'.raw', roix.getShape())
fg = fibergraph.FiberGraph(roix.getShape(),rois,[])
fg.loadFromMatlab('fibergraph', mat_fn)
return fg
def cc_for_each_brain(graphDir, roiDir, ccDir, figDir):
"""Go through the directory graphDir and find the connected components
Saves the all connected component info in ccDir and saves some 3d-pics into figDir
If figDir is None then it does not save
"""
fiberSfx = '_fiber.mat'
roiSfx = '_roi'
brainFiles = [fn.split('_')[0] for fn in os.listdir(graphDir)]
for brainFn in brainFiles:
print "Processing brain "+brainFn
fg = _load_fibergraph(roiDir+brainFn+roiSfx,graphDir+brainFn+fiberSfx)
print 'Processing connected components'
vcc = ConnectedComponent(fg.graph)
vcc.save(ccDir+brainFn)
print 'ncc='+repr(vcc.ncc)
if figDir:
save_figures(vcc.get_coords_for_lccs(10), figDir+brainFn)
del fg
'''
Created on June 29, 2012
@author: dmhembe1
Determine lcc on a single big graph as provided by a remote user
This is for use in the one-click processing pipeline to be found at http://www.openconnecto.me/STUB
'''
def process_single_brain(graph_fn, lccOutputFileName):
    print "Computing LCC for single brain... "
vcc = ConnectedComponent(loadmat(graph_fn)['fibergraph'])
if not os.path.exists(os.path.dirname(lccOutputFileName)):
print "Creating lcc directory %s" % os.path.dirname(lccOutputFileName)
os.makedirs(os.path.dirname(lccOutputFileName))
lcc = sp.lil_matrix(vcc.vertexCC)
np.save(lccOutputFileName, lcc) # save as .npy
return lcc
def get_slice(img3d, s, xyz):
if xyz=='xy':
return img3d[:,:,s]
if xyz=='xz':
return img3d[:,s,::-1].T
if xyz=='yz':
return img3d[s,::-1,::-1].T
print 'Not a valid view'
def show_overlay(img3d, cc3d, ncc=10, s=85, xyz = 'xy',alpha=.8):
"""Shows the connected components overlayed over img3d
Input
======
img3d -- 3d array
cc3d -- 3d array ( preferably of same shape as img3d, use get_3d_cc(...) )
ncc -- where to cut off the color scale
s -- slice to show
xyz -- which projection to use in {'xy','xz','yz'}
"""
cc = get_slice(cc3d,s,xyz)
img = get_slice(img3d,s,xyz)
notcc = np.isnan(cc)
incc = np.not_equal(notcc,True)
img4 = plt.cm.gray(img/np.nanmax(img))
if ncc is not np.Inf:
cc = plt.cm.jet(cc/float(ncc))
else:
cc = plt.cm.jet(np.log(cc)/np.log(np.nanmax(cc)))
cc[notcc,:]=img4[notcc,:]
cc[incc,3] = 1-img[incc]/(2*np.nanmax(img))
plt.imshow(cc)
#if ncc is not np.Inf:
# plt.imshow(cc,cmap=plt.cm.jet,clim=(1,ncc))
#else:
# plt.imshow(np.log(cc),cmap=plt.cm.jet)
#plt.imshow(img,alpha=alpha,cmap=plt.cm.gray)
def save_fa_overlay(faDir, ccDir, figDir, slist, orientationList):
brainFiles = [fn.split('_')[0] for fn in os.listdir(ccDir)]
f = plt.figure();
for bfn in brainFiles:
vcc = ConnectedComponent(fn=ccDir+bfn+'_concomp.npy')
fax = fa.FAXML(faDir+bfn+'_fa.xml')
fas = fa.FAData(faDir+bfn+'_fa.raw',fax.getShape())
cc3d = vcc.get_3d_cc(fax.getShape())
for view,s,xyz in zip(np.arange(len(slist)),slist,orientationList):
show_overlay(fas.data,cc3d,np.Inf,s,xyz,.5)
plt.savefig(figDir+bfn+'_ccfaOverlay_view'+repr(view)+'.pdf',)
plt.clf()
plt.close(f)
def save_overlay(faDir, mprDir, ccDir, figDir, slist, orientationList):
brainFiles = [fn.split('_')[0] for fn in os.listdir(ccDir)]
f = plt.figure(figsize=(14,9));
for bfn in brainFiles:
vcc = ConnectedComponent(fn=ccDir+bfn+'_concomp.npy')
fax = fa.FAXML(faDir+bfn+'_fa.xml')
fas = fa.FAData(faDir+bfn+'_fa.raw',fax.getShape())
mpx = mprage.MPRAGEXML(mprDir+'mprage_'+bfn+'_ss_crop.xml')
mpd = mprage.MPRAGEData(mprDir+'mprage_'+bfn+'_ss_crop.raw',mpx.getShape())
cc3d = vcc.get_3d_cc(fax.getShape())
for view,s,xyz in zip(np.arange(len(slist)),slist,orientationList):
plt.clf()
plt.subplot(221);
plt.title('FA Overlay')
show_overlay(fas.data,cc3d,np.Inf,s,xyz,.5)
plt.subplot(222);
plt.title('FA Original; '+bfn+', '+xyz+'-slice '+repr(s))
plt.imshow(get_slice(fas.data,s,xyz),cmap=plt.cm.gray)
plt.colorbar()
plt.subplot(223); plt.title('MPRAGE Overlay')
show_overlay(mpd.data,cc3d,np.Inf,s,xyz,.5)
plt.subplot(224);
plt.title('MPRAGE Original')
plt.imshow(get_slice(mpd.data,s,xyz),cmap=plt.cm.gray)
plt.colorbar()
#plt.tight_layout()
plt.savefig(figDir+bfn+'_ccfaOverlay_view'+repr(view)+'.pdf')
plt.close(f)
'''
def save_figures(coord, fn):
"""Saves 3 images which are 3d color representations of the coordinates in coord
Input
=====
coord -- an nx4 array of x,y,z coordinates and another scalar that gives color
fn -- save filename prefix"""
x,y,z,c = np.hsplit(coord,4)
f = mlab.figure()
mlab.points3d(x,y,z,c, mask_points=50, scale_mode='none',scale_factor=2.0)
mlab.view(0,180)
mlab.savefig(fn+'_view0,180.png',figure=f,magnification=4)
mlab.view(0,90)
mlab.savefig(fn+'_view0,90.png',figure=f,magnification=4)
mlab.view(90,90)
mlab.savefig(fn+'_view90,90.png',figure=f,magnification=4)
mlab.close(f)
'''
def get_3d_cc(vcc,shape):
"""Takes an array vcc and shape which is the shape of the new 3d image and 'colors' the image by connected component
    For some reason this is 3 times as fast as the same thing in the ConnectedComponent class?
Input
=====
    vcc -- 1d array
    shape -- 3-tuple
Output
======
    cc3d array with shape=shape, colored so that cc3d[x,y,z]=vcc[i] where x,y,z are the XYZ coordinates for Morton index i
"""
cc3d = np.NaN*np.zeros(shape)
allCoord = itt.product(*[xrange(sz) for sz in shape])
[cc3d.itemset((xyz), vcc[zindex.XYZMorton(xyz)])
for xyz in allCoord if not vcc[zindex.XYZMorton(xyz)]==0];
return cc3d
def main ():
parser = argparse.ArgumentParser(description='Draw the ROI map of a brain.')
parser.add_argument('roixmlfile', action="store")
parser.add_argument('roirawfile', action="store")
parser.add_argument('fibergraphfile', action="store")
parser.add_argument('ccfile', action="store")
result = parser.parse_args()
roix = roi.ROIXML(result.roixmlfile)
rois = roi.ROIData(result.roirawfile, roix.getShape())
fg = fibergraph.FiberGraph(roix.getShape(),rois,[])
fg.loadFromMatlab('fibergraph', result.fibergraphfile)
vcc = ConnectedComponent(G=fg.graph)
    vcc.save(result.ccfile)
if __name__=='__main__':
# Added for -h flag # DM
parser = argparse.ArgumentParser(description="Largest connected component generator")
result = parser.parse_args()
graphDir = '/mnt/braingraph1data/projects/MRN/graphs/biggraphs/'
roiDir = '/mnt/braingraph1data/projects/will/mar12data/roi/'
ccDir = '/data/biggraphs/connectedcomp/'
figDir = '/home/dsussman/Dropbox/Figures/DTMRI/lccPics/'
cc_for_each_brain(graphDir, roiDir, ccDir, figDir)
| apache-2.0 |
LEX2016WoKaGru/pyClamster | examples/clustering/cloud_clustering.py | 1 | 3075 | # -*- coding: utf-8 -*-
"""
Created on 13.06.16
Created for pyclamster
@author: Tobias Sebastian Finn, tobias.sebastian.finn@studium.uni-hamburg.de
Copyright (C) {2016} {Tobias Sebastian Finn}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# System modules
import pickle
import warnings
import glob
import os
import time
# External modules
import numpy as np
import scipy.misc
import scipy.ndimage
from sklearn.cluster import MiniBatchKMeans
from sklearn.preprocessing import StandardScaler
from skimage.feature import match_template
from skimage.segmentation import random_walker
from skimage.morphology import watershed
from skimage.feature import peak_local_max
from skimage import morphology
# Internal modules
from pyclamster import Image, Labels
from pyclamster.matching.cloud import Cloud, SpatialCloud
from pyclamster.clustering.preprocess import LCN, ZCA
from pyclamster.clustering.kmeans import KMeans
from pyclamster.functions import localBrightness, rbDetection
warnings.catch_warnings()
warnings.filterwarnings('ignore')
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
image_directory = os.path.join(BASE_DIR, "examples", "images", 'wettermast')
trained_models = os.path.join(BASE_DIR, "data")
good_angle = 45
center = int(1920/2)
good_angle_dpi = int(np.round(1920 / 180 * good_angle))
denoising_ratio = 10
all_images = glob.glob(os.path.join(image_directory, "Image_*.jpg"))
predictor = pickle.load(open(os.path.join(trained_models, "kmeans.pk"), "rb"))
for image_path in all_images:
image = Image(image_path)
image.cut([center - good_angle_dpi, center-good_angle_dpi, center+good_angle_dpi, center + good_angle_dpi]).save('test.jpg')
image.data = LCN(size=(50,50,3), scale=False).fit_transform(image.data)
image.data = image.data[center - good_angle_dpi:center + good_angle_dpi,
center - good_angle_dpi:center + good_angle_dpi]
raw_image = rbDetection(image.data)
w, h = original_shape = tuple(raw_image[:, :].shape)
raw_image = np.reshape(raw_image, (w * h, 1))
label = predictor.predict(raw_image)
label.reshape((960, 960), replace=True)
scipy.misc.imsave("cloud.png", label.labels)
masks = label.getMaskStore()
masks.denoise([1], 960)
cloud_labels, _ = masks.labelMask([1,])
scipy.misc.imsave("labels.png", cloud_labels.labels)
scipy.misc.imshow(cloud_labels.labels)
cloud_store = cloud_labels.getMaskStore()
| gpl-3.0 |
Jianlong-Peng/pytools | PMF/bin1/pls_train.py | 1 | 8247 | '''
#=============================================================================
# FileName: pls_train.py
# Desc:
# Author: jlpeng
# Email: jlpeng1201@gmail.com
# HomePage:
# Created: 2014-08-21 14:10:24
# LastChange: 2014-08-22 13:12:34
# History:
#=============================================================================
'''
import sys
import pickle
from getopt import getopt
import numpy as np
try:
from sklearn.cross_decomposition import PLSRegression
except ImportError:
from sklearn.pls import PLSRegression
from sklearn.base import clone
from sklearn.cross_validation import KFold
train_y_file = None
nijr_file = None
n_comp = 0
cv = 0
verbose = False
model_file = None
USE_KFOLD = False
def parse_arguments(argv):
options,args = getopt(argv[1:], 'n:', ['try=','nijr=','cv=','verbose'])
global train_y_file
global nijr_file
global n_comp
global cv
global verbose
global model_file
for opt,val in options:
if opt == '--try':
train_y_file = val
elif opt == '--nijr':
nijr_file = val
elif opt == '-n':
n_comp = int(val)
assert n_comp > 0
elif opt == '--cv':
cv = int(val)
assert cv > 1
elif opt == '--verbose':
verbose = True
else:
            print >>sys.stderr, "Error: invalid option",opt
sys.exit(1)
if train_y_file is None:
print >>sys.stderr, "Error: '--try' is needed"
sys.exit(1)
if nijr_file is None:
print >>sys.stderr, "Error: '--nijr' should be given"
sys.exit(1)
if cv > 0:
if n_comp==0:
print >>sys.stderr, "Error: '-n' is needed when '--cv' is given"
sys.exit(1)
else:
assert len(args) == 1
model_file = args[0]
def exit_with_help(name):
print "\nUsage:"
print " %s [options] output.model"%name
print "\n[options]"
print " --try file: each line should be `name y-value`"
print " --nijr file: generated by get_nijr.py"
print " -n n_comp: specify the number of components to be used"
print " if not given, then it'll be optimized using cross-validation"
print " to get the number of components yielding smallest RMSE"
print " --cv n: <optional>"
print " do {n}-fold cross-validation"
print " in this case, '-n' must be given"
print " --verbose : if given, additional information will be displayed"
print " e.g. the predicting result of each sample will be displayed"
print " show the number of components being tested"
print " ..."
print ""
sys.exit(1)
def main(argv=sys.argv):
if len(argv) < 2:
exit_with_help(argv[0])
#parse options
parse_arguments(argv)
global train_y_file
global nijr_file
global n_comp
global cv
global verbose
global model_file
#read training set
train_list = []
train_ys = []
inf = open(train_y_file,'r')
for line in inf:
if line.startswith('#'):
continue
name,val = line.split()
train_list.append(name)
train_ys.append(float(val))
inf.close()
train_ys = np.asarray(train_ys)
if verbose:
print "train_ys.shape:",train_ys.shape
#read nijr
#each item `i,j,n` ==> Xs[][j*len(DC_TYPES)+i] = n
inf = open(nijr_file,'r')
line = inf.readline()
num_DC_TYPES = int(line.split(',')[0].split('=')[-1])
num_bins = int(line.split('=')[-1])
train_Xs = np.zeros((len(train_list), num_DC_TYPES*num_bins))
count = 0
for line in inf:
if line.startswith('#'):
continue
line = line.split()
if line[0] in train_list:
k = train_list.index(line[0])
for item in line[1:]:
i,j,n = map(int, item.split(','))
index = j*num_DC_TYPES+i
train_Xs[k, index] = n
count += 1
else:
print line[0],"not found in",train_y_file
sys.exit(1)
inf.close()
if count != len(train_list):
print >>sys.stderr, "Error: number of samples(%d) in %s not equal to those(%d) in %s"%(count, nijr_file, len(train_list), train_y_file)
sys.exit(1)
if verbose:
print "train_Xs.shape:",train_Xs.shape
#do cross-validation only
if cv > 0:
print "do %d-fold cross-validation"%cv
actualY,predY = doCV(train_Xs, train_ys, n_comp, cv)
mae = calcMAE(actualY,predY)
rmse = calcRMSE(actualY,predY)
r = calcR(actualY, predY)
print "\n statistics of %d-fold CV"%cv
print " MAE=%g, RMSE=%g, r=%g\n"%(mae, rmse, r)
return
#train PLS model
if n_comp > 0:
if n_comp > train_Xs.shape[1]:
print "Error: the number of components specified(%d) is larger than the number of predictors(%d)"%(n_comp, train_Xs.shape[1])
sys.exit(1)
print "\nPLS will be trained using %d components"%n_comp
pls1 = PLSRegression(n_components=n_comp)
else:
n_components,best_val = search_n_components(train_Xs, train_ys, verbose)
print "\nPLS will be trained using %d components, with RMSE=%g of 5-fold CV"%(n_components, best_val)
pls1 = PLSRegression(n_components=n_components)
pls1.fit(train_Xs,train_ys)
print "\nto apply the PLS model to training set"
predict_and_display(pls1, train_Xs, train_ys, train_list, verbose)
#save model
outf = open(model_file,'w')
pickle.dump(pls1, outf)
outf.close()
calcMAE = lambda actualY, predictY: np.mean(np.abs(actualY-predictY))
calcRMSE = lambda actualY, predictY: np.sqrt(np.mean(np.power(actualY-predictY,2)))
calcR = lambda actualY, predictY: np.corrcoef(actualY, predictY)[0][1]
def predict_and_display(pls1, Xs, ys, names, verbose):
predY = pls1.predict(Xs)
predY = np.ndarray.flatten(predY)
rmse = calcRMSE(ys, predY)
mae = calcMAE(ys, predY)
r = calcR(ys, predY)
if verbose:
print "name actualY predictY"
for i in xrange(len(names)):
print names[i],ys[i],predY[i]
print ""
print " RMSE=%g, MAE=%g, r=%g\n"%(rmse, mae, r)
def search_n_components(Xs, Ys, verbose=False):
best_val = 1e38
best_n = 0
if verbose:
print "\nsearch for the best number of components"
for i in xrange(1, Xs.shape[1]+1):
if verbose:
print "> to use %d components"%i,
actualY,predY = doCV(Xs, Ys, i, 5)
val = calcRMSE(actualY, predY)
r = calcR(actualY, predY)
if verbose:
print " => result of 5-fold CV: RMSE=%g, r=%g"%(val,r)
if val < best_val:
best_val = val
best_n = i
if verbose:
print "best_n_components=%d, best_val=%g"%(best_n, best_val)
return best_n,best_val
def doCV(Xs, Ys, n_components, nfold):
global USE_KFOLD
if USE_KFOLD:
print "using KFold"
kf = KFold(Xs.shape[0], n_folds=nfold, shuffle=True)
else:
kf = MyFold(Xs.shape[0], nfold)
actualY = []
predictY = []
for train,test in kf:
X_train,X_test = Xs[train],Xs[test]
y_train,y_test = Ys[train],Ys[test]
pls1 = PLSRegression(n_components=n_components)
pls1.fit(X_train,y_train)
y_test_pred = pls1.predict(X_test)
actualY.extend(list(y_test))
predictY.extend(list(np.ndarray.flatten(y_test_pred)))
return np.asarray(actualY), np.asarray(predictY)
class MyFold:
def __init__(self, length, nfolds):
self.length = length
self.nfolds = nfolds
self.i = 0
def __iter__(self):
return self
def next(self):
self.i += 1
if self.i > self.nfolds:
raise StopIteration
tr_idx = []
te_idx = []
for i in xrange(self.length):
if i%(self.nfolds) == self.i-1:
te_idx.append(i)
else:
tr_idx.append(i)
return (tr_idx,te_idx)
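# Quick illustration of how MyFold splits indices (hypothetical numbers, never
# executed by this script): sample i goes to the test set of fold (i % nfolds) + 1,
# so the folds interleave instead of forming contiguous blocks.
def _myfold_example():
    folds = [(tr, te) for tr, te in MyFold(10, 3)]
    # test indices: fold 1 -> [0, 3, 6, 9], fold 2 -> [1, 4, 7], fold 3 -> [2, 5, 8]
    return folds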
if __name__ == "__main__":
main()
| gpl-2.0 |
mikkkee/Bubble | bubble.py | 1 | 44991 | from __future__ import print_function
from itertools import islice, product
import logging
import MDAnalysis as md
import math
import random
import numpy as np
import pandas as pd
import plotly
import plotly.graph_objs as go
import subprocess
import scipy
import scipy.stats
import string
import time
import settings
class Atom(object):
def __init__(self, identifier, **kwargs):
self.id = identifier
self.type = kwargs.get('type', None)
self.element = kwargs.get('element', None)
self.xyz = kwargs.get('xyz', None)
self.stress = kwargs.get('stress', None)
self.normal = kwargs.get('normal', False)
self.distance = None
self.sin_theta = None
self.cos_theta = None
self.sin_phi = None
self.cos_phi = None
self.spherical_stress = None
self.voro_volume = 0
def calc_spherical_stress(self):
"""
Calculate spherical stress tensor from cartesian one
ref: http://www.brown.edu/Departments/Engineering/Courses/En221/Notes/Polar_Coords/Polar_Coords.htm
"""
xx, yy, zz, xy, xz, yz = self.stress
cart = np.array( [ [xx, xy, xz], [xy, yy, yz], [xz, yz, zz] ] )
# 1 for theta, the angle between xyz and z axis, 2 for phi,
# angle between x axis and the projection on xy-plane
sin1 = self.sin_theta
cos1 = self.cos_theta
sin2 = self.sin_phi
cos2 = self.cos_phi
conv = np.array( [ [sin1*cos2, cos1*cos2, -sin2],
[sin1*sin2, cos1*sin2, -cos2],
[cos1, -sin1, 0], ] )
sphe = np.dot( conv, cart.dot( np.transpose(conv) ) )
# Of format [ [rr, rTheta, rPhi], [rTheta, thetaTheta, thetaPhi], [rPhi, thetaPhi, phiPhi] ]
self.spherical_stress = sphe
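# A standalone sketch mirroring Atom.calc_spherical_stress above: it applies the
# same conv matrix to a bare 3x3 Cartesian stress tensor, which is convenient for
# checking a single tensor without constructing an Atom. Illustrative helper only;
# nothing in this module calls it.
def spherical_stress_from_cartesian(cart, sin_theta, cos_theta, sin_phi, cos_phi):
    conv = np.array([[sin_theta * cos_phi, cos_theta * cos_phi, -sin_phi],
                     [sin_theta * sin_phi, cos_theta * sin_phi, -cos_phi],
                     [cos_theta, -sin_theta, 0.0]])
    # Same convention as calc_spherical_stress: sphe = conv . cart . conv^T
    return np.dot(conv, np.dot(cart, np.transpose(conv)))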
class Box(object):
PI = 3.1415926
def __init__(self, timestep=0, radius=None, use_atomic_volume=True, average_on_atom=False, **kwargs):
# Current timestep.
self.timestep = timestep
# Maximum bubble radius in box.
self.radius = radius
self.count = 0
# XYZ boundaries.
self.bx = kwargs.get('bx', None)
self.by = kwargs.get('by', None)
self.bz = kwargs.get('bz', None)
# Bubble center coordinates.
self._center = kwargs.get('center', None)
# All atoms.
self.atoms = []
# Container of atoms for each element.
self._elements = {}
# Container of shell stress for each element.
self._shell_stress = {}
self._shell_stress_r = {}
self._shell_stress_theta = {}
self._shell_stress_phi = { }
# Container of shell atom count for each element.
self.nbins = None
self._shell_atoms = {}
self._shell_atom_objs = []
self._shell_volumes = {}
# Indicator of stats status.
self._stats_finished = False
self._measured = False
# Dump atom coordinates to calculate voro tessellation volume
self.voro_file_name = 'atom_coors'
self.use_atomic_volume = use_atomic_volume
self.average_on_atom = average_on_atom
@property
def measured(self):
"""Returns true if all atoms have a distance (to bubble center)."""
if all([x.distance for x in self.atoms]):
self._measured = True
else:
self._measured = False
return self._measured
@property
def center(self):
return self._center
@center.setter
def center(self, coor):
self._center = coor
self._measured = False
self._stats_finished = False
def combine_water_atoms(self):
"""
Combine H and O together into a new particle
        stress = S_O + S_H1 + S_H2
        coor = center of mass of the three atoms
        The sequence of the atoms in each water molecule is O H H
"""
self._old_atoms = self.atoms
self.atoms = []
self._old_atoms.sort( key=lambda x: x.id )
water = []
for atom in self._old_atoms:
if atom.element not in ['H', 'O']:
self.atoms.append( atom )
else:
water.append(atom)
if len( water ) == 3:
# need to combine the 3 atoms into 1 now
assert [ _ele.element for _ele in water ] == ['O', 'H', 'H']
new_stress = [a+b+c for a, b, c in zip(water[0].stress, water[1].stress, water[2].stress)]
new_volume = sum( _ele.voro_volume for _ele in water )
masses = [ 16 if _ele.element == 'O' else 1 for _ele in water ]
xs = [ _ele.xyz[0] for _ele in water]
ys = [ _ele.xyz[ 1 ] for _ele in water ]
zs = [ _ele.xyz[ 2 ] for _ele in water ]
cx = sum( m*x for m,x in zip(masses, xs) ) / sum(masses)
cy = sum( m * y for m, y in zip( masses, ys ) ) / sum( masses )
cz = sum( m * z for m, z in zip( masses, zs ) ) / sum( masses )
new_xyz = (cx, cy, cz)
new_id = water[0].id
normal = water[0].normal
self.atoms.append( Atom(new_id, type=3, element='H', xyz=new_xyz, stress=new_stress, normal=normal) )
water = []
def dump_atoms_for_voro( self, length=None ):
'''
Dump atom coordinates so we can calculate Voronoi tessellation using Voro++
from http://math.lbl.gov/voro++/
The input file format for voro++ is
<atom id> <x> <y> <z>
and output file format is
<atom id> <x> <y> <z> <tessellation volume>
'''
logging.info( 'Dump atom coordinates to {}'.format( self.voro_file_name ) )
fmt = '{} {} {} {}\n'
if length:
xmin, xmax = self.center[0] - length, self.center[0] + length
ymin, ymax = self.center[1] - length, self.center[1] + length
zmin, zmax = self.center[2] - length, self.center[2] + length
with open( self.voro_file_name, 'w' ) as output:
for atom in self.atoms:
x, y, z = atom.xyz
if length:
if xmin <= x <= xmax and ymin<= y <= ymax and zmin <= z <= zmax:
output.write( fmt.format( atom.id, x, y, z ) )
else:
output.write( fmt.format( atom.id, x, y, z ) )
def voro_cmd( self, gnuplot=False, length=None ):
'''
CMD to run voro++ in bash
gnuplot=True will also export gnu plot file. Be careful when system is large as
this file will be extremely large
default to use -o to preserve the atom order. This has small memory and performance
impact as the documentation says.
'''
# when have length -o will not work
cmd = 'voro++' if length else 'voro++ -o'
fmt = cmd + ' {opts} {{xmin}} {{xmax}} {{ymin}} {{ymax}} {{zmin}} {{zmax}} {{infile}}'
opts = '-g' if gnuplot else ''
fmt = fmt.format( opts=opts )
if length:
xmin, xmax = self.center[0] - length, self.center[0] + length
ymin, ymax = self.center[1] - length, self.center[1] + length
zmin, zmax = self.center[2] - length, self.center[2] + length
else:
xmin, xmax = self.bx
ymin, ymax = self.by
zmin, zmax = self.bz
return fmt.format( xmin=xmin, xmax=xmax,
ymin=ymin, ymax=ymax,
zmin=zmin, zmax=zmax,
infile=self.voro_file_name)
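    # For example (hypothetical bounds): with the default voro_file_name of
    # 'atom_coors', gnuplot=False, no length, and box boundaries (0, 50) on every
    # axis, voro_cmd() returns the string
    #   'voro++ -o  0 50 0 50 0 50 atom_coors'
    # and running it produces the volume file 'atom_coors.vol' read below.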
def run_voro_cmd( self, gnuplot=False, length=None ):
logging.info( 'Calculating voro volumes for atoms' )
cmd = self.voro_cmd( gnuplot=gnuplot, length=length )
logging.info( "Running: {}".format( cmd ))
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = sp.communicate()
if err:
raise Exception(err)
logging.info( "Finished: {}".format( cmd ) )
def read_voro_volumes( self ):
voro_out = self.voro_file_name + '.vol'
logging.info( 'Reading voro volumes from {}'.format( voro_out ) )
with open( voro_out, 'r' ) as volumes:
idx = 0
for line in volumes:
atom_id, x, y, z, vol = [ float(ele) for ele in line.split() ]
atom_id = int( atom_id )
atom = self.atoms[ idx ]
try:
assert( atom.id == atom_id )
except Exception as e:
print( atom.id, atom_id )
raise e
atom.voro_volume = vol
idx += 1
def calc_voro_volumes( self, gnuplot=False, length=None ):
''' Calculate voro tessellation volume using voro '''
self.dump_atoms_for_voro( length=length )
self.run_voro_cmd( gnuplot=gnuplot, length=length )
if not length:
self.read_voro_volumes()
def adjust_water_vol(self, ratio=(0.5, 0.25)):
""" Adjust volume of H and O in water. For pure water system only """
satoms = sorted( self.atoms, key= lambda x: x.id)
assert( len( satoms ) % 3 == 0 )
assert( ratio[0] + 2 * ratio[1] == 1.0)
for idx in xrange( len(satoms) / 3):
o = satoms[ idx * 3 ]
h1 = satoms[ idx * 3 + 1 ]
h2 = satoms[ idx * 3 + 2 ]
vsum = sum( ele.voro_volume for ele in [o, h1, h2])
vo = ratio[0] * vsum
vh = ratio[1] * vsum
o.adj_vol = vo
h1.adj_vol = vh
h2.adj_vol = vh
def set_boundary(self, bx, by, bz):
"""Set bx by bz together."""
self.bx = bx
self.by = by
self.bz = bz
def add_atom(self, atom):
self.atoms.append(atom)
self.count += 1
# Need to run stats after new atom added.
self._stats_finished = False
if atom.element in self._elements:
self._elements[atom.element].append(atom)
else:
self._elements[atom.element] = [atom]
def measure(self):
"""Measure distance to bubble center for each atom."""
for atom in self.atoms:
coor = np.array(atom.xyz)
atom.distance = np.linalg.norm(coor - self.center)
if atom.normal:
# Calculate sin cos for theta and phi
dx = coor[0] - self.center[0]
dy = coor[1] - self.center[1]
dz = coor[2] - self.center[2]
xy_square = math.sqrt(dx*dx + dy*dy)
atom.sin_theta = xy_square / atom.distance
atom.cos_theta = dz / atom.distance
atom.sin_phi = dy / xy_square
atom.cos_phi = dx / xy_square
self.calc_voro_volumes()
    def stats(self, dr, normal=False):
"""
System stats.
Generate data for atom stats and stress stats for each element.
self._shell_atoms = {}
self._shell_stress = {}
"""
if not self.measured:
            raise AtomUnmeasuredError("Some atoms are unmeasured")
self.nbins = int(math.ceil(self.radius / float(dr)))
self._shell_atom_objs = [ { } for x in range( self.nbins ) ]
for ele, atoms in self._elements.iteritems():
# Do stats for each element.
for atom in atoms:
if atom.distance < self.radius:
shell_idx = int( atom.distance / dr )
self._shell_atom_objs[ shell_idx ].setdefault(ele, []).append( atom )
if normal:
atom.calc_spherical_stress()
self._stats_finished = True
def atom_stats(self, element, dr):
"""Atom ratio stats inside bubble."""
if not self._stats_finished:
self.stats(dr)
nbins = len(self._shell_atoms[element])
bubble_atoms = {}
# Init bubble atoms by copying shell atoms
for ele, count in self._shell_atoms.iteritems():
bubble_atoms[ele] = [x for x in count]
for i in range(1, nbins):
bubble_atoms[ele][i] += bubble_atoms[ele][i - 1]
bubble_atoms[ele] = np.array(bubble_atoms[ele])
return bubble_atoms[element] / sum(bubble_atoms.values())
def pressure_stats(self, elements, dr):
"""Average pressure stats inside bubble for species in elements."""
if not self._stats_finished:
self.stats(dr)
nbins = len(self._shell_stress[elements[0]])
# Calculate stress for all element in elements as whole.
# Convert numpy.Array to mutable list.
stress_in = [x for x in sum([self._shell_stress[ele] for ele in elements])]
stress_out = [x for x in stress_in]
for i in range(1, nbins):
# Cumulative stress.
stress_in[i] += stress_in[i-1]
stress_out[nbins - 1 - i] += stress_out[nbins - i]
for i in range(1, nbins):
# Stress -> pressure.
stress_in[i] = 0 - stress_in[i] / self.vol_sphere((i+1)*dr) / 3.0
stress_out[nbins-1-i] = 0 - stress_out[nbins-1-i] / (self.vol_sphere(self.radius) - self.vol_sphere((nbins-i-1)*dr)) / 3
# Head and tail.
stress_in[0] = 0 - stress_in[0] / self.vol_sphere(dr) / 3
stress_out[nbins - 1] = 0 - stress_out[nbins - 1] / (self.vol_sphere(self.radius) - self.vol_sphere((nbins - 1)*dr)) / 3
return {'in': stress_in, 'out': stress_out}
def shell_pressure_stats(self, elements, dr, normal=False):
"""Average pressure of elements inside shell."""
self.stats(dr, normal=normal)
        print( "Number of bins: {}".format(self.nbins) )
if not normal:
# atom.stress has 3 elements, xx yy zz components
if self.use_atomic_volume:
if self.average_on_atom:
# atomic volume is used, pressure is calculated for each atom and then averaged together
stress = []
for idx, shell_atoms in enumerate(self._shell_atom_objs):
pressure_raw = {}
for element, atoms in shell_atoms.iteritems():
if element in elements:
# P = -(S_xx + S_yy + S_zz)/3/V
pressure_raw[element] = [ - sum(atom.stress)/atom.voro_volume/3.0 for atom in atoms ]
# Average pressure = sum(Pressure)/n_atoms
n_atoms = sum( len(_ele) for _ele in pressure_raw.values() )
if n_atoms != 0:
pressure_ave = sum( sum(_ele) for _ele in pressure_raw.values() ) / n_atoms
else:
pressure_ave = 0
stress.append(pressure_ave)
return stress
else:
# pressure is calculated as sum(atom stress in a shell) / sum(atom volume in a shell)
stress = []
for idx, shell_atoms in enumerate( self._shell_atom_objs ):
stress_all = 0
volume_all = 0
for element, atoms in shell_atoms.iteritems():
if element in elements:
stress_all += sum( sum(atom.stress[:3]) for atom in atoms )
volume_all += sum( atom.voro_volume for atom in atoms )
if volume_all != 0:
pressure_ave = - stress_all / 3.0 / volume_all
else:
pressure_ave = 0
stress.append( pressure_ave )
return stress
else:
# use shell volume
stress = [ ]
for idx, shell_atoms in enumerate( self._shell_atom_objs ):
r_min, r_max = idx * dr, (idx + 1)*dr
stress_all = 0
volume_all = self.vol_sphere(r_max) - self.vol_sphere(r_min)
for element, atoms in shell_atoms.iteritems():
if element in elements:
stress_all += sum( sum( atom.stress[:3] ) for atom in atoms )
pressure_ave = - stress_all / 3.0 / volume_all
stress.append( pressure_ave )
return stress
else:
# normal pressure, atom.spherical_stress has 6 items: xx, yy, zz, xy, xz, yz.
stress_r = []
stress_theta = []
stress_phi = []
if self.use_atomic_volume:
if self.average_on_atom:
# Pressure is calculate as average of pressure on each atom
for idx, shell_atoms in enumerate( self._shell_atom_objs ):
pressure_r_raw = {}
pressure_theta_raw = {}
pressure_phi_raw = {}
for element, atoms in shell_atoms.iteritems():
if element in elements:
pressure_r_raw[element] = [ - atom.spherical_stress[0][0] / atom.voro_volume for atom in atoms ]
pressure_theta_raw[element] = [ - atom.spherical_stress[1][1] / atom.voro_volume for atom in atoms ]
pressure_phi_raw[element] = [ - atom.spherical_stress[2][2] / atom.voro_volume for atom in atoms ]
n_atoms = sum( len( _ele ) for _ele in pressure_r_raw.values() )
if n_atoms != 0:
pressure_r_ave = sum( sum(_ele) for _ele in pressure_r_raw.values() ) / n_atoms
pressure_theta_ave = sum( sum(_ele) for _ele in pressure_theta_raw.values() ) / n_atoms
pressure_phi_ave = sum( sum(_ele) for _ele in pressure_phi_raw.values() ) / n_atoms
else:
pressure_r_ave = pressure_theta_ave = pressure_phi_ave = 0
stress_r.append( pressure_r_ave )
stress_theta.append( pressure_theta_ave )
stress_phi.append( pressure_phi_ave )
return { 'r': stress_r, 'theta': stress_theta, 'phi': stress_phi, }
else:
# Pressure is calculated as sum(stress)/sum(atomic_volume)
for idx, shell_atoms in enumerate( self._shell_atom_objs ):
stress_r_all = 0
stress_theta_all = 0
stress_phi_all = 0
volume_all = 0
for element, atoms in shell_atoms.iteritems():
if element in elements:
stress_r_all += sum( atom.spherical_stress[0][0] for atom in atoms )
stress_theta_all += sum( atom.spherical_stress[1][1] for atom in atoms )
stress_phi_all += sum( atom.spherical_stress[2][2] for atom in atoms )
volume_all += sum( atom.voro_volume for atom in atoms )
if volume_all != 0:
pressure_r_ave = - stress_r_all / volume_all
pressure_theta_ave = - stress_theta_all / volume_all
pressure_phi_ave = - stress_phi_all / volume_all
else:
pressure_r_ave = pressure_theta_ave = pressure_phi_ave = 0
stress_r.append( pressure_r_ave )
stress_theta.append( pressure_theta_ave )
stress_phi.append( pressure_phi_ave )
return { 'r': stress_r, 'theta': stress_theta, 'phi': stress_phi, }
else:
# Use shell volume
for idx, shell_atoms in enumerate( self._shell_atom_objs ):
r_min, r_max = idx * dr, (idx+1) * dr
stress_r_all = 0
stress_theta_all = 0
stress_phi_all = 0
volume_all = self.vol_sphere(r_max) - self.vol_sphere(r_min)
for element, atoms in shell_atoms.iteritems():
if element in elements:
stress_r_all += sum( atom.spherical_stress[ 0 ][ 0 ] for atom in atoms )
stress_theta_all += sum( atom.spherical_stress[ 1 ][ 1 ] for atom in atoms )
stress_phi_all += sum( atom.spherical_stress[ 2 ][ 2 ] for atom in atoms )
pressure_r_ave = - stress_r_all / volume_all
pressure_theta_ave = - stress_theta_all / volume_all
pressure_phi_ave = - stress_phi_all / volume_all
stress_r.append( pressure_r_ave )
stress_theta.append( pressure_theta_ave )
stress_phi.append( pressure_phi_ave )
return { 'r': stress_r, 'theta': stress_theta, 'phi': stress_phi, }
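    # Summary of the averaging modes implemented above (derived from the code,
    # for quick reference): with per-atom stress tensors S_i and Voronoi volumes
    # V_i inside a shell of geometric volume V_shell,
    #   average_on_atom=True                 P = mean_i( -trace(S_i) / (3 * V_i) )
    #   atomic volumes, shell-wise ratio     P = -sum_i trace(S_i) / (3 * sum_i V_i)
    #   use_atomic_volume=False              P = -sum_i trace(S_i) / (3 * V_shell)
    # For the normal (spherical) branch the trace/3 is replaced by the rr,
    # theta-theta and phi-phi components, without the factor of 3.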
def pressure_between(self, rlow, rhigh):
"""Return the average pressure and number of atoms between rlow
and rhigh."""
stress = 0
count = 0
for atom in self.atoms:
if atom.distance > rlow and atom.distance <= rhigh:
count += 1
stress += sum(atom.stress)
volume = self.vol_sphere(rhigh) - self.vol_sphere(rlow)
return stress / volume / 3, count
def shell_density(self, elements, mole, dr):
"""Shell density for species inside elements.
mole unit - g/cm^3
dr unit - angstrom
"""
# Usually density_dr is different from stats_dr.
self.stats(dr)
# Avogadro constant. Modified by coefficient used to
# convert angstrom^3 to cm^3.
NA = 6.022 / 10
nbins = len(self._shell_atoms[elements[0]])
# Calculate atom count for all species in elements as whole.
# Convert numpy.Array to mutable list.
count = [x for x in sum([self._shell_atoms[ele] for ele in elements])]
# Calculate density.
for i in range(nbins):
r_low = i * dr
r_high = r_low + dr
# Volume unit is Angstrom^3.
volume = self.vol_sphere(r_high) - self.vol_sphere(r_low)
count[i] = count[i] / NA / volume
return count
def bubble_density(self, elements, mole, dr):
pass
def xyz_density(self, elements, mole, dx):
"""Density distribution along x, y, and z inside box."""
# Avogadro constant. Modified by coefficient used to
# convert angstrom^3 to cm^3.
NA = 6.022 / 10
nx = int(math.ceil((self.bx[1] - self.bx[0]) / dx))
ny = int(math.ceil((self.by[1] - self.by[0]) / dx))
nz = int(math.ceil((self.bz[1] - self.bz[0]) / dx))
dist = {}
dist['x'] = [0 for x in range(nx)]
dist['y'] = [0 for y in range(ny)]
dist['z'] = [0 for z in range(nz)]
for ele in elements:
# Count atoms.
for atom in self._elements[ele]:
dist['x'][int(atom.xyz[0] / dx)] += 1
dist['y'][int(atom.xyz[1] / dx)] += 1
dist['z'][int(atom.xyz[2] / dx)] += 1
volx = (self.by[1] - self.by[0]) * (self.bz[1] - self.bz[0]) * dx
voly = (self.bx[1] - self.bx[0]) * (self.bz[1] - self.bz[0]) * dx
volz = (self.by[1] - self.by[0]) * (self.bx[1] - self.bx[0]) * dx
for i in range(nx):
# Calculate density.
dist['x'][i] = dist['x'][i] / NA / volx
dist['y'][i] = dist['y'][i] / NA / voly
dist['z'][i] = dist['z'][i] / NA / volz
return dist
def vol_sphere(self, r):
"""Volume of sphere with radius r."""
return 4.0/3 * Box.PI * (r ** 3)
def volume(self):
""" Box volume """
return (self.bx[1] - self.bx[0]) * (self.by[1] - self.by[0]) * (self.bz[1] - self.bz[0])
class Trajectory( object ):
'''Gas molecule trajectory class'''
def __init__( self, pdbPath, xtcPath ):
self.universe = md.Universe( pdbPath, xtcPath )
self.set_density_params()
@property
def n_frames( self ):
return self.universe.trajectory.n_frames
@property
def frame( self ):
return self.universe.trajectory.frame
def set_density_params(self, low=0.4, high=0.5, length=60 ):
'''
        Generate a grid with density_grid_length points in each of the x, y, z directions.
        Grids whose density is between low * max_density and high * max_density
        will be used for radius calculation.
'''
self.density_low = low
self.density_high = high
self.density_grid_length = length
def set_frame( self, frame ):
self.universe.trajectory[ frame ]
def radius( self, frame ):
'''
Bubble radius at one frame.
Method:
1. Load the snapshot at frame
2. Load x, y, z coordinates
3. Calculate density grid mesh at grid points
4. Filter the shell grids with density between low * max density and high * max density
5. Calculate the average radius
'''
start = time.clock()
self.set_frame( frame )
# Load x, y, z coordinates
data = pd.DataFrame( list(self.universe.coord), columns=['x','y','z'])
x = data[ 'x' ].values
y = data[ 'y' ].values
z = data[ 'z' ].values
# Density grid
xyz = scipy.vstack( [ x, y, z ] )
kde = scipy.stats.gaussian_kde( xyz )
xmin, ymin, zmin = x.min(), y.min(), z.min()
xmax, ymax, zmax = x.max(), y.max(), z.max()
NI = complex( imag=self.density_grid_length)
xi, yi, zi = scipy.mgrid[ xmin:xmax:NI, ymin:ymax:NI, zmin:zmax:NI ]
coords = scipy.vstack([item.ravel() for item in [xi, yi, zi]])
density = kde(coords).reshape(xi.shape)
# Filter density grid
density_max = density.max()
density_low = self.density_low * density_max
density_high = self.density_high * density_max
xyzs = []
N = self.density_grid_length
for idx, idy, idz in product( xrange(N), xrange(N), xrange(N) ):
if density_low < density[ idx, idy, idz ] <= density_high:
xyzs.append( [ xi[ idx, idy, idz ], yi[ idx, idy, idz ], zi[ idx, idy, idz ] ] )
xyzs = np.array( xyzs )
# Average radius
center = xyzs.mean( axis=0 )
rs = []
for xyz_ele in xyzs:
rs.append( np.linalg.norm( center - xyz_ele ) )
duration = time.clock() - start
print( "Radius for frame {} calculated in {:.2f} seconds".format( frame, duration ) )
return center, scipy.mean( rs )
def radius_for_frames( self, start, end, step=1 ):
ret = []
for frame in xrange( start, end, step ):
center, radius = self.radius( frame )
ret.append( [ frame, radius ] )
return ret
def all_radius( self ):
return self.radius_for_frames( 0, self.n_frames, 1 )
def regression( self, radiusList ):
''' Input (frame, radius) lists and do linear regression on the data '''
ts = [ ele[0] for ele in radiusList ]
rs = [ ele[1] for ele in radiusList ]
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress( ts, rs )
return slope, intercept, r_value, p_value, std_err
def plot_radius( self, rs, notebook=False ):
''' plot dots and linear regression results '''
xs = [ ele[0] for ele in rs ]
ys = [ ele[1] for ele in rs ]
x_min = min( xs )
x_max = max( xs )
x_min = x_min - ( x_max - x_min ) * 0.05
x_max = x_max + ( x_max - x_min ) * 0.05
slope, intercept, r_value, p_value, std_err = self.regression( rs )
xs_line = [ x_min ] + xs + [ x_max ]
ys_line = [ ele * slope + intercept for ele in xs_line ]
# Scatter plot
scatter = go.Scatter(
x = [ele[0] for ele in rs],
y = [ele[1] for ele in rs],
mode = 'markers',
name = 'Radius'
)
reg_line = go.Scatter(
x = xs_line, y = ys_line,
mode='lines', name='y={:.4f}x+{:.4f}, p-value={:.2f}, StdErr={:.3f}'.format(slope, intercept, p_value, std_err)
)
data = go.Data([scatter, reg_line])
plot = plotly.offline.iplot if notebook else plotly.offline.plot
plot( {
'data': data,
'layout': go.Layout( title='Radius vs Frame', xaxis={'title':'Frame'}, yaxis={'title':'Radius'} )
} )
def flux_info( self, start, end, step=1 ):
'''
Flux info for frames [start:end:step]. Info are, for each step,
nframe, center, radius, n atoms inside sphere
'''
info = []
for nframe in xrange( start, end, step ):
center, radius = self.radius( nframe )
# Selector for AtomGroup in MDAnalysis
selector = 'point ' + ' '.join( str( ele ) for ele in list( center ) + [ radius ] )
# Explicitly set frame here
self.set_frame( nframe )
atoms = self.universe.select_atoms( selector )
natoms = atoms.n_atoms
info.append( (nframe, center, radius, natoms) )
return info
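# A minimal usage sketch for Trajectory (file names are placeholders): estimate
# the bubble radius over every 10th frame and fit a linear growth rate. Requires
# MDAnalysis-readable topology/trajectory files; never called by this module.
def _example_trajectory_usage(pdb_path='bubble.pdb', xtc_path='bubble.xtc'):
    traj = Trajectory(pdb_path, xtc_path)
    radii = traj.radius_for_frames(0, traj.n_frames, 10)
    slope, intercept, r_value, p_value, std_err = traj.regression(radii)
    return slope, intercept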
#################################################
################# Exceptions ####################
#################################################
class AtomUnmeasuredError(Exception):
pass
################################################
################## Functions ###################
################################################
def next_n_lines(file_opened, N, strip='right'):
strip_dic = {
'right': string.rstrip,
'left': string.lstrip,
'both': string.strip
}
if strip:
return [strip_dic[strip](x) for x in islice(file_opened, N)]
else:
return list(islice(file_opened, N))
def read_stress(stress_file, N=settings.NLINES, normalPressure=False):
"""
Read dump file into a list of atoms, which have type / coordinates /
stresses info stored as Atom properties.
Dump file data format:
atom_id atom_type x y z stress_x stress_y stress_z
"""
atoms = {}
count = 0
data = next_n_lines(stress_file, N)[9:]
while data:
atoms[count] = []
for line in data:
line = line.strip().split()
identifier = int(line[0])
atom_type = int(line[1])
element = settings.ELEMENTS[atom_type]
xyz = tuple([float(x) for x in line[2:5]])
if normalPressure:
# To calculate normal pressure, we need xx, yy, zz, xy, xz, yz
stress = tuple([float(x) for x in line[5:11]])
else:
# To calculate pressure, we need xx, yy, zz
stress = tuple([float(x) for x in line[5:8]])
atom = Atom(identifier, type=atom_type, element=element, xyz=xyz, stress=stress, normal=normalPressure)
atoms[count].append(atom)
# Process next N lines.
data = next_n_lines(stress_file, N)[9:]
count += 1
return atoms
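# A hypothetical dump line matching the format parsed above (atom id, atom type,
# x, y, z, then the stress components; six stress values when normalPressure=True):
#   1203 2 10.5 7.25 33.0 -152.3 -148.9 -150.1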
def read_pdb(filename):
"""
Read pdb file as a list of atoms
"""
logging.info( "Reading {}".format(filename) )
atoms_lines = []
with open(filename, 'r') as pdbfile:
for line in pdbfile:
if line.startswith('CRYST'):
cryst_line = line
elif line.startswith('ATOM'):
atoms_lines.append( line )
x, y, z = [float(ele) for ele in cryst_line.strip().split()[1:4] ]
atoms = []
for line in atoms_lines:
data = line.strip().split()
idx = int(data[1])
element = data[2][:2]
coor = [ float(ele) for ele in data[5:8] ]
atoms.append( Atom(identifier=idx, element=element, xyz=coor) )
return atoms, (x,y,z)
def combine_water(atoms, remove=True):
"""
Combine water atoms
"""
combined = []
ne = [ ele for ele in atoms if ele.element == 'Ne' ]
wat = [ele for ele in atoms if ele.element != 'Ne' ]
logging.info("Before:: {} Ne, {} Water atoms".format(len(ne), len(wat)))
idx_wat = len(ne) + 1
comb_wat = []
for idx in range( len( wat ) / 3 ):
coor1 = np.array( wat[ idx * 3 ].xyz )
coor2 = np.array( wat[ idx * 3 + 1 ].xyz )
coor3 = np.array( wat[ idx * 3 + 2 ].xyz )
coor = (coor1 + coor2 + coor3) / 3.
comb_wat.append(Atom(identifier=idx_wat, element='W', xyz=coor))
idx_wat += 1
if remove:
selected = random.sample(comb_wat, len(comb_wat)/4)
else:
selected = comb_wat
n_ne = len(ne)
for idx in xrange(len(selected)):
selected[idx].id = idx + 1 + n_ne
logging.info("After:: {} Ne, {} Water atoms".format(len(ne), len(selected)))
return ne + selected
def write_lammps_data(atoms, xyz, filename):
"""
LAMMPS data
format: atom idx, molecule idx, atom type, x, y, z,
"""
atom_types = {'Ne':1, 'W':2}
x, y, z = xyz
header = "LAMMPS bubble\n\n" \
"{n_atoms} atoms\n\n" \
"{n_types} atom types\n" \
"0 bond types\n" \
"0 angle types\n\n" \
"0 {x} xlo xhi\n0 {y} ylo yhi\n0 {z} zlo zhi\n\n"\
"Atoms\n\n".format(n_atoms=len(atoms), n_types=2,x=x,y=y,z=z)
print(header)
fmt = "{idx} {mol} {atype} {charge} {x} {y} {z}\n"
for idx, atom in enumerate(atoms):
header += fmt.format(idx=atom.id, mol=atom.id, atype=atom_types[atom.element], charge=0, x=atom.xyz[0], y=atom.xyz[1], z=atom.xyz[2])
with open(filename, 'w') as output:
output.write(header)
def average_atom_stress(write=True, step=0, *args):
"""Calculates averaged stress from multiple stress files.
write determines whether to write output or not.
step determines which timestep to average."""
n_files = float(len(args))
stress_list = []
for ele in args:
stress_list.append(read_stress(ele)[step])
# Sort atoms by id.
stress_list[-1].sort(key=lambda x: x.id)
n_atoms = len(stress_list[0])
atoms = []
# Average stress for each atom id.
for i in range(n_atoms):
sx = sum([x[i].stress[0] for x in stress_list]) / n_files
sy = sum([x[i].stress[1] for x in stress_list]) / n_files
sz = sum([x[i].stress[2] for x in stress_list]) / n_files
atom = stress_list[0][i]
atoms.append(
Atom(atom.id, type=atom.type, element=atom.element, xyz=atom.xyz, stress=(sx, sy, sz))
)
# Write averaged stress to file.
if write:
out_name = '.'.join(args[0].name.split('.')[:-1]) + '_averaged.dat'
with open(out_name, 'w') as output:
# Write header lines to be compatitable with LAMMPS dump files.
output.write('Header line\n' * 9)
for atom in atoms:
# Do not write element here to be compatitable with
# LAMMPS dump files.
output.write("{} {} {} {} {} {} {} {}\n".format(
atom.id, atom.type,
atom.xyz[0], atom.xyz[1], atom.xyz[2],
atom.stress[0], atom.stress[1], atom.stress[2]))
print("Average Stress saved to {}.".format(out_name))
return atoms
def build_box(atoms, timestep, radius, center, use_atomic_volume, average_on_atom, bx, by, bz):
"""Build a box from a list of atoms."""
box = Box(timestep, radius=radius, center=center, use_atomic_volume=use_atomic_volume, average_on_atom=average_on_atom)
for atom in atoms:
box.add_atom(atom)
box.set_boundary(bx=bx, by=by, bz=bz)
box.measure()
return box
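# A hedged end-to-end sketch (file name, box size and element list are made up):
# read the first timestep of a dump, build a Box around an assumed bubble center
# and compute shell pressures. Note that Box.measure() shells out to the external
# voro++ binary, so this only runs where voro++ is installed. Never called here.
def _example_box_workflow(dump_path='stress.dump'):
    with open(dump_path) as handle:
        atoms = read_stress(handle)[0]        # atoms of the first timestep
    box = build_box(atoms, timestep=0, radius=60, center=(75.0, 75.0, 75.0),
                    use_atomic_volume=True, average_on_atom=False,
                    bx=(0, 150), by=(0, 150), bz=(0, 150))
    return box.shell_pressure_stats(['Ne'], dr=2.0)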
def write_density(density, dr, outname, header):
"""Write density (both shell and xyz density) stats to output file.
One density list at a time.
"""
with open(outname, 'w') as output:
output.write(header)
for i, item in enumerate(density):
low = i * dr
high = low + dr
output.write('{l:.3f}\t{h:.3f}\t{d:.13f}\n'.format(l=low, h=high, d=item))
def write_pressure(pressure, dr, outname, header, bubble=False):
"""Write pressure (both bubble and shell pressure) stats to output file.
If bubble is True, r_low is always zero.
"""
logging.info( "Writing output to {}".format(outname) )
if bubble:
# Bubble pressure has in pressure and out pressure.
with open(outname, 'w') as output:
output.write(header)
nbins = len(pressure['in'])
for i in range(nbins):
low = 0
high = (i + 1) * dr
if i < nbins - 1:
output.write('{l:.3f}\t{h:.3f}\t{pin:.13f}\t{pout:.13f}\n'.format(
l=low, h=high,
pin=pressure['in'][i], pout=pressure['out'][i+1]
))
else:
output.write('{l:.3f}\t{h:.3f}\t{pin:.13f}\t{pout:.13f}\n'.format(
l=low, h=high,
pin=pressure['in'][i], pout=0
))
else:
# Shell pressure.
with open(outname, 'w') as output:
output.write(header)
for i, item in enumerate(pressure):
low = i * dr
high = low + dr
output.write('{l:.3f}\t{h:.3f}\t{p:.13f}\n'.format(l=low, h=high, p=item))
def write_ratio(ratio, dr, outname, header, bubble=True):
"""Write atom ratio stats to output file.
If bubble is True, r_low is always zero.
"""
with open(outname, 'w') as output:
output.write(header)
for i, item in enumerate(ratio):
low = 0 if bubble else i * dr
high = (i + 1) * dr
output.write('{l:.3f}\t{h:.3f}\t{r:.13f}\n'.format(l=low, h=high, r=item))
def bubble_ratio(box, elements, out_fmt, header, dr, time, container, debug=False):
"""Calculate bubble ratio stats and write results to disk."""
for eles in elements:
# Ratio stats for each element.
e = ''.join(eles)
print('Bubble ratio stats for {e}'.format(e=e))
# Calculate ratio.
ratio = box.atom_stats(eles[0], dr)
# Write to file.
outname = out_fmt.format(time=time, ele=e)
write_ratio(ratio, dr, outname, header, bubble=True)
if debug:
# For testing.
with open(container, 'a') as cc:
cc.write(outname + '\n')
def shell_ratio(box, elements, out_fmt, header, dr, time, container, debug=False):
"""Calculate shell ratio stats and write results to disk."""
pass
def bubble_pressure(box, elements, out_fmt, header, dr, time, container, debug=False):
"""Calculate bubble pressure and write results to disk."""
for eles in elements:
# Bubble pressure stats for each group of specified elements.
e = ''.join(eles)
print("Bubble pressure stats for {e}\n".format(e=e))
# Calculate bubble pressure.
bubble_pressure = box.pressure_stats(eles, dr)
# Write bubble pressure.
outname = out_fmt.format(time=time, ele=e)
write_pressure(bubble_pressure, dr, outname, header, bubble=True)
if debug:
# For testing.
with open(container, 'a') as cc:
cc.write(outname + '\n')
def shell_pressure(box, elements, out_fmt, header, dr, time, container, normal=False, debug=False):
"""Calculate shell pressure and write results to disk."""
for eles in elements:
# Shell pressure stats for each group of specified elements.
e = ''.join(eles)
print('Shell pressure stats for {e}\n'.format(e=e))
# Shell pressure.
if not normal:
shell_pressure = box.shell_pressure_stats(eles, dr, normal=normal)
# Write to disk.
outname = out_fmt.format(time=time, ele=e)
write_pressure(shell_pressure, dr, outname, header, bubble=False)
if debug:
# For testing.
with open(container, 'a') as cc:
cc.write(outname + '\n')
else:
shell_pressure = box.shell_pressure_stats(eles, dr, normal=normal)
shell_r, shell_theta, shell_phi = shell_pressure['r'], shell_pressure['theta'], shell_pressure['phi']
# Write to disk.
outname1 = out_fmt.format(time=time, ele=e) + '_r'
outname2 = out_fmt.format(time=time, ele=e) + '_theta'
outname3 = out_fmt.format( time=time, ele=e ) + '_phi'
write_pressure(shell_r, dr, outname1, header, bubble=False)
write_pressure(shell_theta, dr, outname2, header, bubble=False)
write_pressure( shell_phi, dr, outname3, header, bubble=False )
if debug:
# For testing.
with open(container, 'a') as cc:
cc.write( outname1 + '\n' )
cc.write( outname2 + '\n' )
cc.write( outname3 + '\n' )
def bubble_density(box, elements, mole, out_fmt, header, dr, time, container, debug=False):
"""Calculate bubble density stats and write results to disk."""
for eles in elements:
# Bubble density stats for each group of specified elements.
e = ''.join(eles)
print('Bubble density stats for {e}\n'.format(e=e))
# Bubble density.
bubble_density = box.bubble_density(eles, mole, dr)
# Write to disk.
outname = out_fmt.format(time=time, ele=e)
write_density(bubble_density, dr, outname, header)
if debug:
# For testing.
with open(container, 'a') as cc:
cc.write(outname + '\n')
def shell_density(box, elements, mole, out_fmt, header, dr, time, container, debug=False):
"""Calculate shell density stats and write results to disk."""
for eles in elements:
# Shell density stats for each group of specified elements.
e = ''.join(eles)
print('Shell density stats for {e}\n'.format(e=e))
# Shell density.
shell_density = box.shell_density(eles, mole, dr)
# Write to disk.
outname = out_fmt.format(time=time, ele=e)
write_density(shell_density, dr, outname, header)
if debug:
# For testing.
with open(container, 'a') as cc:
cc.write(outname + '\n')
def xyz_density(box, elements, mole, out_fmt, header, dr, time, container, debug=False):
"""Calculate xyz density stats and write results to disk."""
for eles in elements:
# XYZ density stats for each group of specified elements.
e = ''.join(eles)
print('XYZ density stats for {e}\n'.format(e=e))
# XYZ density.
xyz_density = box.xyz_density(eles, mole, dr)
# Write to disk.
xout = out_fmt.format(time=time, ele=e, xyz='x')
yout = out_fmt.format(time=time, ele=e, xyz='y')
zout = out_fmt.format(time=time, ele=e, xyz='z')
write_density(xyz_density['x'], dr, xout, header)
write_density(xyz_density['y'], dr, yout, header)
write_density(xyz_density['z'], dr, zout, header)
if debug:
# For testing.
with open(container, 'a') as cc:
out = '\n'.join([xout, yout, zout, ''])
cc.write(out)
def get_radius(box, element, dr, n=1, ratio=0.5):
"""Get the radius of a bubble.
Radius is determined to be r with closest value of n_element / n_atoms
to ratio, i.e. within radius, n_element / n_atoms should be as close to
ratio as possible.
    n specifies the number of radii to return, i.e. the n radii that have
n_element / n_atoms values closest to ratio."""
bubble_ratio = box.atom_stats(element, dr)
deltas = [abs(x - ratio) for x in bubble_ratio]
# Use nanmin to ignore NaNs in ratio vector.
    # Do not select radii smaller than 10 angstrom.
min_index = deltas.index(np.nanmin(deltas))
    n = n // 2  # integer half-width, so range() below always receives ints
ret = []
for i in range(-n, n + 1):
index = min_index + i
ret.append((dr * (index + 1), bubble_ratio[index]))
return ret
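# Editorial sketch (not part of the original script): the selection in
# get_radius boils down to "pick the shell(s) whose n_element / n_atoms
# fraction is closest to `ratio`".  The helper below is purely illustrative,
# uses made-up numbers, and is not called anywhere.
def _get_radius_selection_sketch():
    dr = 2.0                                         # hypothetical shell width
    bubble_ratio = [np.nan, 0.10, 0.35, 0.52, 0.80]  # hypothetical per-shell ratios
    deltas = [abs(x - 0.5) for x in bubble_ratio]    # distance from target ratio 0.5
    i = deltas.index(np.nanmin(deltas))              # -> 3 (NaN shells are ignored)
    return dr * (i + 1), bubble_ratio[i]             # -> (8.0, 0.52)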
| mit |
AlexanderFabisch/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset, which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subsetting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
typically return probabilities closer to 0 or 1.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
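        # Decision-function scores are only min-max scaled into [0, 1] below so
        # they can be binned like probabilities for plotting; this rescaling is
        # not a probability calibration.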
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |
ewmoore/numpy | numpy/lib/function_base.py | 1 | 115391 | __docformat__ = "restructuredtext en"
__all__ = ['select', 'piecewise', 'trim_zeros', 'copy', 'iterable',
'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex',
'disp', 'extract', 'place', 'nansum', 'nanmax', 'nanargmax',
'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov',
'corrcoef', 'msort', 'median', 'sinc', 'hamming', 'hanning',
'bartlett', 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc',
'add_docstring', 'meshgrid', 'delete', 'insert', 'append', 'interp',
'add_newdoc_ufunc']
import warnings
import types
import sys
import numpy.core.numeric as _nx
from numpy.core import linspace
from numpy.core.numeric import ones, zeros, arange, concatenate, array, \
asarray, asanyarray, empty, empty_like, ndarray, around
from numpy.core.numeric import ScalarType, dot, where, newaxis, intp, \
integer, isscalar
from numpy.core.umath import pi, multiply, add, arctan2, \
frompyfunc, isnan, cos, less_equal, sqrt, sin, mod, exp, log10
from numpy.core.fromnumeric import ravel, nonzero, choose, sort, mean
from numpy.core.numerictypes import typecodes, number
from numpy.core import atleast_1d, atleast_2d
from numpy.lib.twodim_base import diag
from _compiled_base import _insert, add_docstring
from _compiled_base import digitize, bincount, interp as compiled_interp
from arraysetops import setdiff1d
from utils import deprecate
from _compiled_base import add_newdoc_ufunc
import numpy as np
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try: iter(y)
except: return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None, density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi+0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins+1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = int
else:
ntype = weights.dtype
n = np.zeros(bins.shape, ntype)
block = 65536
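    # Process `a` in chunks of `block` elements: each chunk is sorted and the
    # cumulative counts (or cumulative weights) at the bin edges are accumulated
    # via searchsorted; the np.diff at the end converts these into per-bin
    # totals while keeping memory use bounded for very large inputs.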
if weights is None:
for i in arange(0, len(a), block):
sa = sort(a[i:i+block])
n += np.r_[sa.searchsorted(bins[:-1], 'left'), \
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), block):
tmp_a = a[i:i+block]
tmp_w = weights[i:i+block]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero,], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), \
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True, returns
        the bin density, i.e., the bin count divided by the bin hypervolume.
weights : array_like (N,), optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False, the
values of the returned histogram are equal to the sum of the weights
belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights for
the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal'\
' to the dimension of the sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError("Element at index %s in `bins` should be "
"a positive integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1)
else:
edges[i] = asarray(bins[i], float)
nbin[i] = len(edges[i])+1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError("""
Found bin edge of size <= 0. Did you specify `bins` with
non-monotonic sequence?""")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:,i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
on_edge = where(around(sample[:,i], decimal) == around(edges[i][-1],
decimal))[0]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
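    # `xy` now holds, for each sample, the row-major flat index into a histogram
    # whose axes are ordered by `ni` (increasing number of bins); the reshape and
    # swapaxes below restore the original axis order.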
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i,j)
ni[i],ni[j] = ni[j],ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1,-1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : {array_type, double}
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix) :
a = np.asarray(a)
if weights is None :
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else :
a = a + 0.0
wgt = np.array(weights, dtype=a.dtype, copy=0)
# Sanity checks
if a.shape != wgt.shape :
if axis is None :
raise TypeError(
"Axis must be specified when shapes of a "\
"and weights differ.")
if wgt.ndim != 1 :
raise TypeError(
"1D weights expected when shapes of a and "\
"weights differ.")
if wgt.shape[0] != a.shape[axis] :
raise ValueError(
"Length of weights not compatible with "\
"specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis)
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
def asarray_chkfinite(a, dtype=None, order=None):
"""
Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
    asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have undefined values.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray)):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
n = len(condlist)
n2 = len(choicelist)
if n2 != n:
raise ValueError(
"list of cases must be same length as list of conditions")
choicelist = [default] + choicelist
S = 0
pfac = 1
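    # Build S so that, element-wise, it equals the 1-based index of the first
    # condition that is True (0 where none is): pfac accumulates the product of
    # (1 - earlier conditions), so later conditions cannot override an earlier
    # match.  choose(S, tuple(choicelist)) below then picks the default
    # (index 0) or the matching choice.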
for k in range(1, n+1):
S += k * pfac * asarray(condlist[k-1])
if k < n:
pfac *= (1-asarray(condlist[k-1]))
# handle special case of a 1-element condition but
# a multi-element choice
if type(S) in ScalarType or max(asarray(S).shape)==1:
pfac = asarray(1)
for k in range(n2+1):
pfac = pfac + asarray(choicelist[k])
if type(S) in ScalarType:
S = S*ones(asarray(pfac).shape, type(S))
else:
S = S*ones(asarray(pfac).shape, S.dtype)
return choose(S, tuple(choicelist))
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
        as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using central differences in the interior
and first differences at the boundaries. The returned gradient hence has
the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
`*varargs` : scalars
0, 1, or N scalars specifying the sample distances in each direction,
that is: `dx`, `dy`, `dz`, ... The default distance is 1.
Returns
-------
gradient : ndarray
N arrays of the same shape as `f` giving the derivative of `f` with
respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]),
array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
# use central differences on interior and first differences on endpoints
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M' :
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm' :
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
for axis in range(N):
# select out appropriate parts for this dimension
out = np.empty_like(f, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0
out[slice1] = (f[slice2] - f[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (f[1] - f[0])
out[slice1] = (f[slice2] - f[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (f[-1] - f[-2])
out[slice1] = (f[slice2] - f[slice3])
# divide by step size
outvals.append(out / dx[axis])
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
        The `n`-th order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def interp(x, xp, fp, left=None, right=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
        Value to return for `x > xp[-1]`, default is `fp[-1]`.
Returns
-------
y : {float, ndarray}
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasingness is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : {ndarray, scalar}
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
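    # ddmod below wraps each consecutive jump into [-pi, pi) (flipping an exact
    # -pi to +pi when the original jump was positive); ph_correct = ddmod - dd
    # is then the multiple of 2*pi needed to undo the wrapping, zeroed wherever
    # the jump is already below `discont`, and applied cumulatively below.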
ddmod = mod(dd+pi, 2*pi)-pi
_nx.copyto(ddmod, pi, where=(ddmod==-pi) & (dd > 0))
    ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd)<discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a,copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.: break
else: first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.: break
else: last = last - 1
return filt[first:last]
import sys
if sys.hexversion < 0x2040000:
from sets import Set as set
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True],tmp[1:]!=tmp[:-1]))
return tmp[idx]
except AttributeError:
items = list(set(x))
items.sort()
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
        Boolean mask array. Must have the same size as `arr`.
vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
def _nanop(op, fill, a, axis=None):
"""
General operation on arrays with not-a-number values.
Parameters
----------
op : callable
Operation to perform.
fill : float
NaN values are set to fill before doing the operation.
a : array-like
Input array.
axis : {int, None}, optional
Axis along which the operation is computed.
By default the input is flattened.
Returns
-------
y : {ndarray, scalar}
Processed data.
"""
y = array(a, subok=True)
# We only need to take care of NaN's in floating point arrays
dt = y.dtype
if np.issubdtype(dt, np.integer) or np.issubdtype(dt, np.bool_):
return op(y, axis=axis)
mask = isnan(a)
# y[mask] = fill
# We can't use fancy indexing here as it'll mess w/ MaskedArrays
# Instead, let's fill the array directly...
np.copyto(y, fill, where=mask)
res = op(y, axis=axis)
mask_all_along_axis = mask.all(axis=axis)
# Along some axes, only nan's were encountered. As such, any values
# calculated along that axis should be set to nan.
if mask_all_along_axis.any():
if np.isscalar(res):
res = np.nan
else:
res[mask_all_along_axis] = np.nan
return res
def nansum(a, axis=None):
"""
Return the sum of array elements over a given axis treating
Not a Numbers (NaNs) as zero.
Parameters
----------
a : array_like
Array containing numbers whose sum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the sum is computed. The default is to compute
the sum of the flattened array.
Returns
-------
y : ndarray
An array with the same shape as a, with the specified axis removed.
If a is a 0-d array, or if axis is None, a scalar is returned with
the same dtype as `a`.
See Also
--------
numpy.sum : Sum across array including Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
    Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
If positive or negative infinity are present the result is positive or
negative infinity. But if both positive and negative infinity are present,
the result is Not A Number (NaN).
Arithmetic is modular when using integer types (all elements of `a` must
be finite i.e. no elements that are NaNs, positive infinity and negative
infinity because NaNs are floating point types), and no error is raised
on overflow.
Examples
--------
>>> np.nansum(1)
1
>>> np.nansum([1])
1
>>> np.nansum([1, np.nan])
1.0
>>> a = np.array([[1, 1], [1, np.nan]])
>>> np.nansum(a)
3.0
>>> np.nansum(a, axis=0)
array([ 2., 1.])
When positive infinity and negative infinity are present
>>> np.nansum([1, np.nan, np.inf])
inf
>>> np.nansum([1, np.nan, np.NINF])
-inf
>>> np.nansum([1, np.nan, np.inf, np.NINF])
nan
"""
return _nanop(np.sum, 0, a, axis)
def nanmin(a, axis=None):
"""
Return the minimum of an array or minimum along an axis ignoring any NaNs.
Parameters
----------
a : array_like
Array containing numbers whose minimum is desired.
axis : int, optional
        Axis along which the minimum is computed. The default is to compute
the minimum of the flattened array.
Returns
-------
nanmin : ndarray
A new array or a scalar array with the result.
See Also
--------
numpy.amin : Minimum across array including any Not a Numbers.
numpy.nanmax : Maximum across array ignoring any Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
    Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative infinity
is treated as a very small (i.e. negative) number.
    If the input has an integer type, the function is equivalent to np.min.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmin(a)
1.0
>>> np.nanmin(a, axis=0)
array([ 1., 2.])
>>> np.nanmin(a, axis=1)
array([ 1., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmin([1, 2, np.nan, np.inf])
1.0
>>> np.nanmin([1, 2, np.nan, np.NINF])
-inf
"""
a = np.asanyarray(a)
if axis is not None:
return np.fmin.reduce(a, axis)
else:
return np.fmin.reduce(a.flat)
def nanargmin(a, axis=None):
"""
Return indices of the minimum values over an axis, ignoring NaNs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmin, nanargmax
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmin(a)
0
>>> np.nanargmin(a)
2
>>> np.nanargmin(a, axis=0)
array([1, 1])
>>> np.nanargmin(a, axis=1)
array([1, 0])
"""
return _nanop(np.argmin, np.inf, a, axis)
def nanmax(a, axis=None):
"""
Return the maximum of an array or maximum along an axis ignoring any NaNs.
Parameters
----------
a : array_like
Array containing numbers whose maximum is desired. If `a` is not
an array, a conversion is attempted.
axis : int, optional
Axis along which the maximum is computed. The default is to compute
the maximum of the flattened array.
Returns
-------
nanmax : ndarray
An array with the same shape as `a`, with the specified axis removed.
        If `a` is a 0-d array, or if axis is None, an ndarray scalar with
        the same dtype as `a` is returned.
See Also
--------
numpy.amax : Maximum across array including any Not a Numbers.
numpy.nanmin : Minimum across array ignoring any Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
    Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative infinity
is treated as a very small (i.e. negative) number.
    If the input has an integer type, the function is equivalent to np.max.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmax(a)
3.0
>>> np.nanmax(a, axis=0)
array([ 3., 2.])
>>> np.nanmax(a, axis=1)
array([ 2., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmax([1, 2, np.nan, np.NINF])
2.0
>>> np.nanmax([1, 2, np.nan, np.inf])
inf
"""
a = np.asanyarray(a)
if axis is not None:
return np.fmax.reduce(a, axis)
else:
return np.fmax.reduce(a.flat)
def nanargmax(a, axis=None):
"""
Return indices of the maximum values over an axis, ignoring NaNs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmax, nanargmin
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmax(a)
0
>>> np.nanargmax(a)
1
>>> np.nanargmax(a, axis=0)
array([1, 0])
>>> np.nanargmax(a, axis=1)
array([1, 1])
"""
return _nanop(np.argmax, -np.inf, a, axis)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
import sys
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the first
argument will be used to determine the number of outputs. The results of
this call will be cached if `cache` is `True` to prevent calling the
function twice. However, to implement the cache, the original function must
be wrapped which will slow down subsequent calls, so only do this if your
function is expensive.
The new keyword argument interface and `excluded` argument support further
    degrade performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None, cache=False):
self.pyfunc = pyfunc
self.cache = cache
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError("Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError("Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
if self.otypes and not self.excluded:
self._ufunc = None # Caching to improve default performance
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
assert args
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple functions
# at least -- this wrapping can almost double the execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
"""
Estimate a covariance matrix, given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError("ddof must be integer")
X = array(m, ndmin=2, dtype=float)
if X.size == 0:
# handle empty arrays
return np.array(m)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
axis = 0
tup = (slice(None),newaxis)
else:
axis = 1
tup = (newaxis, slice(None))
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=float)
X = concatenate((X,y), axis)
X -= X.mean(axis=1-axis)[tup]
if rowvar:
N = X.shape[1]
else:
N = X.shape[0]
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
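# Illustrative sketch (not from the upstream NumPy sources; uses only the
# public ``np.cov`` API): ``bias`` and ``ddof`` both control the ``N - ddof``
# normalization, and an explicit ``ddof`` takes precedence, so ``bias=1`` and
# ``ddof=0`` are interchangeable.
def _demo_cov_normalization():
    import numpy as np
    x = np.array([[0., 2.], [1., 1.], [2., 0.]]).T
    # Both divide by N instead of N - 1, hence the results coincide.
    return np.allclose(np.cov(x, bias=1), np.cov(x, ddof=0))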
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
"""
Return correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `P`, and the
covariance matrix, `C`, is
.. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `P` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : {None, int}, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
"""
c = cov(x, y, rowvar, bias, ddof)
if c.size == 0:
# handle empty arrays
return c
try:
d = diag(c)
except ValueError: # scalar covariance
return 1
return c/sqrt(multiply.outer(d,d))
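# Illustrative sketch (not from the upstream NumPy sources; uses only the
# public ``np.corrcoef`` API): the correlation matrix is the covariance matrix
# rescaled by the outer product of the standard deviations, so perfectly
# anti-correlated variables give -1 off the diagonal.
def _demo_corrcoef():
    import numpy as np
    x = np.array([0., 1., 2.])
    y = np.array([2., 1., 0.])
    return np.corrcoef(x, y)  # -> [[ 1., -1.], [-1.,  1.]]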
def blackman(M):
"""
Return the Blackman window.
    The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
    as the Kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.42-0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
    discontinuities at the beginning and end of the sampled signal) or
    tapering function. The Fourier transform of the Bartlett window is the
    product of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The Hanning window was named for Julius von Hann, an Austrian
    meteorologist. It is also known as the Cosine Bell. Some authors prefer
    that it be called a Hann window, to help avoid confusion with the very
    similar Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.5-0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The Hamming window was named for R. W. Hamming, an associate of J. W.
    Tukey, and is described in Blackman and Tukey. It was recommended for
    smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1,float)
n = arange(0,M)
return 0.54-0.46*cos(2.0*pi*n/(M-1))
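# Illustrative sketch (not from the upstream NumPy sources; uses only the
# public window functions defined above): the four cosine-based tapers differ
# in their side-lobe behaviour, but all of them are normalized so that the
# peak value is one when the number of samples is odd.
def _demo_window_peaks(M=51):
    import numpy as np
    windows = {'bartlett': np.bartlett(M), 'blackman': np.blackman(M),
               'hanning': np.hanning(M), 'hamming': np.hamming(M)}
    return {name: w.max() for name, w in windows.items()}  # all 1.0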
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in xrange(1,len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is partitioned
into the two intervals [0,8] and (8,inf), and Chebyshev polynomial
expansions are employed in each interval. Relative error on the domain
[0,30] using IEEE arithmetic is documented [3]_ as having a peak of 5.8e-16
with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x<0)
x[ind] = -x[ind]
ind = (x<=8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
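# Illustrative cross-check (not from the upstream NumPy sources): the
# Chebyshev expansions above reproduce I_0 to near machine precision on
# [0, 8] and (8, inf). SciPy is assumed to be installed here purely for
# comparison; the original module does not require it.
def _demo_check_i0():
    import numpy as np
    from scipy import special
    x = np.linspace(0.0, 20.0, 11)
    # Relative disagreement is expected to be around 1e-15.
    return np.max(np.abs(np.i0(x) - special.i0(x)) / special.i0(x))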
def kaiser(M,beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
    The Kaiser window was named for Jim Kaiser, who discovered a simple
    approximation to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0,M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
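# Illustrative sketch (not from the upstream NumPy sources; uses only public
# NumPy windows): the docstring table states that beta = 8.6 makes the Kaiser
# window similar to a Blackman window, and the pointwise difference below is
# indeed small (a few percent at most).
def _demo_kaiser_vs_blackman(M=51):
    import numpy as np
    return np.max(np.abs(np.kaiser(M, 8.6) - np.blackman(M)))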
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a
Lanczos resampling filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
    y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a,subok=True,copy=True)
b.sort(0)
return b
def median(a, axis=None, out=None, overwrite_input=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. Note that, if `overwrite_input` is True and the input
is not already an ndarray, an error will be raised.
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in
which case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
if overwrite_input:
if axis is None:
sorted = a.ravel()
sorted.sort()
else:
a.sort(axis=axis)
sorted = a
else:
sorted = sort(a, axis=axis)
if sorted.shape == ():
# make 0-D arrays work
return sorted.item()
if axis is None:
axis = 0
indexer = [slice(None)] * sorted.ndim
index = int(sorted.shape[axis]/2)
if sorted.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(sorted[indexer], axis=axis, out=out)
def percentile(a, q, axis=None, out=None, overwrite_input=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute which must be between 0 and 100 inclusive.
axis : int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the median along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted.
Default is False. Note that, if `overwrite_input` is True and the
input is not already an array, an error will be raised.
Returns
-------
pcntile : ndarray
A new array holding the result (unless `out` is specified, in
which case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the qth percentile of V is the qth ranked
value in a sorted copy of V. A weighted average of the two nearest
neighbors is used if the normalized ranking does not match q exactly.
The same as the median if ``q=50``, the same as the minimum if ``q=0``
and the same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
3.5
>>> np.percentile(a, 50, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.percentile(a, 50, axis=1)
array([ 7., 2.])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.percentile(b, 50, axis=None, overwrite_input=True)
3.5
"""
a = np.asarray(a)
if q == 0:
return a.min(axis=axis, out=out)
elif q == 100:
return a.max(axis=axis, out=out)
if overwrite_input:
if axis is None:
sorted = a.ravel()
sorted.sort()
else:
a.sort(axis=axis)
sorted = a
else:
sorted = sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, q, axis, out)
# handle sequence of q's without calling sort multiple times
def _compute_qth_percentile(sorted, q, axis, out):
if not isscalar(q):
p = [_compute_qth_percentile(sorted, qi, axis, None)
for qi in q]
if out is not None:
out.flat = p
return p
q = q / 100.0
if (q < 0) or (q > 1):
        raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
Nx = sorted.shape[axis]
index = q*(Nx-1)
i = int(index)
if i == index:
indexer[axis] = slice(i, i+1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i+2)
j = i + 1
weights = array([(j - index), (index - i)],float)
wshape = [1]*sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use add.reduce in both cases to coerce data type as well as
# check and use out array.
return add.reduce(sorted[indexer]*weights, axis=axis, out=out)/sumval
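# Illustrative worked example (not from the upstream NumPy sources; uses only
# the public ``np.percentile`` API): for [1, 2, 3, 4] and q = 40 the
# fractional index is 0.4 * (4 - 1) = 1.2, so the result interpolates linearly
# between the order statistics 2 and 3, giving 2 + 0.2 * (3 - 2) = 2.2.
def _demo_percentile_interpolation():
    import numpy as np
    return np.percentile([1, 2, 3, 4], 40)  # -> 2.2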
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
Image [2]_ illustrates trapezoidal rule -- y-axis locations of points will
be taken from `y` array, by default x-axis distances between points will be
1.0, alternatively they can be provided with `x` array or with `dx` scalar.
Return value will be equal to combined area under the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1,None)
slice2[axis] = slice(None,-1)
try:
        ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError: # Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
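# Illustrative sanity check (not from the upstream NumPy sources; uses only
# the public ``np.trapz`` API): the integral of sin over [0, pi] is exactly 2,
# and the composite trapezoidal rule converges to it quadratically in the
# grid spacing.
def _demo_trapz_convergence():
    import numpy as np
    x = np.linspace(0.0, np.pi, 1001)
    # With 1001 sample points the absolute error is on the order of 1e-6.
    return abs(np.trapz(np.sin(x), x) - 2.0)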
#always succeed
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = {}
exec 'from %s import %s' % (place, obj) in new
if isinstance(doc, str):
add_docstring(new[obj], doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new[obj], doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new[obj], val[0]), val[1].strip())
except:
pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from two or more coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
copy : bool, optional
If False, a view into the original arrays are returned in
order to conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous arrays.
Furthermore, more than one element of a broadcast array may refer to
a single memory location. If you need to write to the arrays, make
copies first.
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing keyword
argument. Giving the string 'ij' returns a meshgrid with matrix indexing,
while 'xy' returns a meshgrid with Cartesian indexing. In the 2-D case
with inputs of length M and N, the outputs are of shape (N, M) for 'xy'
indexing and (M, N) for 'ij' indexing. In the 3-D case with inputs of
length M, N and P, outputs are of shape (N, M, P) for 'xy' indexing and (M,
N, P) for 'ij' indexing. The difference is illustrated by the following
code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
if len(xi) < 2:
        msg = 'meshgrid() takes 2 or more arguments (%d given)' % len(xi)
raise ValueError(msg)
args = np.atleast_1d(*xi)
ndim = len(args)
copy_ = kwargs.get('copy', True)
sparse = kwargs.get('sparse', False)
indexing = kwargs.get('indexing', 'xy')
if not indexing in ['xy', 'ij']:
raise ValueError("Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [x.reshape(s0[:i] + (-1,) + s0[i + 1::]) for i, x in enumerate(args)]
shape = [x.size for x in output]
if indexing == 'xy':
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
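# Illustrative sketch (not from the upstream NumPy sources; uses only the
# public ``np.meshgrid`` API): in the 2-D case the 'xy' and 'ij' conventions
# differ only by a transpose of the two leading axes.
def _demo_meshgrid_indexing():
    import numpy as np
    x = np.linspace(0.0, 1.0, 3)
    y = np.linspace(0.0, 1.0, 2)
    xv_xy, yv_xy = np.meshgrid(x, y, indexing='xy')  # shape (2, 3)
    xv_ij, yv_ij = np.meshgrid(x, y, indexing='ij')  # shape (3, 2)
    return np.array_equal(xv_xy, xv_ij.T) and np.array_equal(yv_xy, yv_ij.T)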
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim;
axis = ndim-1;
if ndim == 0:
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, (int, long, integer)):
if (obj < 0): obj += N
if (obj < 0 or obj >=N):
raise ValueError(
"invalid entry")
newshape[axis]-=1;
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1,None)
new[slobj] = arr[slobj2]
elif isinstance(obj, slice):
start, stop, step = obj.indices(N)
numtodel = len(xrange(start, stop, step))
if numtodel <= 0:
if wrap:
                return wrap(arr.copy())
else:
return arr.copy()
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
# copy end chunck
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
obj = arange(start, stop, step, dtype=intp)
all = arange(start, stop, dtype=intp)
obj = setdiff1d(all, obj)
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = obj
new[slobj] = arr[slobj2]
else: # default behavior
obj = array(obj, dtype=intp, copy=0, ndmin=1)
all = arange(N, dtype=intp)
obj = setdiff1d(all, obj)
slobj[axis] = obj
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
delete : Delete elements from an array.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim-1
if (ndim == 0):
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, (int, long, integer)):
if (obj < 0): obj += N
if obj < 0 or obj > N:
raise ValueError(
"index (%d) out of range (0<=index<=%d) "\
"in dimension %d" % (obj, N, axis))
if isscalar(values):
obj = [obj]
else:
values = asarray(values)
if ndim > values.ndim:
obj = [obj]
else:
obj = [obj] * len(values)
elif isinstance(obj, slice):
# turn it into a range object
obj = arange(*obj.indices(N),**{'dtype':intp})
# get two sets of indices
# one is the indices which will hold the new stuff
# two is the indices where arr will be copied over
obj = asarray(obj, dtype=intp)
numnew = len(obj)
index1 = obj + arange(numnew)
index2 = setdiff1d(arange(numnew+N),index1)
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = index1
slobj2[axis] = index2
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If `axis`
is not specified, `values` can be any shape and will be flattened
before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not given,
both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that `append`
does not occur in-place: a new array is allocated and filled. If
`axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
| bsd-3-clause |
ravindrapanda/tensorflow | tensorflow/python/estimator/canned/dnn_test.py | 25 | 16780 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _dnn_classifier_fn(*args, **kwargs):
return dnn.DNNClassifier(*args, **kwargs)
class DNNModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(self, dnn._dnn_model_fn)
class DNNLogitFnTest(dnn_testing_utils.BaseDNNLogitFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNLogitFnTest.__init__(self,
dnn._dnn_logit_fn_builder)
class DNNWarmStartingTest(dnn_testing_utils.BaseDNNWarmStartingTest,
test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNWarmStartingTest.__init__(self, _dnn_classifier_fn,
_dnn_regressor_fn)
class DNNClassifierEvaluateTest(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierPredictTest(
dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierTrainTest(
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn)
def _dnn_regressor_fn(*args, **kwargs):
return dnn.DNNRegressor(*args, **kwargs)
class DNNRegressorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn)
def _queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
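# Illustrative sketch (not part of the upstream TensorFlow test file; it
# mirrors the pattern used by the integration tests below): an input_fn built
# on _queue_parsed_features parses the serialized examples once and then
# dequeues the parsed tensors batch by batch. `serialized_examples` and
# `feature_spec` are assumed to be provided by the caller, as in the tests
# that follow.
def _demo_input_fn(serialized_examples, feature_spec):
  feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
  features = _queue_parsed_features(feature_map)
  labels = features.pop('y')
  return features, labels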
class DNNRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNRegressor(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
class DNNClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
n_classes, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNClassifier(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
if __name__ == '__main__':
test.main()
| apache-2.0 |
formalmethods/intrepyd | intrepyd/atg/mcdc.py | 1 | 11337 | """
Copyright (C) 2017 Roberto Bruttomesso <roberto.bruttomesso@gmail.com>
This file is distributed under the terms of the 3-clause BSD License.
A copy of the license can be found in the root directory or at
https://opensource.org/licenses/BSD-3-Clause.
Author: Roberto Bruttomesso <roberto.bruttomesso@gmail.com>
Date: 27/03/2017
This module implements a toolbox for Automated Test Generation
"""
import pandas as pd
from intrepyd.engine import EngineResult
def compute_mcdc(context, class_, decisions, max_depth):
"""
Computes MC/DC tests in form of a table.
Args:
context: the intrepyd context to use
class_ (class): a python class that defines the circuit
decisions (Dictionary): each key of the dictionary is the
name of a decision, and the values are the
corresponding conditions
{ netName : [netName, ...] }
^ ^
the decision the list of conditions
Returns:
(Dictionary) the MC/DC tables, one per each decision
"""
# Fetches and duplicates the circuit
inst_a = class_(context, 'InstA')
inst_b = class_(context, 'InstB')
inst_a.mk_circuit(True)
inst_b.mk_circuit(True)
# Flattens inputs and outputs into nets, for simplicity
inst_a.nets.update(inst_a.inputs)
inst_a.nets.update(inst_a.outputs)
inst_b.nets.update(inst_b.inputs)
inst_b.nets.update(inst_b.outputs)
# Creates test objectives
decision2testobjectives = {decision :\
compute_mcdc_targets(context, inst_a, inst_b, decision, conditions)\
for decision, conditions in decisions.iteritems()}
# Compute MC/DC traces: this is the computationally
# expensive part that calls model checking routines
decision2traces, trace2condition, decision2unreachable =\
solve_mcdc_targets(context, decision2testobjectives, max_depth)
# Compute MC/DC tables from traces
decision2table, decision2independencepairs =\
compute_mcdc_tables(context, inst_a, inst_b, decision2traces, trace2condition, decisions)
return decision2table, decision2independencepairs, decision2unreachable
def compute_mcdc_targets(context, inst_a, inst_b, decision, conditions):
"""
Computes the MC/DC reachability target for the
two copies of the circuit.
Args:
context: the intrepyd context to use
inst_a: an instance of the circuit
inst_b: an instance of the circuit
decision: the name of a decision
conditions: the list of names of the conditions
Returns:
a map from targets to conditions for which target is an independence pair
"""
decision_a = inst_a.nets[decision]
decision_b = inst_b.nets[decision]
decisiona_diff_decisionb = context.mk_neq(decision_a, decision_b)
conditiona_neq_conditionb = []
conditions_a = []
conditions_b = []
for condition in conditions:
condition_a = inst_a.nets[condition]
conditions_a.append(condition_a)
condition_b = inst_b.nets[condition]
conditions_b.append(condition_b)
conditiona_neq_conditionb.append(context.mk_neq(condition_a, condition_b))
targets = {}
for i in range(len(conditions)):
# Building test objective for the i-th condition
condition_a = conditions_a[i]
condition_b = conditions_b[i]
# i-th condition must differ in A and B
conj1 = conditiona_neq_conditionb[i]
        # the decision must differ in A and B
conj2 = decisiona_diff_decisionb
# decisionA[not(cA)/cA] != decisionA
not_condition_a = context.mk_not(condition_a)
decisiona_not_ca = context.mk_substitute(decision_a, not_condition_a, condition_a)
conj3 = context.mk_neq(decisiona_not_ca, decision_a)
# decisionB[not(cB)/cB] != decisionB
not_condition_b = context.mk_not(condition_b)
        decisionb_not_cb = context.mk_substitute(decision_b, not_condition_b, condition_b)
        conj4 = context.mk_neq(decisionb_not_cb, decision_b)
# Creates final conjunction
tmp1 = context.mk_and(conj1, conj2)
tmp2 = context.mk_and(tmp1, conj3)
target = context.mk_and(tmp2, conj4)
targets[target] = i
return targets
def solve_mcdc_targets(context, decision2testobjectives, max_depth):
"""
Solves the MC/DC targets, maximizing the number of solved
test objectives per each call.
"""
# Bmc engine will be used to compute counterexamples
bmc = context.mk_optimizing_bmc()
testobjectives2decision = {}
testobjectives2condition = {}
decision2traces = {}
decision2unreachable = {}
targets = []
for decision, tos in decision2testobjectives.iteritems():
decision2traces[decision] = []
decision2unreachable[decision] = []
for test_objective, condition in tos.iteritems():
testobjectives2decision[test_objective] = decision
testobjectives2condition[test_objective] = condition
targets.append(test_objective)
assert len(testobjectives2decision) == len(testobjectives2condition)
total_targets = len(targets)
# Compute unreachable targets first
total_unreached = 0
total_reached = 0
for target in targets:
breach = context.mk_backward_reach()
breach.add_target(target)
result = breach.reach_targets()
if result == EngineResult.UNREACHABLE:
decision = testobjectives2decision[target]
decision2unreachable[decision].append(target)
total_unreached += 1
elif result == EngineResult.REACHABLE:
bmc.add_target(target)
total_reached += 1
# print 'There are', totalTargets, 'test objectives:'
# print '-', totalUnreached, 'unreachable test objectives'
# print '-', totalReached, 'reachable test objectives'
if total_unreached == total_targets:
return decision2traces, decision2unreachable
trace2condition = {}
# Compute counterexamples for reachable targets
done = False
depth = 0
bmc_total_reached = 0
while not done:
bmc.set_current_depth(depth)
result = bmc.reach_targets()
if result != EngineResult.REACHABLE:
if depth == max_depth:
done = True
depth += 1
continue
reached = bmc.get_last_reached_targets()
trace = bmc.get_last_trace()
for reached_target in reached:
decision = testobjectives2decision[reached_target]
decision2traces[decision].append(trace)
trace2condition[trace] = testobjectives2condition[reached_target]
bmc_total_reached += 1
bmc.remove_last_reached_targets()
if bmc_total_reached == total_reached:
done = True
total_undecided = total_reached - bmc_total_reached
if total_undecided != 0:
print 'There are', total_undecided, 'undecided test objectives within depth', max_depth
return decision2traces, trace2condition, decision2unreachable
def compute_mcdc_tables(context, inst_a, inst_b, decision2traces, trace2condition, decisions):
"""
Computes MC/DC tables out of counterexamples.
"""
decision2table = {}
decision2independencepairs = {}
seen = {}
for decision, traces in decision2traces.iteritems():
inst_a_conds_dec = [inst_a.nets[cond] for cond in decisions[decision]] + [inst_a.nets[decision]]
inst_a_testnets = inst_a.inputs.values() + inst_a_conds_dec
inst_b_conds_dec = [inst_b.nets[cond] for cond in decisions[decision]] + [inst_b.nets[decision]]
inst_b_testnets = inst_b.inputs.values() + inst_b_conds_dec
header = [cond for cond in decisions[decision]]
header.append(decision)
header_nets_a = set(inst_a_conds_dec)
# Enable this to include inputs in the tests (but you need to add also names in headers)
# header_nets_a = set(inst_a_testnets)
header_nets_b = set(inst_b_conds_dec)
# Enable this to include inputs in the tests (but you need to add also names in headers)
# header_nets_b = set(inst_b_testnets)
decision2table[decision] = [header]
decision2independencepairs[decision] = {}
test_number = 0
for trace in traces:
test_a_number = None
test_b_number = None
# Enrich the traces with the value of the output
# by performing a simulation
simulator = context.mk_simulator()
for net in inst_a_testnets:
simulator.add_watch(net)
for net in inst_b_testnets:
simulator.add_watch(net)
simulator.simulate(trace, trace.get_max_depth())
full_test_a = trace.get_as_net_dictionary()
full_test_b = trace.get_as_net_dictionary()
test_a_candidate = [v[0] for k, v in full_test_a.iteritems() if (k in inst_a_testnets and k in header_nets_a)]
test_b_candidate = [v[0] for k, v in full_test_b.iteritems() if (k in inst_b_testnets and k in header_nets_b)]
test_a_hash = compute_hash(test_a_candidate)
test_b_hash = compute_hash(test_b_candidate)
if test_a_hash in seen:
_, test_a_number = seen[test_a_hash]
else:
seen[test_a_hash] = (test_a_candidate, test_number)
decision2table[decision].append(test_a_candidate)
test_a_number = test_number
test_number += 1
if test_b_hash in seen:
_, test_b_number = seen[test_b_hash]
else:
seen[test_b_hash] = (test_b_candidate, test_number)
decision2table[decision].append(test_b_candidate)
test_b_number = test_number
test_number += 1
condition = trace2condition[trace]
condition = decisions[decision][condition]
decision2independencepairs[decision][condition] = (test_a_number, test_b_number)
return decision2table, decision2independencepairs
def compute_hash(test):
"""
    Computes a hash key for a test by concatenating its string values
"""
result = ""
for val in test:
result += val
return result
def compute_pretty_tables(decision2table):
"""
Postprocess raw MC/DC tables to get something presentation ready.
"""
decision2prettytable = {}
first = True
for decision, table in decision2table.iteritems():
pretty_table = []
for row in table:
if first:
first = False
pretty_table.append(row)
else:
pretty_table.append(row[0])
pretty_table.append(row[1])
decision2prettytable[decision] = pretty_table
return decision2prettytable
def get_tables_as_dataframe(decision2table):
"""
Postprocess table to turn into a dataframe
"""
result = {}
for decision, table in decision2table.iteritems():
if len(table) == 1:
continue
dataframe = pd.DataFrame(table[1:], columns=table[0])
dataframe = dataframe.drop_duplicates()
result[decision] = dataframe
return result
| bsd-3-clause |
nhejazi/scikit-learn | examples/classification/plot_digits_classification.py | 82 | 2414 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 4 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# matplotlib.pyplot.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/mpl_toolkits/gtktools.py | 10 | 19272 | """
Some gtk specific tools and widgets
* rec2gtk : put record array in GTK treeview - requires gtk
Example usage
import matplotlib.mlab as mlab
import mpl_toolkits.gtktools as gtktools
r = mlab.csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = mlab.FormatFloat(2),
change = mlab.FormatPercent(2),
cost = mlab.FormatThousands(2),
)
exceltools.rec2excel(r, 'test.xls', formatd=formatd)
mlab.rec2csv(r, 'test.csv', formatd=formatd)
import gtk
scroll = gtktools.rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
import copy
import gtk, gobject
import numpy as np
import matplotlib.cbook as cbook
from matplotlib.cbook import warn_deprecated
import matplotlib.mlab as mlab
warn_deprecated("2.0", name="mpl_toolkits.gtktools", obj_type="module")
def error_message(msg, parent=None, title=None):
"""
create an error message dialog with string msg. Optionally set
the parent widget and dialog title
"""
dialog = gtk.MessageDialog(
parent = None,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = msg)
if parent is not None:
dialog.set_transient_for(parent)
if title is not None:
dialog.set_title(title)
else:
dialog.set_title('Error!')
dialog.show()
dialog.run()
dialog.destroy()
return None
def simple_message(msg, parent=None, title=None):
"""
create a simple message dialog with string msg. Optionally set
the parent widget and dialog title
"""
dialog = gtk.MessageDialog(
parent = None,
type = gtk.MESSAGE_INFO,
buttons = gtk.BUTTONS_OK,
message_format = msg)
if parent is not None:
dialog.set_transient_for(parent)
if title is not None:
dialog.set_title(title)
dialog.show()
dialog.run()
dialog.destroy()
return None
def gtkformat_factory(format, colnum):
"""
    copy the format, perform any overrides, and attach GTK style attributes:
xalign = 0.
cell = None
"""
if format is None: return None
format = copy.copy(format)
format.xalign = 0.
format.cell = None
def negative_red_cell(column, cell, model, thisiter):
val = model.get_value(thisiter, colnum)
try: val = float(val)
except: cell.set_property('foreground', 'black')
else:
if val<0:
cell.set_property('foreground', 'red')
else:
cell.set_property('foreground', 'black')
if isinstance(format, mlab.FormatFloat) or isinstance(format, mlab.FormatInt):
format.cell = negative_red_cell
format.xalign = 1.
elif isinstance(format, mlab.FormatDate):
format.xalign = 1.
return format
class SortedStringsScrolledWindow(gtk.ScrolledWindow):
"""
A simple treeview/liststore assuming all columns are strings.
Supports ascending/descending sort by clicking on column header
"""
def __init__(self, colheaders, formatterd=None):
"""
        xalignd if not None, is a dict mapping col header to xalignment (default 1)
formatterd if not None, is a dict mapping col header to a ColumnFormatter
"""
gtk.ScrolledWindow.__init__(self)
self.colheaders = colheaders
self.seq = None # not initialized with accts
self.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.set_policy(gtk.POLICY_AUTOMATIC,
gtk.POLICY_AUTOMATIC)
types = [gobject.TYPE_STRING] * len(colheaders)
model = self.model = gtk.ListStore(*types)
treeview = gtk.TreeView(self.model)
treeview.show()
treeview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
treeview.set_rules_hint(True)
class Clicked:
def __init__(self, parent, i):
self.parent = parent
self.i = i
self.num = 0
def __call__(self, column):
ind = []
dsu = []
for rownum, thisiter in enumerate(self.parent.iters):
val = model.get_value(thisiter, self.i)
try: val = float(val.strip().rstrip('%'))
except ValueError: pass
if mlab.safe_isnan(val): val = np.inf # force nan to sort uniquely
dsu.append((val, rownum))
dsu.sort()
if not self.num%2: dsu.reverse()
vals, otherind = list(zip(*dsu))
ind.extend(otherind)
self.parent.model.reorder(ind)
newiters = []
for i in ind:
newiters.append(self.parent.iters[i])
self.parent.iters = newiters[:]
for i, thisiter in enumerate(self.parent.iters):
key = tuple([self.parent.model.get_value(thisiter, j) for j in range(len(colheaders))])
self.parent.rownumd[i] = key
self.num+=1
if formatterd is None:
formatterd = dict()
formatterd = formatterd.copy()
for i, header in enumerate(colheaders):
renderer = gtk.CellRendererText()
if header not in formatterd:
formatterd[header] = ColumnFormatter()
formatter = formatterd[header]
column = gtk.TreeViewColumn(header, renderer, text=i)
renderer.set_property('xalign', formatter.xalign)
renderer.set_property('editable', True)
renderer.connect("edited", self.position_edited, i)
column.connect('clicked', Clicked(self, i))
column.set_property('clickable', True)
if formatter.cell is not None:
column.set_cell_data_func(renderer, formatter.cell)
treeview.append_column(column)
self.formatterd = formatterd
self.lastcol = column
self.add(treeview)
self.treeview = treeview
self.clear()
def position_edited(self, renderer, path, newtext, position):
#print path, position
self.model[path][position] = newtext
def clear(self):
self.iterd = dict()
self.iters = [] # an ordered list of iters
self.rownumd = dict() # a map from rownum -> symbol
self.model.clear()
self.datad = dict()
def flat(self, row):
seq = []
for i,val in enumerate(row):
formatter = self.formatterd.get(self.colheaders[i])
seq.extend([i,formatter.tostr(val)])
return seq
def __delete_selected(self, *unused): # untested
        keyd = dict([(thisiter, key) for key, thisiter in six.iteritems(self.iterd)])
for row in self.get_selected():
key = tuple(row)
thisiter = self.iterd[key]
self.model.remove(thisiter)
del self.datad[key]
del self.iterd[key]
self.iters.remove(thisiter)
for i, thisiter in enumerate(self.iters):
self.rownumd[i] = keyd[thisiter]
def delete_row(self, row):
key = tuple(row)
thisiter = self.iterd[key]
self.model.remove(thisiter)
del self.datad[key]
del self.iterd[key]
self.rownumd[len(self.iters)] = key
self.iters.remove(thisiter)
for rownum, thiskey in list(six.iteritems(self.rownumd)):
if thiskey==key: del self.rownumd[rownum]
def add_row(self, row):
thisiter = self.model.append()
self.model.set(thisiter, *self.flat(row))
key = tuple(row)
self.datad[key] = row
self.iterd[key] = thisiter
self.rownumd[len(self.iters)] = key
self.iters.append(thisiter)
def update_row(self, rownum, newrow):
key = self.rownumd[rownum]
thisiter = self.iterd[key]
newkey = tuple(newrow)
self.rownumd[rownum] = newkey
del self.datad[key]
del self.iterd[key]
self.datad[newkey] = newrow
self.iterd[newkey] = thisiter
self.model.set(thisiter, *self.flat(newrow))
def get_row(self, rownum):
key = self.rownumd[rownum]
return self.datad[key]
def get_selected(self):
selected = []
def foreach(model, path, iter, selected):
selected.append(model.get_value(iter, 0))
self.treeview.get_selection().selected_foreach(foreach, selected)
return selected
def rec2gtk(r, formatd=None, rownum=0, autowin=True):
"""
formatd is a dictionary mapping dtype name -> mlab.Format instances
This function creates a SortedStringsScrolledWindow (derived
from gtk.ScrolledWindow) and returns it. if autowin is True,
a gtk.Window is created, attached to the
SortedStringsScrolledWindow instance, shown and returned. If
autowin=False, the caller is responsible for adding the
SortedStringsScrolledWindow instance to a gtk widget and
showing it.
"""
if formatd is None:
formatd = dict()
formats = []
for i, name in enumerate(r.dtype.names):
dt = r.dtype[name]
format = formatd.get(name)
if format is None:
format = mlab.defaultformatd.get(dt.type, mlab.FormatObj())
format = gtkformat_factory(format, i)
formatd[name] = format
colheaders = r.dtype.names
scroll = SortedStringsScrolledWindow(colheaders, formatd)
for row in r:
scroll.add_row(row)
if autowin:
win = gtk.Window()
win.set_default_size(800,600)
#win.set_geometry_hints(scroll)
win.add(scroll)
win.show_all()
scroll.win = win
return scroll
class RecListStore(gtk.ListStore):
"""
A liststore as a model of an editable record array.
attributes:
* r - the record array with the edited values
* formatd - the list of mlab.FormatObj instances, with gtk attachments
* stringd - a dict mapping dtype names to a list of valid strings for the combo drop downs
* callbacks - a matplotlib.cbook.CallbackRegistry. Connect to the cell_changed with
def mycallback(liststore, rownum, colname, oldval, newval):
print('verify: old=%s, new=%s, rec=%s'%(oldval, newval, liststore.r[rownum][colname]))
cid = liststore.callbacks.connect('cell_changed', mycallback)
"""
def __init__(self, r, formatd=None, stringd=None):
"""
r is a numpy record array
formatd is a dict mapping dtype name to mlab.FormatObj instances
stringd, if not None, is a dict mapping dtype names to a list of
valid strings for a combo drop down editor
"""
if stringd is None:
stringd = dict()
if formatd is None:
formatd = mlab.get_formatd(r)
self.stringd = stringd
self.callbacks = cbook.CallbackRegistry()
self.r = r
self.headers = r.dtype.names
self.formats = [gtkformat_factory(formatd.get(name, mlab.FormatObj()),i)
for i,name in enumerate(self.headers)]
# use the gtk attached versions
self.formatd = formatd = dict(zip(self.headers, self.formats))
types = []
for format in self.formats:
if isinstance(format, mlab.FormatBool):
types.append(gobject.TYPE_BOOLEAN)
else:
types.append(gobject.TYPE_STRING)
self.combod = dict()
if len(stringd):
types.extend([gobject.TYPE_INT]*len(stringd))
keys = list(six.iterkeys(stringd))
keys.sort()
valid = set(r.dtype.names)
for ikey, key in enumerate(keys):
assert(key in valid)
combostore = gtk.ListStore(gobject.TYPE_STRING)
for s in stringd[key]:
combostore.append([s])
self.combod[key] = combostore, len(self.headers)+ikey
gtk.ListStore.__init__(self, *types)
for row in r:
vals = []
for formatter, val in zip(self.formats, row):
if isinstance(formatter, mlab.FormatBool):
vals.append(val)
else:
vals.append(formatter.tostr(val))
if len(stringd):
# todo, get correct index here?
vals.extend([0]*len(stringd))
self.append(vals)
def position_edited(self, renderer, path, newtext, position):
position = int(position)
format = self.formats[position]
rownum = int(path)
colname = self.headers[position]
oldval = self.r[rownum][colname]
try: newval = format.fromstr(newtext)
except ValueError:
msg = cbook.exception_to_str('Error converting "%s"'%newtext)
error_message(msg, title='Error')
return
self.r[rownum][colname] = newval
self[path][position] = format.tostr(newval)
self.callbacks.process('cell_changed', self, rownum, colname, oldval, newval)
def position_toggled(self, cellrenderer, path, position):
position = int(position)
format = self.formats[position]
newval = not cellrenderer.get_active()
rownum = int(path)
colname = self.headers[position]
oldval = self.r[rownum][colname]
self.r[rownum][colname] = newval
self[path][position] = newval
self.callbacks.process('cell_changed', self, rownum, colname, oldval, newval)
class RecTreeView(gtk.TreeView):
"""
An editable tree view widget for record arrays
"""
def __init__(self, recliststore, constant=None):
"""
build a gtk.TreeView to edit a RecListStore
constant, if not None, is a list of dtype names which are not editable
"""
self.recliststore = recliststore
gtk.TreeView.__init__(self, recliststore)
combostrings = set(recliststore.stringd.keys())
if constant is None:
constant = []
constant = set(constant)
for i, header in enumerate(recliststore.headers):
formatter = recliststore.formatd[header]
coltype = recliststore.get_column_type(i)
if coltype==gobject.TYPE_BOOLEAN:
renderer = gtk.CellRendererToggle()
if header not in constant:
renderer.connect("toggled", recliststore.position_toggled, i)
renderer.set_property('activatable', True)
elif header in combostrings:
renderer = gtk.CellRendererCombo()
renderer.connect("edited", recliststore.position_edited, i)
combostore, listind = recliststore.combod[header]
renderer.set_property("model", combostore)
renderer.set_property('editable', True)
else:
renderer = gtk.CellRendererText()
if header not in constant:
renderer.connect("edited", recliststore.position_edited, i)
renderer.set_property('editable', True)
if formatter is not None:
renderer.set_property('xalign', formatter.xalign)
tvcol = gtk.TreeViewColumn(header)
self.append_column(tvcol)
tvcol.pack_start(renderer, True)
if coltype == gobject.TYPE_STRING:
tvcol.add_attribute(renderer, 'text', i)
if header in combostrings:
combostore, listind = recliststore.combod[header]
tvcol.add_attribute(renderer, 'text-column', listind)
elif coltype == gobject.TYPE_BOOLEAN:
tvcol.add_attribute(renderer, 'active', i)
if formatter is not None and formatter.cell is not None:
tvcol.set_cell_data_func(renderer, formatter.cell)
self.connect("button-release-event", self.on_selection_changed)
#self.set_grid_lines(gtk.TREE_VIEW_GRID_LINES_BOTH)
self.get_selection().set_mode(gtk.SELECTION_BROWSE)
self.get_selection().set_select_function(self.on_select)
def on_select(self, *args):
return False
def on_selection_changed(self, *args):
(path, col) = self.get_cursor()
ren = col.get_cell_renderers()[0]
if isinstance(ren, gtk.CellRendererText):
self.set_cursor_on_cell(path, col, ren, start_editing=True)
def edit_recarray(r, formatd=None, stringd=None, constant=None, autowin=True):
"""
create a RecListStore and RecTreeView and return them.
If autowin is True, create a gtk.Window, insert the treeview into
it, and return it (return value will be (liststore, treeview, win)
See RecListStore and RecTreeView for a description of the keyword args
"""
liststore = RecListStore(r, formatd=formatd, stringd=stringd)
treeview = RecTreeView(liststore, constant=constant)
if autowin:
win = gtk.Window()
win.add(treeview)
win.show_all()
return liststore, treeview, win
else:
return liststore, treeview
if __name__=='__main__':
import datetime
N = 10
today = datetime.date.today()
dates = [today+datetime.timedelta(days=i) for i in range(N)] # datetimes
weekdays = [d.strftime('%a') for d in dates] # strings
gains = np.random.randn(N) # floats
prices = np.random.rand(N)*1e7 # big numbers
up = gains>0 # bools
clientid = list(xrange(N)) # ints
r = np.rec.fromarrays([clientid, dates, weekdays, gains, prices, up],
names=str('clientid,date,weekdays,gains,prices,up'))
# some custom formatters
formatd = mlab.get_formatd(r)
formatd['date'] = mlab.FormatDate('%Y-%m-%d')
formatd['prices'] = mlab.FormatMillions(precision=1)
formatd['gain'] = mlab.FormatPercent(precision=2)
# use a drop down combo for weekdays
stringd = dict(weekdays=['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'])
constant = ['clientid'] # block editing of this field
liststore = RecListStore(r, formatd=formatd, stringd=stringd)
treeview = RecTreeView(liststore, constant=constant)
def mycallback(liststore, rownum, colname, oldval, newval):
print('verify: old=%s, new=%s, rec=%s'%(oldval, newval, liststore.r[rownum][colname]))
liststore.callbacks.connect('cell_changed', mycallback)
win = gtk.Window()
win.set_title('with full customization')
win.add(treeview)
win.show_all()
# or you just use the defaults
r2 = r.copy()
ls, tv, win2 = edit_recarray(r2)
win2.set_title('with all defaults')
gtk.main()
| apache-2.0 |
atcemgil/notes | html_utils_tester.py | 1 | 1757 | from IPython.display import display, Math, Latex, HTML
import numpy as np
import matplotlib.pylab as plt
import matplotlib as mpl
# Table, TableRow, TableCell and make_htmlTable are assumed to come from the
# html_utils module this script exercises (not shown here).
from html_utils import Table, TableRow, TableCell, make_htmlTable
MAX = 40
nf = mpl.colors.Normalize(vmin=0, vmax=1.5*MAX, clip=True)
cmap = plt.cm.ScalarMappable(cmap=plt.cm.hot_r, norm=nf)
M = 10
N = 20
data = np.random.choice(range(MAX),replace=True, size=(M,N))
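# Render the random matrix as an HTML heat map: each cell's background colour
# encodes its value through the hot_r colormap normalised above.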
htmlTable = Table()
for i in range(M):
row = []
for j in range(N):
col = mpl.colors.rgb2hex(cmap.to_rgba(data[i,j]))
htmlCell = TableCell(data[i,j],style='background-color:'+col)
row.append(htmlCell)
htmlTableRow = TableRow(row)
htmlTable.rows.append(htmlTableRow)
#print(str(htmlTable))
display(HTML(str(htmlTable)) )
#------------------
t = Table()
t.rows.append(TableRow(['A', 'B', 'C'], header=True))
t.rows.append(TableRow(['D', 'E', 'F']))
t.rows.append(('i', 'j', 'k'))
display(HTML(str(t)))
# ----------
t2 = Table([
('1', '2'),
['3', '4']
], width='100%', header_row=('col1', 'col2'),
col_width=('', '75%'))
display(HTML(str(t2)))
t2.rows.append(['5', '6'])
t2.rows[1][1] = TableCell('new', bgcolor='red')
t2.rows.append(TableRow(['7', '8'], attribs={'align': 'center'}))
display(HTML(str(t2)))
# ----------
# sample table with column attributes and styles:
table_data = [
['Smith', 'John', 30, 4.5],
['Carpenter', 'Jack', 47],
['Johnson', 'Paul', 62, 10.55],
]
htmlcode = make_htmlTable(table_data,
header_row = ['Last name', 'First name', 'Age', 'Score'],
col_width=['', '1%', '5%', '5%'],
col_align=['left', 'center', 'right', 'char'],
col_styles=['font-size: large', '', 'font-size: small', 'background-color:yellow'])
HTML(str(htmlcode))
| mit |
peterbrook/assetjet | app/src/assetjet/services/prices/getByTicker.py | 1 | 3688 | from pandas import DataFrame
import pandas as pd
from datetime import date, datetime
from assetjet.cfg import db
from assetjet.log import log
import sqlalchemy.orm as orm
import json
import dateutil.parser
from bottle import route, request
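# JSONP endpoint; an example request (hypothetical values) looks like:
#   /services/Prices/GetByTicker/?ticker=AAPL&startDate=2011-01-01&endDate=2012-06-30
# The 'period' query parameter is read below but not used in this handler.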
@route('/services/Prices/GetByTicker/')
def getByTicker():
ticker = request.query.ticker
startDate = dateutil.parser.parse(request.query.startDate)
endDate = dateutil.parser.parse(request.query.endDate)
period = request.query.period
closePrices, seriesbegin = getAdjClosePrices([ticker], startDate, endDate)
pricesRebased = getPricesRebased(closePrices, seriesbegin, base=100, asjson=True)
return "_jqjsp(" + pricesRebased + ");"
def getPricesRebased(prices, startdates, base=100, asjson=False, frequency=None):
""" Returns a pandas dataframe (in json format if asjson=True) with prices
rebased to base, optionally with a new frequency:
e.g. 'D','M', 'W-FRI' for daily, end of month or friday-weekly data
"""
# Returns
returns = prices.pct_change()
# Rebasing
pricesRebased = (1 + returns).cumprod()
# requires NumPy 1.7 !! (1.6 doesn't translate datetime correctly)
for col in pricesRebased:
pricesRebased.ix[startdates.ix[col,0],col] = 1
pricesRebased = pricesRebased * base
if frequency:
pricesRebased = pricesRebased.asfreq(frequency, method='ffill')
if asjson:
# dataframe to_json() method is still pending, therefore:
return tojson(pricesRebased.reset_index())
else:
return pricesRebased
def tojson(df):
"""
convert a pandas data frame into a JSON object
"""
d = [
dict([(colname, row[i]) for i, colname in enumerate(df.columns)])
for row in df.values
]
# json cannot deal with datetime objects, therefore convert into string
dthandler = lambda obj: obj.isoformat() if isinstance(obj, datetime) else None
ret = json.dumps(d, default=dthandler, indent=4)
# print ret # must be disabled for freezing
return ret
#return json.dumps(d, indent=4)
def getAdjClosePrices(tickers, startdate, enddate):
""" returns a ready to use pandas DataFrame and a Series with the startDate
"""
Session = orm.sessionmaker(bind=db.GetEngine())
session = Session()
conn = db.GetEngine().connect()
# Query
conn.execute("""CREATE TEMP TABLE Tickers (Cd Text)""")
conn.execute("""INSERT INTO Tickers VALUES(?)""", zip(tickers))
result = conn.execute("""SELECT ts.Cd, Date, AdjClose
FROM TimeSeries ts
INNER JOIN Tickers t ON ts.Cd = t.Cd
WHERE ts.Date >= ? AND ts.Date <= ?""", (startdate, enddate))
rows = result.fetchall()
# Create a pandas DataFrame
pricesRaw = DataFrame.from_records(rows, columns=['Cd', 'Date', 'AdjClose'])
# Convert Date strings into datetime so pandas can do time series stuff
pricesRaw.Date = pd.to_datetime(pricesRaw.Date)
seriesbegin = pricesRaw[['Cd','Date']].groupby('Cd').min()
# Pivot DataFrame
prices = pricesRaw.pivot(index='Date', columns='Cd', values='AdjClose')
# Close DB and Cursor
conn.close()
return prices, seriesbegin
if __name__ == "__main__":
tickers = [ 'AAPL','MMM', 'ACE', 'ABT', 'ANF', 'ACN', 'ADBE', 'ADT', 'AMD', 'AES', 'AET' ]
startdate = '2011-01-01'
enddate = date.today()
# Get rebased prices
closePrices, seriesbegin = getAdjClosePrices(tickers, startdate, enddate)
pricesRebased = getPricesRebased(closePrices, seriesbegin, base=100, asjson=True, frequency='D')
print pricesRebased
| gpl-3.0 |
MPC-Berkeley/barc | workspace/src/labs/src/lab2/plot.py | 2 | 1152 | import rosbag
import numpy as np
import matplotlib.pyplot as plt
import os
import matplotlib.patches as patches
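# Plots the measured longitudinal velocity recorded in a rosbag against the
# constant desired velocity (8 m/s, see v_des below).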
bag = rosbag.Bag(os.path.expanduser("~/FILENAMEHERE.bag"))
topics = bag.get_type_and_topic_info()[1].keys()
types = []
for i in range(0,len(bag.get_type_and_topic_info()[1].values())):
types.append(bag.get_type_and_topic_info()[1].values()[i][0])
if bag.get_type_and_topic_info()[1].values()[i][0] == 'barc/ECU':
dimEcu = bag.get_type_and_topic_info()[1].values()[i][1]
if bag.get_type_and_topic_info()[1].values()[i][0] == 'labs/Z_DynBkMdl':
dimxy = bag.get_type_and_topic_info()[1].values()[i][1]
x_raw = np.zeros((dimxy, 1))
v_raw = np.zeros((dimxy, 1))
v_des = 8*np.ones((dimxy,1))
counter = 0
for counter, (topic, msg, t) in enumerate( bag.read_messages(topics=['/z_vhcl']) ) :
x_raw[counter] = msg.x
v_raw[counter] = msg.v_x
plt.figure(1)
plt.plot(x_raw, v_raw, label = 'Actual Velocity')
plt.plot(x_raw, v_des, label = 'Desired Velocity')
plt.ylabel('Velocity [m/s]')
plt.ylim((0,12))
plt.xlabel('Longitudinal position [m]')
plt.title('Longitudinal Velocity Tracking')
plt.legend()
plt.show()
bag.close()
| mit |
mkomeichi/BuildingMLSystemsWithPython | ch09/fft.py | 24 | 3673 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import sys
import os
import glob
import numpy as np
import scipy
import scipy.io.wavfile
from utils import GENRE_DIR, CHART_DIR
import matplotlib.pyplot as plt
from matplotlib.ticker import EngFormatter
def write_fft(fft_features, fn):
"""
Write the FFT features to separate files to speed up processing.
"""
base_fn, ext = os.path.splitext(fn)
data_fn = base_fn + ".fft"
np.save(data_fn, fft_features)
print("Written "%data_fn)
def create_fft(fn):
sample_rate, X = scipy.io.wavfile.read(fn)
fft_features = abs(scipy.fft(X)[:1000])
write_fft(fft_features, fn)
def read_fft(genre_list, base_dir=GENRE_DIR):
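    """
    Load the cached *.fft.npy feature files for every genre in genre_list and
    return (X, y) arrays ready for training a classifier.
    """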
X = []
y = []
for label, genre in enumerate(genre_list):
genre_dir = os.path.join(base_dir, genre, "*.fft.npy")
file_list = glob.glob(genre_dir)
assert(file_list), genre_dir
for fn in file_list:
fft_features = np.load(fn)
X.append(fft_features[:2000])
y.append(label)
return np.array(X), np.array(y)
def plot_wav_fft(wav_filename, desc=None):
plt.clf()
plt.figure(num=None, figsize=(6, 4))
sample_rate, X = scipy.io.wavfile.read(wav_filename)
spectrum = np.fft.fft(X)
freq = np.fft.fftfreq(len(X), 1.0 / sample_rate)
plt.subplot(211)
num_samples = 200.0
plt.xlim(0, num_samples / sample_rate)
plt.xlabel("time [s]")
plt.title(desc or wav_filename)
plt.plot(np.arange(num_samples) / sample_rate, X[:num_samples])
plt.grid(True)
plt.subplot(212)
plt.xlim(0, 5000)
plt.xlabel("frequency [Hz]")
plt.xticks(np.arange(5) * 1000)
if desc:
desc = desc.strip()
fft_desc = desc[0].lower() + desc[1:]
else:
fft_desc = wav_filename
plt.title("FFT of %s" % fft_desc)
plt.plot(freq, abs(spectrum), linewidth=5)
plt.grid(True)
plt.tight_layout()
rel_filename = os.path.split(wav_filename)[1]
plt.savefig("%s_wav_fft.png" % os.path.splitext(rel_filename)[0],
bbox_inches='tight')
plt.show()
def plot_wav_fft_demo():
plot_wav_fft("sine_a.wav", "400Hz sine wave")
plot_wav_fft("sine_b.wav", "3,000Hz sine wave")
plot_wav_fft("sine_mix.wav", "Mixed sine wave")
def plot_specgram(ax, fn):
sample_rate, X = scipy.io.wavfile.read(fn)
ax.specgram(X, Fs=sample_rate, xextent=(0, 30))
def plot_specgrams(base_dir=CHART_DIR):
"""
Plot a bunch of spectrograms of wav files in different genres
"""
plt.clf()
genres = ["classical", "jazz", "country", "pop", "rock", "metal"]
num_files = 3
f, axes = plt.subplots(len(genres), num_files)
for genre_idx, genre in enumerate(genres):
for idx, fn in enumerate(glob.glob(os.path.join(GENRE_DIR, genre, "*.wav"))):
if idx == num_files:
break
axis = axes[genre_idx, idx]
axis.yaxis.set_major_formatter(EngFormatter())
axis.set_title("%s song %i" % (genre, idx + 1))
plot_specgram(axis, fn)
specgram_file = os.path.join(base_dir, "Spectrogram_Genres.png")
plt.savefig(specgram_file, bbox_inches="tight")
plt.show()
if __name__ == "__main__":
# for fn in glob.glob(os.path.join(sys.argv[1], "*.wav")):
# create_fft(fn)
# plot_decomp()
if len(sys.argv) > 1:
plot_wav_fft(sys.argv[1], desc="some sample song")
else:
plot_wav_fft_demo()
plot_specgrams()
| mit |
gnu-sandhi/sandhi | modules/gr36/gr-filter/examples/channelize.py | 13 | 6790 | #!/usr/bin/env python
#
# Copyright 2009,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
from gnuradio import filter
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
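    """
    Flowgraph that sums nine complex sinusoids at different frequencies and
    separates them again with a 9-channel polyphase filterbank channelizer.
    """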
def __init__(self):
gr.top_block.__init__(self)
self._N = 2000000 # number of samples to use
self._fs = 1000 # initial sampling rate
self._M = M = 9 # Number of channels to channelize
        self._ifs = M*self._fs   # sampling rate of the summed input signal
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._ifs, 475.50, 50,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
# Create a set of signals at different frequencies
# freqs lists the frequencies of the signals that get stored
# in the list "signals", which then get summed together
self.signals = list()
self.add = gr.add_cc()
freqs = [-70, -50, -30, -10, 10, 20, 40, 60, 80]
for i in xrange(len(freqs)):
f = freqs[i] + (M/2-M+i+1)*self._fs
self.signals.append(gr.sig_source_c(self._ifs, gr.GR_SIN_WAVE, f, 1))
self.connect(self.signals[i], (self.add,i))
self.head = gr.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps, 1)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = gr.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
# Use this to play with the channel mapping
#self.pfb.set_channel_map([5,6,7,8,0,1,2,3,4])
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(gr.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
Ns = 1000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._ifs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(X))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
annarev/tensorflow | tensorflow/python/autograph/core/config.py | 11 | 1959 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import config_lib
Action = config_lib.Action
Convert = config_lib.Convert
DoNotConvert = config_lib.DoNotConvert
# This list is evaluated in order and stops at the first rule that tests True
# for a definitely_convert or definitely_bypass call.
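# For example, with the rules below a call into numpy or pandas code is left
# untouched (DoNotConvert), while code under
# tensorflow.python.training.experimental is converted.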
CONVERSION_RULES = (
# Known packages
Convert('tensorflow.python.training.experimental'),
# Builtin modules
DoNotConvert('collections'),
DoNotConvert('copy'),
DoNotConvert('cProfile'),
DoNotConvert('inspect'),
DoNotConvert('ipdb'),
DoNotConvert('linecache'),
DoNotConvert('mock'),
DoNotConvert('pathlib'),
DoNotConvert('pdb'),
DoNotConvert('posixpath'),
DoNotConvert('pstats'),
DoNotConvert('re'),
DoNotConvert('threading'),
DoNotConvert('urllib'),
# Known libraries
DoNotConvert('matplotlib'),
DoNotConvert('numpy'),
DoNotConvert('pandas'),
DoNotConvert('tensorflow'),
DoNotConvert('PIL'),
# TODO(b/133417201): Remove.
DoNotConvert('tensorflow_probability'),
# TODO(b/133842282): Remove.
DoNotConvert('tensorflow_datasets.core'),
)
| apache-2.0 |
bsipocz/scikit-image | skimage/io/tests/test_plugin.py | 24 | 3393 | from contextlib import contextmanager
from numpy.testing import assert_equal, raises
from skimage import io
from skimage.io import manage_plugins
io.use_plugin('pil')
priority_plugin = 'pil'
def setup_module():
manage_plugins.use_plugin('test') # see ../_plugins/test_plugin.py
def teardown_module():
io.reset_plugins()
@contextmanager
def protect_preferred_plugins():
"""Contexts where `preferred_plugins` can be modified w/o side-effects."""
preferred_plugins = manage_plugins.preferred_plugins.copy()
try:
yield
finally:
manage_plugins.preferred_plugins = preferred_plugins
def test_read():
io.imread('test.png', as_grey=True, dtype='i4', plugin='test')
def test_save():
io.imsave('test.png', [1, 2, 3], plugin='test')
def test_show():
io.imshow([1, 2, 3], plugin_arg=(1, 2), plugin='test')
def test_collection():
io.imread_collection('*.png', conserve_memory=False, plugin='test')
def test_use():
manage_plugins.use_plugin('test')
manage_plugins.use_plugin('test', 'imshow')
@raises(ValueError)
def test_failed_use():
manage_plugins.use_plugin('asd')
def test_use_priority():
manage_plugins.use_plugin(priority_plugin)
plug, func = manage_plugins.plugin_store['imread'][0]
assert_equal(plug, priority_plugin)
manage_plugins.use_plugin('test')
plug, func = manage_plugins.plugin_store['imread'][0]
assert_equal(plug, 'test')
def test_use_priority_with_func():
manage_plugins.use_plugin('pil')
plug, func = manage_plugins.plugin_store['imread'][0]
assert_equal(plug, 'pil')
manage_plugins.use_plugin('test', 'imread')
plug, func = manage_plugins.plugin_store['imread'][0]
assert_equal(plug, 'test')
plug, func = manage_plugins.plugin_store['imsave'][0]
assert_equal(plug, 'pil')
manage_plugins.use_plugin('test')
plug, func = manage_plugins.plugin_store['imsave'][0]
assert_equal(plug, 'test')
def test_plugin_order():
p = io.plugin_order()
assert 'imread' in p
assert 'test' in p['imread']
def test_available():
assert 'qt' in io.available_plugins
assert 'test' in io.find_available_plugins(loaded=True)
def test_load_preferred_plugins_all():
from skimage.io._plugins import pil_plugin, matplotlib_plugin
with protect_preferred_plugins():
manage_plugins.preferred_plugins = {'all': ['pil'],
'imshow': ['matplotlib']}
manage_plugins.reset_plugins()
for plugin_type in ('imread', 'imsave'):
plug, func = manage_plugins.plugin_store[plugin_type][0]
assert func == getattr(pil_plugin, plugin_type)
plug, func = manage_plugins.plugin_store['imshow'][0]
assert func == getattr(matplotlib_plugin, 'imshow')
def test_load_preferred_plugins_imread():
from skimage.io._plugins import pil_plugin, matplotlib_plugin
with protect_preferred_plugins():
manage_plugins.preferred_plugins['imread'] = ['pil']
manage_plugins.reset_plugins()
plug, func = manage_plugins.plugin_store['imread'][0]
assert func == pil_plugin.imread
plug, func = manage_plugins.plugin_store['imshow'][0]
assert func == matplotlib_plugin.imshow, func.__module__
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
| bsd-3-clause |
Mendeley/mrec | mrec/item_similarity/knn.py | 3 | 3868 | """
Brute-force k-nearest neighbour recommenders
intended to provide evaluation baselines.
"""
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from recommender import ItemSimilarityRecommender
class KNNRecommender(ItemSimilarityRecommender):
"""
Abstract base class for k-nn recommenders. You must supply an
implementation of the compute_all_similarities() method.
Parameters
==========
k : int
The number of nearest neighbouring items to retain
"""
def __init__(self,k):
self.k = k
def compute_similarities(self,dataset,j):
A = dataset.X
a = dataset.fast_get_col(j)
d = self.compute_all_similarities(A,a)
d[j] = 0 # zero out self-similarity
# now zero out similarities for all but top-k items
nn = d.argsort()[-1:-1-self.k:-1]
w = np.zeros(A.shape[1])
w[nn] = d[nn]
return w
def compute_all_similarities(self,A,a):
"""
Compute similarity scores between item vector a
and all the rows of A.
Parameters
==========
A : scipy.sparse.csr_matrix
Matrix of item vectors.
a : array_like
The item vector to be compared to each row of A.
Returns
=======
similarities : numpy.ndarray
Vector of similarity scores.
"""
pass
class DotProductKNNRecommender(KNNRecommender):
"""
Similarity between two items is their dot product
(i.e. cooccurrence count if input data is binary).
"""
def compute_all_similarities(self,A,a):
return A.T.dot(a).toarray().flatten()
def __str__(self):
return 'DotProductKNNRecommender(k={0})'.format(self.k)
class CosineKNNRecommender(KNNRecommender):
"""
Similarity between two items is their cosine distance.
"""
def compute_all_similarities(self,A,a):
return cosine_similarity(A.T,a.T).flatten()
def __str__(self):
return 'CosineKNNRecommender(k={0})'.format(self.k)
if __name__ == '__main__':
# use knn models like this:
import random
import StringIO
from mrec import load_fast_sparse_matrix
random.seed(0)
print 'loading test data...'
data = """\
%%MatrixMarket matrix coordinate real general
3 5 9
1 1 1
1 2 1
1 3 1
1 4 1
2 2 1
2 3 1
2 5 1
3 3 1
3 4 1
"""
print data
dataset = load_fast_sparse_matrix('mm',StringIO.StringIO(data))
num_users,num_items = dataset.shape
model = CosineKNNRecommender(k=2)
num_samples = 2
def output(i,j,val):
# convert back to 1-indexed
print '{0}\t{1}\t{2:.3f}'.format(i+1,j+1,val)
print 'computing some item similarities...'
print 'item\tsim\tweight'
# if we want we can compute these individually without calling fit()
for i in random.sample(xrange(num_items),num_samples):
for j,weight in model.get_similar_items(i,max_similar_items=2,dataset=dataset):
output(i,j,weight)
print 'learning entire similarity matrix...'
# more usually we just call train() on the entire dataset
model = CosineKNNRecommender(k=2)
model.fit(dataset)
print 'making some recommendations...'
print 'user\trec\tscore'
for u in random.sample(xrange(num_users),num_samples):
for i,score in model.recommend_items(dataset.X,u,max_items=10):
output(u,i,score)
print 'making batch recommendations...'
recs = model.batch_recommend_items(dataset.X)
for u in xrange(num_users):
for i,score in recs[u]:
output(u,i,score)
print 'making range recommendations...'
for start,end in [(0,2),(2,3)]:
recs = model.range_recommend_items(dataset.X,start,end)
for u in xrange(start,end):
for i,score in recs[u-start]:
output(u,i,score)
| bsd-3-clause |
aabadie/scikit-learn | examples/gaussian_process/plot_gpr_prior_posterior.py | 104 | 2878 | """
==========================================================================
Illustration of prior and posterior Gaussian process for different kernels
==========================================================================
This example illustrates the prior and posterior of a GPR with different
kernels. Mean, standard deviation, and 10 samples are shown for both prior
and posterior.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
kernels = [1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0)),
1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
1.0 * ExpSineSquared(length_scale=1.0, periodicity=3.0,
length_scale_bounds=(0.1, 10.0),
periodicity_bounds=(1.0, 10.0)),
ConstantKernel(0.1, (0.01, 10.0))
* (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2),
1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0),
nu=1.5)]
for fig_index, kernel in enumerate(kernels):
# Specify Gaussian Process
gp = GaussianProcessRegressor(kernel=kernel)
# Plot prior
plt.figure(fig_index, figsize=(8, 8))
plt.subplot(2, 1, 1)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Prior (kernel: %s)" % kernel, fontsize=12)
# Generate data and fit GP
rng = np.random.RandomState(4)
X = rng.uniform(0, 5, 10)[:, np.newaxis]
y = np.sin((X[:, 0] - 2.5) ** 2)
gp.fit(X, y)
# Plot posterior
plt.subplot(2, 1, 2)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Posterior (kernel: %s)\n Log-Likelihood: %.3f"
% (gp.kernel_, gp.log_marginal_likelihood(gp.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
| bsd-3-clause |
MichaelChatzidakis/Mn_Classifier_CNNs | train_crossval_mixed_states.py | 1 | 13779 | import os
import sys
import numpy as np
np.random.seed(23087)
import pandas as pd
from keras.utils import np_utils
from keras.optimizers import Adam
from keras.models import Sequential
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import confusion_matrix
from sklearn.cross_validation import train_test_split
from keras.layers.pooling import GlobalAveragePooling1D
from keras.layers.normalization import BatchNormalization
from keras.layers import Dropout, Activation, Dense, Flatten
from keras.layers.convolutional import Convolution1D,AveragePooling1D,MaxPooling1D
from sklearn.utils import class_weight
def train(argv):
#Params
epochs = 100
batch_size = 2048
train_test_percent = 0.15 #optional
folds = 1
    if argv[2] is not None:
root_path = os.path.join("weights","cross_validation_results", argv[2])
if not os.path.exists(root_path):
os.mkdir(root_path)
Mn_All,labels = load_data_mixed(num=1500)
class_weights = class_weight.compute_class_weight('balanced', np.unique(labels), labels)
class_weights = dict(enumerate(class_weights))
for fold in range(folds):
model = build_neural_network_graph(graph_type=argv[1])
(X_train, y_train), (X_test, y_test) = preprocess_crossval_aug(Mn_All, labels, fold=fold, n_splits=folds, pca_aug = True)
save_dir = os.path.join(root_path,"weights_"+str(fold))
if not os.path.exists(save_dir):
os.mkdir(save_dir)
if folds == 1:
(X_test,y_test)= load_ditized_spectra()
best_model_file = save_dir+"/highest_val_acc_weights_epoch{epoch:02d}-valacc{val_acc:.3f}_.h5"
best_model = ModelCheckpoint(best_model_file, monitor='val_acc', verbose = 1, save_best_only = True)
hist = model.fit(X_train, y_train,
nb_epoch=epochs, batch_size=batch_size,
callbacks = [best_model], validation_data=(X_test, y_test),
class_weight = class_weights, shuffle = True, verbose=1)
else:
best_model_file = save_dir+"/highest_val_acc_weights_epoch{epoch:02d}-valacc{val_acc:.3f}_.h5"
best_model = ModelCheckpoint(best_model_file, monitor='val_acc', verbose = 1, save_best_only = True)
hist = model.fit(X_train, y_train, validation_data=(X_test, y_test),
nb_epoch=epochs, batch_size=batch_size,
callbacks = [best_model],
class_weight = class_weights, shuffle = True, verbose=1)
training_graphs(save_dir, hist)
def load_data_mixed(num=1000):
path_to_input = 'input_spectra'
Mn2_C = np.array(pd.read_pickle(os.path.join(path_to_input, 'Mn2_615-685eV_thinnest_448.pkl')))
Mn3_C = np.array(pd.read_pickle(os.path.join(path_to_input, 'Mn3_615-685eV_thin_765.pkl')))
Mn4_C = np.array(pd.read_pickle(os.path.join(path_to_input, 'Mn4_615-685eV_thin_788.pkl')))
Mn23_2 = apply_mixed_aug(Mn2_C,Mn3_C, 0.5, 0.5, num)
Mn34_2 = apply_mixed_aug(Mn3_C,Mn4_C, 0.5, 0.5, num)
Mn_All=np.concatenate((Mn2_C[:,200:500],
Mn23_2,
Mn3_C[:,200:500],
Mn34_2,
Mn4_C[:,200:500]))
labels = ([0]*len(Mn2_C) +
[1]*len(Mn23_2) +
[2]*len(Mn3_C) +
[3]*len(Mn34_2) +
[4]*len(Mn4_C) )
labels = np.array(labels)
return Mn_All, labels
'''
def load_data_mixed(num):
path_to_input = 'input_spectra'
Mn2_C = np.array(pd.read_pickle(os.path.join(path_to_input, 'Mn2_615-685eV_thinnest_448.pkl')))
Mn3_C = np.array(pd.read_pickle(os.path.join(path_to_input, 'Mn3_615-685eV_thin_765.pkl')))
Mn4_C = np.array(pd.read_pickle(os.path.join(path_to_input, 'Mn4_615-685eV_thin_788.pkl')))
Mn23_1 = apply_mixed_aug(Mn2_C,Mn3_C, 0.33, 0.66, num)
Mn23_2 = apply_mixed_aug(Mn2_C,Mn3_C, 0.66, 0.33, num)
Mn34_1 = apply_mixed_aug(Mn3_C,Mn4_C, 0.33, 0.66, num)
Mn34_2 = apply_mixed_aug(Mn3_C,Mn4_C, 0.66, 0.33, num)
Mn_All=np.concatenate((Mn2_C[:,200:500],
Mn23_1,
Mn23_2,
Mn3_C[:,200:500],
Mn34_1,
Mn34_2,
Mn4_C[:,200:500]))
labels = ([0]*len(Mn2_C) +
[1]*len(Mn23_1) +
[2]*len(Mn23_2) +
[3]*len(Mn3_C) +
[4]*len(Mn34_1) +
[5]*len(Mn34_2) +
[6]*len(Mn4_C) )
labels = np.array(labels)
return Mn_All, labels
'''
def apply_mixed_aug(Mn_1, Mn_2, Mn_1_frac, Mn_2_frac, num):
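    """Generate `num` synthetic mixed-valence spectra from two reference sets.
    Each synthetic spectrum is the weighted sum Mn_1_frac * s1 + Mn_2_frac * s2
    of one randomly chosen spectrum from each set, with an independent random
    crop offset of up to +/-10 channels applied to each spectrum before mixing,
    and the result is normalised to a maximum of 1.
    """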
Mn_sum_list = []
for i in range(num):
rn1 = np.random.choice(len(Mn_1))
rn2 = np.random.choice(len(Mn_2))
rn_crop1 = np.random.choice(np.arange(-10, 10))
rn_crop2 = np.random.choice(np.arange(-10, 10))
spectra1 = Mn_1[rn1][200+rn_crop1: 500+rn_crop1]
spectra2 = Mn_2[rn2][200+rn_crop2: 500+rn_crop2]
Mn_sum = Mn_1_frac*spectra1 + Mn_2_frac*spectra2
Mn_sum /= np.max(Mn_sum)
Mn_sum_list.append(Mn_sum)
Mn_sum_list = np.array(Mn_sum_list)
return Mn_sum_list
def preprocess_crossval_aug(x, y, fold=None, n_splits=0, train_test_percent=0.25, pca_aug = False):
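    """Split the spectra into train/test sets and optionally augment the training set.
    n_splits > 3 selects the `fold`-th split of a StratifiedKFold; n_splits == 2
    draws a stratified hold-out split of size `train_test_percent`; n_splits == 1
    keeps everything for training and returns an all-zero placeholder test set
    (replaced by the digitized benchmark spectra in train()). When `pca_aug` is
    True the training spectra are augmented with PCA-modelled noise before the
    mean-centering, normalisation and one-hot encoding done in preprocess().
    """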
if n_splits > 3:
from sklearn.model_selection import StratifiedKFold
cv = StratifiedKFold(n_splits=n_splits, random_state=13, shuffle=False)
X_train = [x[train_index] for train_index, test_index in cv.split(x, y)]
X_test = [x[test_index] for train_index, test_index in cv.split(x, y)]
y_train = [y[train_index] for train_index, test_index in cv.split(x, y)]
y_test = [y[test_index] for train_index, test_index in cv.split(x, y)]
X_train, X_test, y_train, y_test = X_train[fold], X_test[fold], y_train[fold], y_test[fold]
print("Samples will be from fold", (fold+1), " out of the", n_splits, " n_splits")
print('Param train_test_percent will be ignored since folds are being used.')
elif n_splits == 1:
X_train = x
y_train = y
X_test = np.zeros((x.shape[0],x.shape[1]))
y_test = y
elif n_splits == 2:
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=train_test_percent,
random_state=13, stratify=y)
if pca_aug == True:
X_train, y_train = apply_pca_aug(X_train, y_train, snr_steps=25)
X_train, X_test, y_train, y_test = preprocess(X_train, X_test, y_train, y_test, mean_center = True, norm = True )
return (X_train, y_train), (X_test, y_test)
def apply_pca_aug(X_train, y_train, snr_steps):
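    """Augment the training spectra with PCA-modelled noise at several SNR levels.
    The spectra are reconstructed from their first 10 principal components (the
    signal estimate); the remaining components form a noise model that is added
    back at `snr_steps` scales spaced linearly between 0 and 5. Each augmented
    spectrum is re-normalised to a maximum of 1, so the training set grows by a
    factor of `snr_steps`.
    """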
from sklearn.decomposition import PCA
noise = np.copy(X_train)
mu = np.mean(noise, axis=0)
pca = PCA()
noise_model = pca.fit(noise)
nComp = 10
Xhat = np.dot(pca.transform(noise)[:,:nComp], pca.components_[:nComp,:])
noise_level = np.dot(pca.transform(noise)[:,nComp:], pca.components_[nComp:,:])
Xhat += mu
SNR = np.linspace(0,5,snr_steps)
noise_aug = []
for i in range(len(SNR)):
noise_aug.append(SNR[i]*noise_level + Xhat)
j = 0
for spectra in noise_aug[i]:
noise_aug[i][j] = spectra/np.max(spectra)
j += 1
X_train = np.array(noise_aug).reshape(snr_steps*X_train.shape[0], X_train.shape[1])
y_train = [item for i in range(snr_steps) for item in y_train]
return X_train, y_train
def load_ditized_spectra():
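    """Load the digitized benchmark spectra and resample each onto 300 energy points.
    Each spectrum is linearly interpolated onto a uniform 300-point grid over its
    own energy range, mean-centred and max-normalised, and labelled 0, 2 or 4
    (the pure Mn2+, Mn3+ and Mn4+ classes of load_data_mixed) before one-hot
    encoding.
    """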
data_path = '/home/mike/Mn_Valences/Mn_Classifier_CV_Good_Copy/Data/Digitized_Mn_Usecases.pkl'
data = pd.read_pickle(data_path)
benchmark = np.concatenate((data[1], data[2], data[0]))
labels = [0]*len(data[1])+ [2]*len(data[2])+ [4]*len(data[0])
X_test = np.zeros( (len(benchmark),300) )
i=0
for spectra in benchmark:
x = spectra['Energy']
y = spectra['Intensity']
min_energy, max_energy= np.min(x), np.max(x)
new_energy=np.linspace(min_energy,max_energy,300)
new_intensity = np.interp(new_energy, x, y)
new_intensity -= np.mean(new_intensity)
new_intensity /= np.max(new_intensity)
X_test[i] = new_intensity
i+=1
X_test = X_test.reshape(X_test.shape + (1,))
y_test = np.array(labels)
y_test = np_utils.to_categorical(y_test)
return (X_test,y_test)
def preprocess(X_train, X_test, y_train, y_test, mean_center = False, norm = True):
X_train = np.array(X_train).astype('float32')
X_test = np.array(X_test).astype('float32')
if mean_center == True:
X_train -= np.mean(X_train)
X_test -= np.mean(X_test)
print( 'Data mean-centered')
if norm == True:
X_train /= np.max(X_train)
X_test /= np.max(X_test)
print( 'Data normalized')
X_test = X_test.reshape(X_test.shape + (1,))
X_train = X_train.reshape(X_train.shape + (1,))
y_train = np.array(y_train)
y_test = np.array(y_test)
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
print( 'Data one-hot encoded')
print("Total of "+str(y_test.shape[1])+" classes.")
print("Total of "+str(len(X_train))+" training samples.")
print("Total of "+str(len(X_test))+" testing samples.")
return X_train, X_test, y_train, y_test
def build_neural_network_graph(graph_type):
if graph_type == 'cnn':
model = Sequential()
activation = 'relu'
model.add(Convolution1D(2, 9, input_shape=(300,1)))
model.add(BatchNormalization())
model.add(Activation(activation))
model.add(AveragePooling1D())
model.add(Convolution1D(2, 7))
model.add(BatchNormalization())
model.add(Activation(activation))
model.add(AveragePooling1D())
model.add(Convolution1D(4, 7))
model.add(BatchNormalization())
model.add(Activation(activation))
model.add(AveragePooling1D())
model.add(Convolution1D(8, 5))
model.add(BatchNormalization())
model.add(Activation(activation))
model.add(AveragePooling1D())
model.add(Convolution1D(12, 3))
model.add(BatchNormalization())
model.add(Activation(activation))
model.add(AveragePooling1D())
model.add(Dropout(0.1, seed=23087))
model.add(Convolution1D(5, 1))
model.add(BatchNormalization())
model.add(GlobalAveragePooling1D())
model.add(Activation('softmax', name='loss'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
print("CNN Model created.")
return model
elif graph_type=='MLP':
model = Sequential()
model.add(Flatten(input_shape=(300,1)))
model.add(Dropout(0.5, seed=23087, name='drop1'))
model.add(Dense(32, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5, seed=23087, name='drop9'))
model.add(Dense(32,activation='relu'))
model.add(BatchNormalization())
model.add(Dense(5, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
print("CNN Model created.")
return model
else:
print('Custom Model')
model = Sequential()
activation = 'relu'
model.add(Convolution1D(4, 9, input_shape=(300,1), activation=activation))
model.add(BatchNormalization())
model.add(AveragePooling1D())
model.add(Convolution1D(4, 7, activation=activation))
model.add(BatchNormalization())
model.add(AveragePooling1D())
model.add(Convolution1D(8, 7, activation=activation))
model.add(BatchNormalization())
model.add(AveragePooling1D())
model.add(Convolution1D(16, 5, activation=activation))
model.add(BatchNormalization())
model.add(AveragePooling1D())
model.add(Convolution1D(32, 3, activation=activation))
model.add(BatchNormalization())
model.add(AveragePooling1D())
model.add(Flatten())
model.add(Dropout(0.5, seed=23087, name='drop1'))
model.add(Dense(16, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5, seed=23087, name='drop9'))
model.add(Dense(16,activation='relu'))
model.add(BatchNormalization())
model.add(Dense(5, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
print("CNN Model created.")
return model
def training_graphs(save_dir, hist):
#summarize history for accuracy
plt.figure(figsize=(15, 5))
plt.rcParams.update({'font.size': 16})
plt.subplot(1, 2, 1)
plt.plot(hist.history['acc'], linewidth = 3)
plt.title('Model Training Accuracy')
plt.ylabel('Training Accuracy')
plt.xlabel('Epoch')
# summarize history for loss
plt.subplot(1, 2, 2)
plt.plot(hist.history['loss'], linewidth = 3)
plt.title('Model Training Loss')
plt.ylabel('Cross Entropy Loss')
plt.xlabel('Epoch')
plt.savefig(os.path.join(save_dir, 'training_accuracy.png'))
plt.figure(figsize=(10, 8))
plt.plot(hist.history['val_acc'], linewidth = 3)
plt.plot(hist.history['acc'], linewidth = 3)
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Test', 'Train'], loc='lower right')
plt.savefig(os.path.join(save_dir, 'test_accuracy.png'))
if __name__ == "__main__":
train(sys.argv)
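# Example invocation (illustrative; the folder name below is hypothetical):
# argv[1] selects the network type ('cnn', 'MLP', or anything else for the
# custom model) and argv[2] names the output folder created under
# weights/cross_validation_results, e.g.
#   python train_crossval_mixed_states.py cnn mixed_states_run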
| mit |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/mixture/plot_gmm_pdf.py | 1 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
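# The [0] index is needed because the pre-0.18 GMM.score_samples returns a
# (log-likelihood, responsibilities) tuple. Assuming scikit-learn >= 0.18, the
# same step with the replacement GaussianMixture class would look roughly like:
#   clf = mixture.GaussianMixture(n_components=2, covariance_type='full').fit(X_train)
#   Z = -clf.score_samples(XX)  # log-likelihoods are returned directly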
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| mit |
Djabbz/scikit-learn | sklearn/preprocessing/tests/test_data.py | 1 | 56774 |
# Authors:
#
# Giorgio Patrini
#
# License: BSD 3 clause
import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils import gen_batches
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_allclose
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import _handle_zeros_in_scale
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.exceptions import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
X_2d = rng.randn(n_samples, n_features) * scales + offsets
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def _check_dim_1axis(a):
if isinstance(a, list):
return np.array(a).shape[0]
return a.shape[0]
def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,
n_samples_seen):
if batch_stop != n:
assert_equal((i + 1) * chunk_size, n_samples_seen)
else:
assert_equal(i * chunk_size + (batch_stop - batch_start),
n_samples_seen)
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
def test_standard_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_almost_equal(scaler.mean_, X.ravel())
assert_almost_equal(scaler.scale_, np.ones(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.std(axis=0),
np.zeros_like(n_features))
else:
assert_almost_equal(scaler.mean_, X.mean())
assert_almost_equal(scaler.scale_, X.std())
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_almost_equal(scaler.mean_, 1.)
assert_almost_equal(scaler.scale_, 1.)
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), .0)
assert_equal(scaler.n_samples_seen_, X.shape[0])
def test_scale_1d():
# 1-d inputs
X_list = [1., 3., 5., 0.]
X_arr = np.array(X_list)
for X in [X_list, X_arr]:
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(), 0.0)
assert_array_almost_equal(X_scaled.std(), 1.0)
assert_array_equal(scale(X, with_mean=False, with_std=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
    # np.log(1e-5) is taken because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
n_features = 5
n_samples = 4
X = rng.randn(n_samples, n_features)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_equal(scaler.n_samples_seen_, n_samples)
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non-zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_handle_zeros_in_scale():
s1 = np.array([0, 1, 2, 3])
s2 = _handle_zeros_in_scale(s1, copy=True)
assert_false(s1[0] == s2[0])
assert_array_equal(s1, np.array([0, 1, 2, 3]))
assert_array_equal(s2, np.array([1, 1, 2, 3]))
def test_minmax_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler()
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MinMaxScaler().fit(X[batch0])
scaler_incr = MinMaxScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
        # Test std until the end of the partial fits, and check n_samples_seen bookkeeping
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert_equal(scaler_batch.var_, scaler_incr.var_) # Nones
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0),
scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0),
scaler_incr.scale_) # no constants
        # Test std until the end of the partial fits, and check n_samples_seen bookkeeping
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit_numerical_stability():
    # Test if the incremental computation introduces significant errors
    # for large datasets with values of large magnitude
rng = np.random.RandomState(0)
n_features = 2
n_samples = 100
offsets = rng.uniform(-1e15, 1e15, size=n_features)
scales = rng.uniform(1e3, 1e6, size=n_features)
X = rng.randn(n_samples, n_features) * scales + offsets
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for chunk in X:
scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
    # Regardless of the absolute values, they must agree to within 6 significant digits
tol = 10 ** (-6)
assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
    # NOTE: for much larger offsets the std is very unstable (last assert)
    # while the mean is OK.
# Sparse input
size = (100, 3)
scale = 1e20
X = rng.random_integers(0, 1, size).astype(np.float64) * scale
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
for X in [X_csr, X_csc]:
# with_mean=False is required with sparse input
scaler = StandardScaler(with_mean=False).fit(X)
scaler_incr = StandardScaler(with_mean=False)
for chunk in X:
# chunk = sparse.csr_matrix(data_chunks)
scaler_incr = scaler_incr.partial_fit(chunk)
        # Regardless of magnitude, they must agree to within 6 significant digits
tol = 10 ** (-6)
assert_true(scaler.mean_ is not None)
assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
def test_partial_fit_sparse_input():
# Check that sparsity is not destroyed
X = np.array([[1.], [0.], [0.], [5.]])
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
for X in [X_csr, X_csc]:
X_null = null_transform.partial_fit(X).transform(X)
assert_array_equal(X_null.data, X.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_null.data)
assert_array_equal(X_orig.data, X.data)
def test_standard_scaler_trasform_with_partial_fit():
# Check some postconditions after applying partial_fit and transform
X = X_2d[:100, :]
scaler_incr = StandardScaler()
for i, batch in enumerate(gen_batches(X.shape[0], 1)):
X_sofar = X[:(i+1), :]
chunks_copy = X_sofar.copy()
scaled_batch = StandardScaler().fit_transform(X_sofar)
scaler_incr = scaler_incr.partial_fit(X[batch])
scaled_incr = scaler_incr.transform(X_sofar)
assert_array_almost_equal(scaled_batch, scaled_incr)
assert_array_almost_equal(X_sofar, chunks_copy) # No change
right_input = scaler_incr.inverse_transform(scaled_incr)
assert_array_almost_equal(X_sofar, right_input)
zero = np.zeros(X.shape[1])
epsilon = np.nextafter(0, 1)
        assert_array_less(zero, scaler_incr.var_ + epsilon)  # i.e. less than or equal
assert_array_less(zero, scaler_incr.scale_ + epsilon)
        # (i+1) because the scaler has already been fitted
assert_equal((i + 1), scaler_incr.n_samples_seen_)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
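    # The third column of X spans [-0.1, 1.1] (range 1.2), so the new values
    # 0.5, 0.0 and 1.5 map to 0.5, 0.083 and 1.333; the constant columns get a
    # unit scale via _handle_zeros_in_scale, giving the first two columns below.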
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
scaler = MinMaxScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(X_scaled.min(axis=0),
np.zeros(n_features))
assert_array_almost_equal(X_scaled.max(axis=0),
np.zeros(n_features))
else:
assert_array_almost_equal(X_scaled.min(axis=0), .0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# Function interface
X_1d = X_1row.ravel()
min_ = X_1d.min()
max_ = X_1d.max()
assert_array_almost_equal((X_1d - min_) / (max_ - min_),
minmax_scale(X_1d, copy=True))
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
assert_raises(ValueError, StandardScaler().fit, X_csc)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
X_csc_copy = X_csc.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csc)
assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
assert_raises(ValueError, scale, X_csc, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csc)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
assert_raises(ValueError, scaler.transform, X_csc)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
X_transformed_csc = sparse.csc_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csc)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# null scale
X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
assert_array_almost_equal(X_csr.data, X_csr_scaled.data)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
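    # For example, the third column is [0.5, -0.1, 1.1]: np.percentile with its
    # default linear interpolation gives quartiles (0.2, 0.8), so the IQR is 0.6
    # and the median is 0.5, mapping (0.5, -0.1, 1.1) onto (0.0, -1.0, +1.0).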
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
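    # The maximum absolute value per column is (0, 1, 1.5); dividing by it
    # (the all-zero column keeps a unit scale via _handle_zeros_in_scale)
    # gives the expected matrix below.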
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# function interface
X_trans = maxabs_scale(X)
assert_array_almost_equal(X_trans, X_expected)
# sparse data
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_trans_csr = scaler.fit_transform(X_csr)
X_trans_csc = scaler.fit_transform(X_csc)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans_csr.A, X_expected)
assert_array_almost_equal(X_trans_csc.A, X_expected)
X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)
X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)
assert_array_almost_equal(X, X_trans_csr_inv.A)
assert_array_almost_equal(X, X_trans_csc_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
@ignore_warnings
def test_deprecation_minmax_scaler():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
scaler = MinMaxScaler().fit(X)
depr_message = ("Attribute data_range will be removed in "
"0.19. Use data_range_ instead")
data_range = assert_warns_message(DeprecationWarning, depr_message,
getattr, scaler, "data_range")
assert_array_equal(data_range, scaler.data_range)
depr_message = ("Attribute data_min will be removed in "
"0.19. Use data_min_ instead")
data_min = assert_warns_message(DeprecationWarning, depr_message,
getattr, scaler, "data_min")
assert_array_equal(data_min, scaler.data_min)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_maxabs_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
scaler = MaxAbsScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),
np.ones(n_features))
else:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MaxAbsScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# function interface
X_1d = X_1row.ravel()
max_abs = np.abs(X_1d).max()
assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
def test_maxabs_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d[:100, :]
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler()
scaler_incr_csr = MaxAbsScaler()
scaler_incr_csc = MaxAbsScaler()
for batch in gen_batches(n, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
X_csr = sparse.csr_matrix(X[batch])
scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
X_csc = sparse.csc_matrix(X[batch])
scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csc.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csc.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MaxAbsScaler().fit(X[batch0])
scaler_incr = MaxAbsScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Test std until the end of the partial fits, and check n_samples_seen bookkeeping
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
binarizer = Binarizer(copy=False)
X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
X_bin = binarizer.transform(X_float)
if init is not list:
assert_true(X_bin is X_float)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_deprecation_standard_scaler():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
scaler = StandardScaler().fit(X)
depr_message = ("Function std_ is deprecated; Attribute std_ will be "
"removed in 0.19. Use scale_ instead")
std_ = assert_warns_message(DeprecationWarning, depr_message, getattr,
scaler, "std_")
assert_array_equal(std_, scaler.scale_)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
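    # the fitted columns keep the observed values {0, 1}, {0, 2} and {1, 2, 3}, i.e. 7
    # active columns in total; the unseen values 4 and 1 in the first two columns of y
    # are encoded as all-zero blocks under handle_unknown='ignore'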
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
def test_fit_cold_start():
X = iris.data
X_2d = X[:, :2]
# Scalers that have a partial_fit method
scalers = [StandardScaler(with_mean=False, with_std=False),
MinMaxScaler(),
MaxAbsScaler()]
for scaler in scalers:
scaler.fit_transform(X)
# with a different shape, this may break the scaler unless the internal
# state is reset
scaler.fit_transform(X_2d)
| bsd-3-clause |
matousc89/padasip | padasip/filters/lms.py | 1 | 7179 | """
.. versionadded:: 0.1
.. versionchanged:: 1.0.0
The least-mean-squares (LMS) adaptive filter :cite:`sayed2003fundamentals`
is the most popular adaptive filter.
The LMS filter can be created as follows
>>> import padasip as pa
>>> pa.filters.FilterLMS(n)
where :code:`n` is the size (number of taps) of the filter.
Content of this page:
.. contents::
:local:
:depth: 1
.. seealso:: :ref:`filters`
Algorithm Explanation
==========================
The LMS adaptive filter could be described as
:math:`y(k) = w_1 \cdot x_{1}(k) + ... + w_n \cdot x_{n}(k)`,
or in a vector form
:math:`y(k) = \\textbf{x}^T(k) \\textbf{w}(k)`,
where :math:`k` is discrete time index, :math:`(.)^T` denotes the transposition,
:math:`y(k)` is filtered signal,
:math:`\\textbf{w}` is vector of filter adaptive parameters and
:math:`\\textbf{x}` is input vector (for a filter of size :math:`n`) as follows
:math:`\\textbf{x}(k) = [x_1(k), ..., x_n(k)]`.
The LMS weights adaptation could be described as follows
:math:`\\textbf{w}(k+1) = \\textbf{w}(k) + \Delta \\textbf{w}(k)`,
where :math:`\Delta \\textbf{w}(k)` is
:math:`\Delta \\textbf{w}(k) = \\frac{1}{2} \mu \\frac{\partial e^2(k)}
{ \partial \\textbf{w}(k)}\ = \mu \cdot e(k) \cdot \\textbf{x}(k)`,
where :math:`\mu` is the learning rate (step size) and :math:`e(k)`
is error defined as
:math:`e(k) = d(k) - y(k)`.
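Written out in plain NumPy, one update step of the rule above looks roughly as
follows (a standalone sketch with made-up values, independent of the
:code:`FilterLMS` class below):
.. code-block:: python
    import numpy as np
    n, mu = 4, 0.1                   # filter size and step size (made-up values)
    w = np.zeros(n)                  # current weights w(k)
    x = np.random.normal(0, 1, n)    # current input vector x(k)
    d = 1.0                          # desired output d(k)
    y = np.dot(w, x)                 # filter output y(k)
    e = d - y                        # error e(k) = d(k) - y(k)
    w = w + mu * e * x               # weight update w(k+1)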
Stability and Optimal Performance
==================================
The general stability criterion of the LMS filter reads as follows
:math:`|1 - \mu \cdot ||\\textbf{x}(k)||^2 | \leq 1`.
In practice the key argument :code:`mu` should be set to a really small number
in most of the cases
(recommended values lie roughly in the range from 0.1 to 0.00001).
If you still have problems with the stability or performance of the filter,
then try the normalized LMS (:ref:`filter-nlms`).
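A rough way to check a candidate step size against this criterion for a
representative input vector (a standalone sketch with made-up data, not part of
the padasip API):
.. code-block:: python
    import numpy as np
    mu = 0.1
    x = np.random.normal(0, 1, 4)    # one input vector of filter size n = 4
    # the criterion above is satisfied when 0 <= mu * ||x||^2 <= 2
    print(abs(1 - mu * np.dot(x, x)) <= 1)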
Minimal Working Examples
==============================
If you have measured data you may filter it as follows
.. code-block:: python
import numpy as np
import matplotlib.pylab as plt
import padasip as pa
# creation of data
N = 500
x = np.random.normal(0, 1, (N, 4)) # input matrix
v = np.random.normal(0, 0.1, N) # noise
d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v # target
# identification
f = pa.filters.FilterLMS(n=4, mu=0.1, w="random")
y, e, w = f.run(d, x)
# show results
plt.figure(figsize=(15,9))
plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
plt.plot(d,"b", label="d - target")
plt.plot(y,"g", label="y - output");plt.legend()
plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
plt.plot(10*np.log10(e**2),"r", label="e - error [dB]");plt.legend()
plt.tight_layout()
plt.show()
An example how to filter data measured in real-time
.. code-block:: python
import numpy as np
import matplotlib.pylab as plt
import padasip as pa
    # these two functions simulate your online measurement
def measure_x():
# it produces input vector of size 3
x = np.random.random(3)
return x
def measure_d(x):
        # measure the system output
d = 2*x[0] + 1*x[1] - 1.5*x[2]
return d
N = 100
log_d = np.zeros(N)
log_y = np.zeros(N)
filt = pa.filters.FilterLMS(3, mu=1.)
for k in range(N):
# measure input
x = measure_x()
# predict new value
y = filt.predict(x)
# do the important stuff with prediction output
pass
# measure output
d = measure_d(x)
# update filter
filt.adapt(d, x)
# log values
log_d[k] = d
log_y[k] = y
### show results
plt.figure(figsize=(15,9))
plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
plt.plot(log_d,"b", label="d - target")
plt.plot(log_y,"g", label="y - output");plt.legend()
plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
plt.plot(10*np.log10((log_d-log_y)**2),"r", label="e - error [dB]")
plt.legend(); plt.tight_layout(); plt.show()
References
============
.. bibliography:: lms.bib
:style: plain
Code Explanation
====================
"""
import numpy as np
from padasip.filters.base_filter import AdaptiveFilter
class FilterLMS(AdaptiveFilter):
"""
This class represents an adaptive LMS filter.
**Args:**
    * `n` : length of filter (integer) - how many inputs are in each input array
(row of input matrix)
**Kwargs:**
    * `mu` : learning rate (float). Also known as step size. If it is too small,
      the filter may converge too slowly or perform poorly. If it is too high,
the filter will be unstable. The default value can be unstable
for ill-conditioned input data.
* `w` : initial weights of filter. Possible values are:
* array with initial weights (1 dimensional array) of filter size
* "random" : create random weights
* "zeros" : create zero value weights
"""
def __init__(self, n, mu=0.01, w="random"):
self.kind = "LMS filter"
if type(n) == int:
self.n = n
else:
raise ValueError('The size of filter must be an integer')
self.mu = self.check_float_param(mu, 0, 1000, "mu")
self.init_weights(w, self.n)
self.w_history = False
def adapt(self, d, x):
"""
Adapt weights according one desired value and its input.
**Args:**
* `d` : desired value (float)
* `x` : input array (1-dimensional array)
"""
y = np.dot(self.w, x)
e = d - y
self.w += self.mu * e * x
def run(self, d, x):
"""
This function filters multiple samples in a row.
**Args:**
* `d` : desired value (1 dimensional array)
* `x` : input matrix (2-dimensional array). Rows are samples,
columns are input arrays.
**Returns:**
* `y` : output value (1 dimensional array).
The size corresponds with the desired value.
* `e` : filter error for every sample (1 dimensional array).
The size corresponds with the desired value.
* `w` : history of all weights (2 dimensional array).
          Every row is the set of weights for the given sample.
"""
        # measure the data and check if the dimensions agree
N = len(x)
if not len(d) == N:
raise ValueError('The length of vector d and matrix x must agree.')
self.n = len(x[0])
# prepare data
try:
x = np.array(x)
d = np.array(d)
except:
raise ValueError('Impossible to convert x or d to a numpy array')
# create empty arrays
y = np.zeros(N)
e = np.zeros(N)
self.w_history = np.zeros((N,self.n))
# adaptation loop
for k in range(N):
self.w_history[k,:] = self.w
y[k] = np.dot(self.w, x[k])
e[k] = d[k] - y[k]
dw = self.mu * e[k] * x[k]
self.w += dw
return y, e, self.w_history
| mit |
mojoboss/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 142 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
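# columns 1-2 of X and Y share the latent l1 and columns 3-4 share l2, so the
# canonical PLS below should recover these two shared directions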
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coefs with B
print("Estimated B")
print(np.round(pls2.coefs, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coefs, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
| bsd-3-clause |
fdft/ml | ch11/demo_pca.py | 25 | 4333 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
from matplotlib import pylab
import numpy as np
from sklearn import linear_model, decomposition
from sklearn import lda
logistic = linear_model.LogisticRegression()
from utils import CHART_DIR
np.random.seed(3)
x1 = np.arange(0, 10, .2)
x2 = x1 + np.random.normal(scale=1, size=len(x1))
def plot_simple_demo_1():
pylab.clf()
fig = pylab.figure(num=None, figsize=(10, 4))
pylab.subplot(121)
title = "Original feature space"
pylab.title(title)
pylab.xlabel("$X_1$")
pylab.ylabel("$X_2$")
x1 = np.arange(0, 10, .2)
x2 = x1 + np.random.normal(scale=1, size=len(x1))
good = (x1 > 5) | (x2 > 5)
bad = ~good
x1g = x1[good]
x2g = x2[good]
pylab.scatter(x1g, x2g, edgecolor="blue", facecolor="blue")
x1b = x1[bad]
x2b = x2[bad]
pylab.scatter(x1b, x2b, edgecolor="red", facecolor="white")
pylab.grid(True)
pylab.subplot(122)
X = np.c_[(x1, x2)]
pca = decomposition.PCA(n_components=1)
Xtrans = pca.fit_transform(X)
Xg = Xtrans[good]
Xb = Xtrans[bad]
pylab.scatter(
Xg[:, 0], np.zeros(len(Xg)), edgecolor="blue", facecolor="blue")
pylab.scatter(
Xb[:, 0], np.zeros(len(Xb)), edgecolor="red", facecolor="white")
title = "Transformed feature space"
pylab.title(title)
pylab.xlabel("$X'$")
fig.axes[1].get_yaxis().set_visible(False)
print(pca.explained_variance_ratio_)
pylab.grid(True)
pylab.autoscale(tight=True)
filename = "pca_demo_1.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_simple_demo_2():
pylab.clf()
fig = pylab.figure(num=None, figsize=(10, 4))
pylab.subplot(121)
title = "Original feature space"
pylab.title(title)
pylab.xlabel("$X_1$")
pylab.ylabel("$X_2$")
x1 = np.arange(0, 10, .2)
x2 = x1 + np.random.normal(scale=1, size=len(x1))
good = x1 > x2
bad = ~good
x1g = x1[good]
x2g = x2[good]
pylab.scatter(x1g, x2g, edgecolor="blue", facecolor="blue")
x1b = x1[bad]
x2b = x2[bad]
pylab.scatter(x1b, x2b, edgecolor="red", facecolor="white")
pylab.grid(True)
pylab.subplot(122)
X = np.c_[(x1, x2)]
pca = decomposition.PCA(n_components=1)
Xtrans = pca.fit_transform(X)
Xg = Xtrans[good]
Xb = Xtrans[bad]
pylab.scatter(
Xg[:, 0], np.zeros(len(Xg)), edgecolor="blue", facecolor="blue")
pylab.scatter(
Xb[:, 0], np.zeros(len(Xb)), edgecolor="red", facecolor="white")
title = "Transformed feature space"
pylab.title(title)
pylab.xlabel("$X'$")
fig.axes[1].get_yaxis().set_visible(False)
print(pca.explained_variance_ratio_)
pylab.grid(True)
pylab.autoscale(tight=True)
filename = "pca_demo_2.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_simple_demo_lda():
pylab.clf()
fig = pylab.figure(num=None, figsize=(10, 4))
pylab.subplot(121)
title = "Original feature space"
pylab.title(title)
pylab.xlabel("$X_1$")
pylab.ylabel("$X_2$")
good = x1 > x2
bad = ~good
x1g = x1[good]
x2g = x2[good]
pylab.scatter(x1g, x2g, edgecolor="blue", facecolor="blue")
x1b = x1[bad]
x2b = x2[bad]
pylab.scatter(x1b, x2b, edgecolor="red", facecolor="white")
pylab.grid(True)
pylab.subplot(122)
X = np.c_[(x1, x2)]
lda_inst = lda.LDA(n_components=1)
Xtrans = lda_inst.fit_transform(X, good)
Xg = Xtrans[good]
Xb = Xtrans[bad]
pylab.scatter(
Xg[:, 0], np.zeros(len(Xg)), edgecolor="blue", facecolor="blue")
pylab.scatter(
Xb[:, 0], np.zeros(len(Xb)), edgecolor="red", facecolor="white")
title = "Transformed feature space"
pylab.title(title)
pylab.xlabel("$X'$")
fig.axes[1].get_yaxis().set_visible(False)
pylab.grid(True)
pylab.autoscale(tight=True)
filename = "lda_demo.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
if __name__ == '__main__':
plot_simple_demo_1()
plot_simple_demo_2()
plot_simple_demo_lda()
| mit |
kavinyao/SKBPR | graphs/plot_common.py | 1 | 1472 | import numpy as np
import matplotlib.pyplot as plt
marker_settings = {
'Hottest': 'ro-',
'SKBPR-BC': 'gs-',
'SKBPR-BCIPF': 'b^-',
'SKBPR-BCIPF-FB': 'mD-',
'SKBPR-BC-SEQ': 'k*-',
}
def output_dataset(fig, title, labels, pos, dataset, axis):
ax = fig.add_subplot(pos)
for method, data in dataset.iteritems():
marker = marker_settings[method]
x_data, y_data, yerr_minus, yerr_plus = data
# plot data with lines
ax.errorbar(x_data, y_data, yerr=[yerr_minus, yerr_plus], fmt=marker, label=method)
for n, d in zip(x_data, y_data):
# show precise number
ax.text(n, d, '%.4f' % d)
xlabel, ylabel = labels
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.legend()
plt.axis(axis)# doesn't care about y-axis range
plt.grid(True)
def output_graph(datasets, filename):
"""
@param datasets list of (Measure, Name, Axes, xy_data)
@param filename the name of file to write to
"""
# default is 8x6, which is so narrow that labels overlap
# so make it wider
fig = plt.figure(figsize=(10, 6))# default dpi=72
ds_number = len(datasets)
for i, config in enumerate(datasets, 1):
measure, name, axis, dataset = config
labels = ('N', measure)
title = '%s Plot - %s Dataset' % (measure, name)
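        # pos like '121' encodes a 1 x ds_number subplot grid with this dataset in slot i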
output_dataset(fig, title, labels, '1%d%d' % (ds_number, i), dataset, axis)
fig.savefig(filename)
| mit |
diofant/diofant | diofant/tests/external/test_plot_implicit.py | 2 | 3552 | """Implicit plotting tests."""
import tempfile
import warnings
import pytest
from diofant import (And, Eq, I, Or, cos, exp, pi, plot_implicit, re, sin,
symbols, tan)
from diofant.abc import x, y
__all__ = ()
matplotlib = pytest.importorskip('matplotlib', minversion='1.1.0')
def tmp_file(name=''):
return tempfile.NamedTemporaryFile(suffix='.png').name
def plot_and_save(name):
# implicit plot tests
plot_implicit(Eq(y, cos(x)), (x, -5, 5), (y, -2, 2)).save(tmp_file(name))
plot_implicit(Eq(y**2, x**3 - x), (x, -5, 5),
(y, -4, 4), show=False).save(tmp_file(name))
plot_implicit(y > 1 / x, (x, -5, 5),
(y, -2, 2), show=False).save(tmp_file(name))
plot_implicit(y < 1 / tan(x), (x, -5, 5),
(y, -2, 2), show=False).save(tmp_file(name))
plot_implicit(y >= 2 * sin(x) * cos(x), (x, -5, 5),
(y, -2, 2), show=False).save(tmp_file(name))
plot_implicit(y <= x**2, (x, -3, 3),
(y, -1, 5), show=False).save(tmp_file(name))
plot_implicit(Or(And(x < y, x > y**2), sin(y) > x),
show=False).save(tmp_file(name))
plot_implicit(Or(And(x < y, x > y**2), sin(y)),
show=False).save(tmp_file(name))
plot_implicit(Or(x < y, sin(x) >= y), show=False).save(tmp_file(name))
# Test all input args for plot_implicit
plot_implicit(Eq(y**2, x**3 - x), show=False).save(tmp_file(name))
plot_implicit(Eq(y**2, x**3 - x), adaptive=False, show=False).save(tmp_file(name))
plot_implicit(Eq(y**2, x**3 - x), adaptive=False, points=500, show=False).save(tmp_file(name))
plot_implicit(y > x, (x, -5, 5), show=False).save(tmp_file(name))
plot_implicit(And(y > exp(x), y > x + 2), show=False).save(tmp_file(name))
plot_implicit(Or(y > x, y > -x), show=False).save(tmp_file(name))
plot_implicit(x**2 - 1, (x, -5, 5), show=False).save(tmp_file(name))
plot_implicit(x**2 - 1, show=False).save(tmp_file(name))
plot_implicit(y > x, depth=-5, show=False).save(tmp_file(name))
plot_implicit(y > x, depth=5, show=False).save(tmp_file(name))
plot_implicit(y > cos(x), adaptive=False, show=False).save(tmp_file(name))
plot_implicit(y < cos(x), adaptive=False, show=False).save(tmp_file(name))
plot_implicit(y - cos(pi / x), show=False).save(tmp_file(name))
pytest.raises(ValueError, lambda: plot_implicit(y > x, (x, -1, 1, 2)))
# issue sympy/sympy#17719
plot_implicit(((x - 1)**2 + y**2 < 2) ^ ((x + 1)**2 + y**2 < 2),
show=False).save(tmp_file(name))
def test_line_color():
x, y = symbols('x, y')
p = plot_implicit(x**2 + y**2 - 1, line_color='green', show=False)
assert p._series[0].line_color == 'green'
p = plot_implicit(x**2 + y**2 - 1, line_color='r', show=False)
assert p._series[0].line_color == 'r'
def test_matplotlib():
plot_and_save('test')
@pytest.mark.xfail
def test_matplotlib2():
name = 'test2'
plot_implicit(And(y > cos(x), Or(y > x, Eq(y, x))), show=False).save(tmp_file(name))
# Test plots which cannot be rendered using the adaptive algorithm
# TODO: catch the warning.
plot_implicit(Eq(y, re(cos(x) + I*sin(x))), show=False).save(tmp_file(name))
with warnings.catch_warnings(record=True) as w:
plot_implicit(x**2 - 1, legend='An implicit plot', show=False).save(tmp_file(name))
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert 'No labelled objects found' in str(w[0].message)
| bsd-3-clause |
rseubert/scikit-learn | sklearn/tests/test_kernel_approximation.py | 6 | 7616 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
"""test that AdditiveChi2Sampler approximates kernel on random data"""
# compute exact kernel
    # abbreviations for an easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
"""test that RBFSampler approximates kernel on random data"""
# compute exact kernel
c = 0.03
    # abbreviations for an easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
"""test that RBFSampler approximates kernel on random data"""
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
"""Regression test: kernel approx. transformers should work on lists
No assertions; the old versions would simply crash
"""
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
"""Non-regression: Nystroem should pass other parameters beside gamma."""
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
"""Test Nystroem on a callable."""
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
waterponey/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
print('Dataset statics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
ElDeveloper/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
| bsd-3-clause |
BhallaLab/moose | moose-examples/snippets/reacDiffSpinyNeuron.py | 2 | 10667 | #########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2013 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import math
import pylab
import numpy
import matplotlib.pyplot as plt
import moose
def makeModel():
model = moose.Neutral( '/model' )
# Make neuronal model. It has no channels, just for geometry
cell = moose.loadModel( './spinyNeuron.p', '/model/cell', 'Neutral' )
# We don't want the cell to do any calculations. Disable everything.
for i in moose.wildcardFind( '/model/cell/##' ):
i.tick = -1
diffConst = 0.0
# create container for model
model = moose.element( '/model' )
chem = moose.Neutral( '/model/chem' )
    # The naming of the compartments is dictated by the places that the
# chem model expects to be loaded.
compt0 = moose.NeuroMesh( '/model/chem/compt0' )
compt0.separateSpines = 1
compt0.geometryPolicy = 'cylinder'
compt1 = moose.SpineMesh( '/model/chem/compt1' )
moose.connect( compt0, 'spineListOut', compt1, 'spineList', 'OneToOne' )
compt2 = moose.PsdMesh( '/model/chem/compt2' )
moose.connect( compt0, 'psdListOut', compt2, 'psdList', 'OneToOne' )
#reacSystem = moose.loadModel( 'simpleOsc.g', '/model/chem', 'ee' )
makeChemModel( compt0 ) # Populate all 3 compts with the chem system.
makeChemModel( compt1 )
makeChemModel( compt2 )
compt0.diffLength = 2e-6 # This will be over 100 compartments.
# This is the magic command that configures the diffusion compartments.
compt0.subTreePath = cell.path + "/#"
moose.showfields( compt0 )
# Build the solvers. No need for diffusion in this version.
ksolve0 = moose.Ksolve( '/model/chem/compt0/ksolve' )
ksolve1 = moose.Ksolve( '/model/chem/compt1/ksolve' )
ksolve2 = moose.Ksolve( '/model/chem/compt2/ksolve' )
dsolve0 = moose.Dsolve( '/model/chem/compt0/dsolve' )
dsolve1 = moose.Dsolve( '/model/chem/compt1/dsolve' )
dsolve2 = moose.Dsolve( '/model/chem/compt2/dsolve' )
stoich0 = moose.Stoich( '/model/chem/compt0/stoich' )
stoich1 = moose.Stoich( '/model/chem/compt1/stoich' )
stoich2 = moose.Stoich( '/model/chem/compt2/stoich' )
# Configure solvers
stoich0.compartment = compt0
stoich1.compartment = compt1
stoich2.compartment = compt2
stoich0.ksolve = ksolve0
stoich1.ksolve = ksolve1
stoich2.ksolve = ksolve2
stoich0.dsolve = dsolve0
stoich1.dsolve = dsolve1
stoich2.dsolve = dsolve2
stoich0.path = '/model/chem/compt0/#'
stoich1.path = '/model/chem/compt1/#'
stoich2.path = '/model/chem/compt2/#'
assert( stoich0.numVarPools == 3 )
assert( stoich0.numProxyPools == 0 )
assert( stoich0.numRates == 4 )
assert( stoich1.numVarPools == 3 )
assert( stoich1.numProxyPools == 0 )
assert( stoich1.numRates == 4 )
assert( stoich2.numVarPools == 3 )
assert( stoich2.numProxyPools == 0 )
assert( stoich2.numRates == 4 )
dsolve0.buildNeuroMeshJunctions( dsolve1, dsolve2 )
'''
stoich0.buildXreacs( stoich1 )
stoich1.buildXreacs( stoich2 )
stoich0.filterXreacs()
stoich1.filterXreacs()
stoich2.filterXreacs()
'''
moose.element( '/model/chem/compt2/a[0]' ).concInit *= 1.5
# Create the output tables
num = compt0.numDiffCompts - 1
graphs = moose.Neutral( '/model/graphs' )
moose.le( '/model/chem/compt1' )
a = moose.element( '/model/chem/compt1' )
print((a.voxelVolume))
makeTab( 'a_soma', '/model/chem/compt0/a[0]' )
makeTab( 'b_soma', '/model/chem/compt0/b[0]' )
makeTab( 'a_apical', '/model/chem/compt0/a[' + str( num ) + ']' )
makeTab( 'b_apical', '/model/chem/compt0/b[' + str( num ) + ']' )
makeTab( 'a_spine', '/model/chem/compt1/a[5]' )
makeTab( 'b_spine', '/model/chem/compt1/b[5]' )
makeTab( 'a_psd', '/model/chem/compt2/a[5]' )
makeTab( 'b_psd', '/model/chem/compt2/b[5]' )
def makeTab( plotname, molpath ):
tab = moose.Table2( '/model/graphs/' + plotname ) # Make output table
# connect up the tables
moose.connect( tab, 'requestOut', moose.element( molpath ), 'getConc' );
def makeDisplay():
plt.ion()
fig = plt.figure( figsize=(10,12) )
dend = fig.add_subplot( 411 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'Dend voxel #' )
plt.legend()
timeLabel = plt.text(200, 0.5, 'time = 0')
spine = fig.add_subplot( 412 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'Spine voxel #' )
plt.legend()
psd = fig.add_subplot( 413 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'PSD voxel #' )
plt.legend()
timeSeries = fig.add_subplot( 414 )
timeSeries.set_ylim( 0, 0.6 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'time (seconds)' )
plt.legend()
a = moose.vec( '/model/chem/compt0/a' )
b = moose.vec( '/model/chem/compt0/b' )
line1, = dend.plot( list(range( len( a ))), a.conc, label='a' )
line2, = dend.plot( list(range( len( b ))), b.conc, label='b' )
dend.set_ylim( 0, 0.6 )
a = moose.vec( '/model/chem/compt1/a' )
b = moose.vec( '/model/chem/compt1/b' )
line3, = spine.plot( list(range( len( a ))), a.conc, label='a' )
line4, = spine.plot( list(range( len( b ))), b.conc, label='b' )
spine.set_ylim( 0, 0.6 )
a = moose.vec( '/model/chem/compt2/a' )
b = moose.vec( '/model/chem/compt2/b' )
line5, = psd.plot( list(range( len( a ))), a.conc, label='a' )
line6, = psd.plot( list(range( len( b ))), b.conc, label='b' )
psd.set_ylim( 0, 0.6 )
fig.canvas.draw()
return ( timeSeries, dend, spine, psd, fig, line1, line2, line3, line4, line5, line6, timeLabel )
def updateDisplay( plotlist ):
a = moose.vec( '/model/chem/compt0/a' )
b = moose.vec( '/model/chem/compt0/b' )
plotlist[5].set_ydata( a.conc )
plotlist[6].set_ydata( b.conc )
a = moose.vec( '/model/chem/compt1/a' )
b = moose.vec( '/model/chem/compt1/b' )
plotlist[7].set_ydata( a.conc )
plotlist[8].set_ydata( b.conc )
a = moose.vec( '/model/chem/compt2/a' )
b = moose.vec( '/model/chem/compt2/b' )
plotlist[9].set_ydata( a.conc )
plotlist[10].set_ydata( b.conc )
plotlist[4].canvas.draw()
def finalizeDisplay( plotlist, cPlotDt ):
for x in moose.wildcardFind( '/model/graphs/#[ISA=Table2]' ):
pos = numpy.arange( 0, x.vector.size, 1 ) * cPlotDt
line1, = plotlist[0].plot( pos, x.vector, label=x.name )
plotlist[4].canvas.draw()
print( "Hit '0' to exit" )
eval(str(input()))
def makeChemModel( compt ):
"""
    This function sets up a simple oscillatory chemical system within
the script. The reaction system is::
s ---a---> a // s goes to a, catalyzed by a.
s ---a---> b // s goes to b, catalyzed by a.
a ---b---> s // a goes to s, catalyzed by b.
b -------> s // b is degraded irreversibly to s.
in sum, **a** has a positive feedback onto itself and also forms **b**.
**b** has a negative feedback onto **a**.
Finally, the diffusion constant for **a** is 1/10 that of **b**.
"""
# create container for model
diffConst = 10e-12 # m^2/sec
motorRate = 1e-6 # m/sec
concA = 1 # millimolar
# create molecules and reactions
a = moose.Pool( compt.path + '/a' )
b = moose.Pool( compt.path + '/b' )
s = moose.Pool( compt.path + '/s' )
e1 = moose.MMenz( compt.path + '/e1' )
e2 = moose.MMenz( compt.path + '/e2' )
e3 = moose.MMenz( compt.path + '/e3' )
r1 = moose.Reac( compt.path + '/r1' )
a.concInit = 0.1
b.concInit = 0.1
s.concInit = 1
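    # the connections below implement the reactions from the docstring:
    # e1: s -> a (catalysed by a), e2: s -> b (catalysed by a),
    # e3: a -> s (catalysed by b), r1: b -> s (irreversible, Kb = 0)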
moose.connect( e1, 'sub', s, 'reac' )
moose.connect( e1, 'prd', a, 'reac' )
moose.connect( a, 'nOut', e1, 'enzDest' )
e1.Km = 1
e1.kcat = 1
moose.connect( e2, 'sub', s, 'reac' )
moose.connect( e2, 'prd', b, 'reac' )
moose.connect( a, 'nOut', e2, 'enzDest' )
e2.Km = 1
e2.kcat = 0.5
moose.connect( e3, 'sub', a, 'reac' )
moose.connect( e3, 'prd', s, 'reac' )
moose.connect( b, 'nOut', e3, 'enzDest' )
e3.Km = 0.1
e3.kcat = 1
moose.connect( r1, 'sub', b, 'reac' )
moose.connect( r1, 'prd', s, 'reac' )
r1.Kf = 0.3 # 1/sec
r1.Kb = 0 # 1/sec
# Assign parameters
a.diffConst = diffConst/10
b.diffConst = diffConst
s.diffConst = 0
def main():
"""
This example illustrates how to define a kinetic model embedded in
the branching pseudo-1-dimensional geometry of a neuron. The model
oscillates in space and time due to a Turing-like reaction-diffusion
mechanism present in all compartments. For the sake of this demo,
the initial conditions are set up slightly different on the PSD
compartments, so as to break the symmetry and initiate oscillations
in the spines.
This example uses an external electrical model file with basal
dendrite and three branches on
the apical dendrite. One of those branches has a dozen or so spines.
In this example we build an identical model in each compartment, using
the makeChemModel function. One could readily define a system with
distinct reactions in each compartment.
The model is set up to run using the Ksolve for integration and the
Dsolve for handling diffusion.
The display has four parts:
a. animated line plot of concentration against main compartment#.
b. animated line plot of concentration against spine compartment#.
c. animated line plot of concentration against psd compartment#.
d. time-series plot that appears after the simulation has
ended. The plot is for the last (rightmost) compartment.
"""
chemdt = 0.1 # Tested various dts, this is reasonable.
diffdt = 0.01
plotdt = 1
animationdt = 5
runtime = 800
makeModel()
plotlist = makeDisplay()
# Schedule the whole lot - autoscheduling already does this.
'''
for i in range( 11, 17 ):
moose.setClock( i, chemdt ) # for the chem objects
moose.setClock( 10, diffdt ) # for the diffusion
moose.setClock( 18, plotdt ) # for the output tables.
'''
moose.reinit()
for i in range( 0, runtime, animationdt ):
moose.start( animationdt )
plotlist[11].set_text( "time = %d" % i )
updateDisplay( plotlist )
finalizeDisplay( plotlist, plotdt )
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
| gpl-3.0 |
janhahne/nest-simulator | pynest/examples/spatial/connex.py | 20 | 2341 | # -*- coding: utf-8 -*-
#
# connex.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Connect with circular mask, flat probability using 2 populations of iaf_psc_alpha neurons
-------------------------------------------------------------------------------------------
Create two populations on a 30x30 grid of iaf_psc_alpha neurons,
connect with circular mask, flat probability,
visualize.
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
import numpy as np
nest.ResetKernel()
pos = nest.spatial.grid(shape=[30, 30], extent=[3., 3.])
#######################################################
# create and connect two populations
a = nest.Create('iaf_psc_alpha', positions=pos)
b = nest.Create('iaf_psc_alpha', positions=pos)
cdict = {'rule': 'pairwise_bernoulli',
'p': 0.5,
'mask': {'circular': {'radius': 0.5}}}
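# each source-target pair whose displacement lies inside the 0.5-radius circular
# mask is connected with probability 0.5; the Connect call below also draws each
# synaptic weight uniformly from [0.5, 2.0)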
nest.Connect(a, b,
conn_spec=cdict,
syn_spec={'weight': nest.random.uniform(0.5, 2.)})
#################################################################
# plot targets of neurons in different grid locations
# first, clear existing figure, get current figure
plt.clf()
fig = plt.gcf()
# plot targets of two source neurons into same figure, with mask
for src_index in [30 * 15 + 15, 0]:
# obtain node id for center
src = a[src_index:src_index + 1]
nest.PlotTargets(src, b, mask=cdict['mask'], fig=fig)
# beautify
plt.axes().set_xticks(np.arange(-1.5, 1.55, 0.5))
plt.axes().set_yticks(np.arange(-1.5, 1.55, 0.5))
plt.grid(True)
plt.axis([-2.0, 2.0, -2.0, 2.0])
plt.axes().set_aspect('equal', 'box')
plt.title('Connection targets')
plt.show()
# plt.savefig('connex.pdf')
| gpl-2.0 |
Achuth17/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
chimney37/ml-snippets | cust_kmeans_shift_dynamic.py | 1 | 5474 | #! /usr/bin/env python
# -*- coding:utf-8
# Custom mean shift clustering, with dynamic radius
# Special thanks: Harisson@pythonprogramming.net
'''
File name: cust_kmeans_shift_dynamic.py
Author: chimney37
Date created: 10/30/2017
Python Version: 3.62
'''
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
style.use('ggplot')
X, y = make_blobs(n_samples=50, centers=3, n_features=2)
"""X = np.array([[1,2],
[1.5,1.8],
[5,8],
[8,8],
[1,0.6],
[9,11],
[8,2],
[10,2],
[9,3],])
"""
colors = 10*["g", "r", "c", "b", "k"]
class Mean_Shift:
    # plan is to create a massive radius, but let it go in steps
def __init__(self, radius=None, radius_norm_step=100):
self.radius = radius
self.radius_norm_step = radius_norm_step
def fit(self, data):
if self.radius is None:
# find the "center" of all data
all_data_centroid = np.average(data, axis=0)
# take the norm of the data (maginitude of data from origin)
all_data_norm = np.linalg.norm(all_data_centroid)
# start with a radius (all data norm divided by step
self.radius = all_data_norm / self.radius_norm_step
centroids = {}
# Make all datapoints centroids
for i in range(len(data)):
centroids[i] = data[i]
# get a list from 0 to 99, reversed
weights = [i for i in range(self.radius_norm_step)][::-1]
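        # weights[0] belongs to the innermost radial band, so points closer to the
        # centroid contribute more copies when the new centroid is averaged below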
while True:
new_centroids = []
for i in centroids:
in_bandwidth = []
centroid = centroids[i]
for featureset in data:
                    # calculate the full distance between featureset and centroid
distance = np.linalg.norm(featureset-centroid)
# solve an initialization problem. featureset is compared to itself
if distance == 0:
distance = 0.00000000001
                    # the weight index is the whole distance divided by the radial step length
                    # (the bigger the distance, the larger the index, hence towards 99)
weight_index = int(distance/self.radius)
                    # if the weight index is beyond the maximum, clamp it to the outermost band
if weight_index > self.radius_norm_step-1:
weight_index = self.radius_norm_step-1
# add the "weighted" number of centroids to the in_bandwidth
to_add = (weights[weight_index]**2)*[featureset]
in_bandwidth += to_add
new_centroid = np.average(in_bandwidth, axis=0)
new_centroids.append(tuple(new_centroid))
uniques = sorted(list(set(new_centroids)))
to_pop = []
# remove uniques where the difference is less than the radial step distance
for i in uniques:
for ii in [i for i in uniques]:
if i == ii:
pass
elif np.linalg.norm(np.array(i)-np.array(ii)) < self.radius:
to_pop.append(ii)
break
for i in to_pop:
try:
uniques.remove(i)
except ValueError:
pass
prev_centroids = dict(centroids)
centroids = {}
for i in range(len(uniques)):
centroids[i] = np.array(uniques[i])
optimized = True
# compare previous centroids to the previous ones, and measure movement.
for i in centroids:
# if centroid moved, not converged
if not np.array_equal(centroids[i], prev_centroids[i]):
optimized = False
if not optimized:
break
# get out of loop when converged
if optimized:
break
# we expect fit to also classify the existing featureset
self.centroids = centroids
self.classifications = {}
for i in range(len(self.centroids)):
self.classifications[i] = []
for featureset in data:
# compare data to either centroid
distances = [np.linalg.norm(featureset-self.centroids[c]) for c in self.centroids]
classification = (distances.index(min(distances)))
# featureset that belongs to the cluster
self.classifications[classification].append(featureset)
def predict(self, data):
# compare data to either centroid
distances = [np.linalg.norm(featureset-self.centroids[centroid]) for centroid in self.centroids]
classification = (distances.index(min(distances)))
return classification
clf = Mean_Shift()
clf.fit(X)
centroids = clf.centroids
print(centroids)
for classification in clf.classifications:
color = colors[classification]
for featureset in clf.classifications[classification]:
plt.scatter(featureset[0], featureset[1], marker="x", color=color, s=150, linewidths=5, zorder=10)
for c in centroids:
plt.scatter(centroids[c][0], centroids[c][1], color='k',
marker="*", s=150, linewidths=5)
plt.show()
| mit |
musicalrunner/Geiger-Counter | cx_setup.py | 2 | 1082 | ''' Creates a windows executable with cx_freeze. Run with "python cx_setup build". The package is fairly massive
right now (~80 MB), partially because it includes extra libraries that are unnecessary, like QT, but one
must be very careful when removing libraries, as it can cause the whole package to fail. Consequently,
they are simply left in.'''
import sys
from cx_Freeze import setup, Executable
# Packages would be excluded here if desired.
build_exe_options = {
"includes":[
"scipy.sparse.csgraph._validation",
"scipy.io.matlab.streams",
"matplotlib.backends.backend_qt4agg"
]
}
# GUI applications require a different base on Windows (the default is for a console application).
base = None
if sys.platform == "win32":
base = "Win32GUI"
# There are many options here that may be included later if desired.
setup(
name = "Geiger-Counter",
version = "0.1",
description = "Geiger-Counter Lab Analysis",
options = {"build_exe": build_exe_options},
executables = [Executable("GUI.py", base=base)]
) | mit |
RobGrimm/prediction_based | Eval/functions.py | 1 | 7025 | import os
import numpy as np
from sklearn.manifold import TSNE
from sklearn.neighbors import KNeighborsClassifier
from sklearn.dummy import DummyClassifier
from sklearn.metrics import classification_report, f1_score
from matplotlib import pyplot
# set parameters for plots
pyplot.rcParams.update({'figure.figsize': (25, 20), 'font.size': 10})
# define directory for storing results
save_results_to_dir = os.path.abspath(os.path.dirname(__file__)).rstrip('/Eval') + '/results/'
########################################################################################################################
# helper functions
def get_pos_tag(word):
    # a word is a string of the form 'word-pos_tag'
# this returns the pos tag
return word.split('-')[1]
def get_pos_tags(words):
return [get_pos_tag(w) for w in words]
def get_paras_for_centering_legend_below_plot():
# get matplotlib parameters for centering the legend below plots
pyplot.legend(loc=9, bbox_to_anchor=(0.5, -0.1))
lgd = pyplot.legend(loc=9, bbox_to_anchor=(0.5, -0.1), ncol=2)
art = [lgd]
return art
def create_dir_if_not_exists(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def save_plot_to(plot_dir, plot_name, create_folder_if_not_exists=True):
if create_folder_if_not_exists:
create_dir_if_not_exists(plot_dir)
pyplot.savefig(plot_dir + plot_name, additional_artists=get_paras_for_centering_legend_below_plot(),
bbox_inches='tight')
pyplot.close()
def create_graph(x, y, marker, label, e=None):
# create custom matplotlib plot
assert len(x) == len(y)
if e is None:
pyplot.plot(x, y, marker, markersize=40, linewidth=9, label=label)
else:
pyplot.errorbar(x, y, e, markersize=40, linewidth=9, label=label)
pyplot.rcParams.update({'font.size': 50})
def plot_metric(plot_name, plot_type, ys, label, error=None):
xs = range(len(ys))
create_graph(xs, ys, marker='go-', label=label, e=error)
plot_dir = save_results_to_dir + '/%s/' % plot_type
save_plot_to(plot_dir, plot_name)
########################################################################################################################
# functions for: retrieving results from trained models, plotting results, saving results to disk
def get_f1_and_classification_report(embeddings_dict, classifier):
xs, ys, y_pred = get_xs_ys_predictions(embeddings_dict, classifier)
class_names = ['verbs', 'nouns', 'adjectives', 'closed class words']
report = classification_report(y_true=ys, y_pred=y_pred, target_names=class_names)
micro_f1 = f1_score(y_true=ys, y_pred=y_pred, average='micro')
macro_f1 = f1_score(y_true=ys, y_pred=y_pred, average='macro')
return micro_f1, macro_f1, report
def get_xs_ys_predictions(embeddings_dict, classifier):
"""
Run a classifier of type 'classifier' (one of: majority vote baseline,
    stratified sampling baseline, 10-NN classifier).
Return:
- xs: the word embeddings
- ys: the gold standard labels
- y_pred: the predicted labels
"""
assert classifier in ['majority_vote', 'stratified', '10-NN']
pos_ints = {'v': 0, 'n': 1, 'adj': 2, 'fn': 3}
ys = []
xs = []
words = sorted(embeddings_dict.keys())
for w in words:
xs.append(embeddings_dict[w])
        # get the embedding's pos tag and look up the pos tag's unique integer
label = pos_ints[get_pos_tag(w)]
ys.append(label)
clf = None
if classifier == 'majority_vote':
clf = DummyClassifier(strategy='most_frequent', random_state=0)
elif classifier == 'stratified':
clf = DummyClassifier(strategy='stratified', random_state=0)
elif classifier == '10-NN':
clf = KNeighborsClassifier(n_neighbors=10, algorithm='ball_tree')
clf.fit(xs, ys)
y_pred = clf.predict(xs)
return xs, ys, y_pred
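# Illustrative helper (added, not in the original module); it is never called
# here and only documents the expected input shape.
def _example_usage_sketch():
    """Sketch only: an embeddings dict maps 'word-postag' strings to embedding
    vectors; the toy vectors below are made-up values used to show how the
    report helper is called with the majority-vote baseline."""
    toy_embeddings = {'run-v': [0.3, 0.1], 'dog-n': [0.1, 0.2],
                      'big-adj': [0.0, 0.5], 'the-fn': [0.2, 0.2]}
    micro_f1, macro_f1, report = get_f1_and_classification_report(toy_embeddings, 'majority_vote')
    return micro_f1, macro_f1, report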
def write_preds_to_file(embeddings_dict, classifier, outfile_name):
"""
Write predictions made by 'classifier' and gold standard labels to file.
Files can be used for further processing -- e.g. to compare predictions made by different classifiers.
"""
results_dir = save_results_to_dir + '/predictions/'
create_dir_if_not_exists(results_dir)
    xs, ys, ys_pred = get_xs_ys_predictions(embeddings_dict, classifier)
with open('%s%s' % (results_dir, outfile_name), 'w') as outfile:
for x, y, y_pred in zip(range(len(xs)), ys, ys_pred):
outfile.write('%s %s %s\n' % (x, y, y_pred))
def plot_2D_embeddings(embeddings_dict, condition, training_stage):
"""
Take word embeddings from last epoch. Reduce them to 2 dimensions using the TSNE algorithm.
Create two plots and save to disk:
- colored embeddings: color each data point by syntactic type
- orthographic embeddings: plot each data point as the word's orthographic word form
"""
# set readable font size for orthographic embeddings
pyplot.rcParams.update({'font.size': 10})
tsne = TSNE(n_components=2)
color_maps = {'v': pyplot.get_cmap("Blues"), 'n': pyplot.get_cmap("Reds"), 'adj': pyplot.get_cmap("Greens"),
'fn': pyplot.get_cmap('Greys')}
words = embeddings_dict.keys()
vectors = embeddings_dict.values()
pos_tags = get_pos_tags(words)
reduced_data = tsne.fit_transform(np.array(vectors))
# plot embeddings as data points that are colored by syntactic class
for xy, pos in zip(reduced_data, pos_tags):
pyplot.plot(xy[0], xy[1], 'o', markersize=20, color=color_maps[pos](0.7))
# the directory for the plots
plot_dir = save_results_to_dir + '/t_sne_color_embeddings/'
# the name of the plot file
plot_name = '%s_%s.png' % (condition, training_stage)
save_plot_to(plot_dir, plot_name)
# plot plain words
fig = pyplot.figure()
ax = fig.add_subplot(111)
# plot embeddings as orthographic word forms
for i, j in zip(reduced_data, words):
pyplot.plot(i[0], i[1])
ax.annotate(j, xy=i)
plot_dir = save_results_to_dir + '/t_sne_orthographic_embeddings/'
save_plot_to(plot_dir, plot_name)
def results_to_disk(micro_f1, macro_f1, classification_report, epoch, condition, training_stage, newfile):
"""
Write results to file.
Either create a new file (newfile=True) or append to an existing file (newfile=False).
"""
results_dir = save_results_to_dir + '/results_over_training_stages/'
create_dir_if_not_exists(results_dir)
if newfile:
# write to new file
mode = 'w'
else:
# append to existing file
mode = 'a'
with open('%s%s.txt' % (results_dir, condition), mode) as outfile:
outfile.write('%s\n\n' % training_stage)
outfile.write('epoch: %s\n' % epoch)
outfile.write(classification_report)
outfile.write('\n\n')
outfile.write('10-NN micro F1: %s\n' % micro_f1)
outfile.write('10-NN macro F1: %s\n' % macro_f1)
outfile.write('\n\n\n') | mit |
destijl/forensicartifacts | frontend/thirdparty/networkx-1.9/examples/multigraph/chess_masters.py | 54 | 5146 | #!/usr/bin/env python
"""
An example of the MultiDiGraph class
The function chess_pgn_graph reads a collection of chess
matches stored in the specified PGN file
(PGN ="Portable Game Notation")
Here the (compressed) default file ---
chess_masters_WCC.pgn.bz2 ---
contains all 685 World Chess Championship matches
from 1886 - 1985.
(data from http://chessproblem.my-free-games.com/chess/games/Download-PGN.php)
The chess_pgn_graph() function returns a MultiDiGraph
with multiple edges. Each node is
the last name of a chess master. Each edge is directed
from white to black and contains selected game info.
The key statement in chess_pgn_graph below is
G.add_edge(white, black, game_info)
where game_info is a dict describing each game.
"""
# Copyright (C) 2006-2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
# tag names specifying what game info should be
# stored in the dict on each digraph edge
game_details=["Event",
"Date",
"Result",
"ECO",
"Site"]
def chess_pgn_graph(pgn_file="chess_masters_WCC.pgn.bz2"):
"""Read chess games in pgn format in pgn_file.
Filenames ending in .gz or .bz2 will be uncompressed.
Return the MultiDiGraph of players connected by a chess game.
Edges contain game data in a dict.
"""
import bz2
G=nx.MultiDiGraph()
game={}
datafile = bz2.BZ2File(pgn_file)
lines = (line.decode().rstrip('\r\n') for line in datafile)
for line in lines:
if line.startswith('['):
tag,value=line[1:-1].split(' ',1)
game[str(tag)]=value.strip('"')
else:
# empty line after tag set indicates
# we finished reading game info
if game:
white=game.pop('White')
black=game.pop('Black')
G.add_edge(white, black, **game)
game={}
return G
if __name__ == '__main__':
import networkx as nx
G=chess_pgn_graph()
ngames=G.number_of_edges()
nplayers=G.number_of_nodes()
print("Loaded %d chess games between %d players\n"\
% (ngames,nplayers))
# identify connected components
# of the undirected version
Gcc=list(nx.connected_component_subgraphs(G.to_undirected()))
if len(Gcc)>1:
print("Note the disconnected component consisting of:")
print(Gcc[1].nodes())
# find all games with B97 opening (as described in ECO)
openings=set([game_info['ECO']
for (white,black,game_info) in G.edges(data=True)])
print("\nFrom a total of %d different openings,"%len(openings))
print('the following games used the Sicilian opening')
print('with the Najdorff 7...Qb6 "Poisoned Pawn" variation.\n')
for (white,black,game_info) in G.edges(data=True):
if game_info['ECO']=='B97':
print(white,"vs",black)
for k,v in game_info.items():
print(" ",k,": ",v)
print("\n")
try:
import matplotlib.pyplot as plt
except ImportError:
import sys
print("Matplotlib needed for drawing. Skipping")
sys.exit(0)
# make new undirected graph H without multi-edges
H=nx.Graph(G)
# edge width is proportional number of games played
edgewidth=[]
for (u,v,d) in H.edges(data=True):
edgewidth.append(len(G.get_edge_data(u,v)))
# node size is proportional to number of games won
wins=dict.fromkeys(G.nodes(),0.0)
for (u,v,d) in G.edges(data=True):
r=d['Result'].split('-')
if r[0]=='1':
wins[u]+=1.0
elif r[0]=='1/2':
wins[u]+=0.5
wins[v]+=0.5
else:
wins[v]+=1.0
try:
pos=nx.graphviz_layout(H)
except:
pos=nx.spring_layout(H,iterations=20)
plt.rcParams['text.usetex'] = False
plt.figure(figsize=(8,8))
nx.draw_networkx_edges(H,pos,alpha=0.3,width=edgewidth, edge_color='m')
nodesize=[wins[v]*50 for v in H]
nx.draw_networkx_nodes(H,pos,node_size=nodesize,node_color='w',alpha=0.4)
nx.draw_networkx_edges(H,pos,alpha=0.4,node_size=0,width=1,edge_color='k')
nx.draw_networkx_labels(H,pos,fontsize=14)
font = {'fontname' : 'Helvetica',
'color' : 'k',
'fontweight' : 'bold',
'fontsize' : 14}
plt.title("World Chess Championship Games: 1886 - 1985", font)
# change font and write text (using data coordinates)
font = {'fontname' : 'Helvetica',
'color' : 'r',
'fontweight' : 'bold',
'fontsize' : 14}
plt.text(0.5, 0.97, "edge width = # games played",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.text(0.5, 0.94, "node size = # games won",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.axis('off')
plt.savefig("chess_masters.png",dpi=75)
print("Wrote chess_masters.png")
plt.show() # display
| apache-2.0 |
qusp/orange3 | Orange/projection/manifold.py | 2 | 2351 | import sklearn.manifold as skl_manifold
from Orange.distance import SklDistance, SpearmanDistance, PearsonDistance
from Orange.projection import SklProjector
__all__ = ["MDS", "Isomap", "LocallyLinearEmbedding"]
class MDS(SklProjector):
__wraps__ = skl_manifold.MDS
name = 'mds'
def __init__(self, n_components=2, metric=True, n_init=4, max_iter=300,
eps=0.001, n_jobs=1, random_state=None,
dissimilarity='euclidean',
preprocessors=None):
super().__init__(preprocessors=preprocessors)
self.params = vars()
self._metric = dissimilarity
def __call__(self, data):
distances = SklDistance, SpearmanDistance, PearsonDistance
if isinstance(self._metric, distances):
data = self.preprocess(data)
X, Y, domain = data.X, data.Y, data.domain
dist_matrix = self._metric(X).X
self.params['dissimilarity'] = 'precomputed'
clf = self.fit(dist_matrix, Y=Y)
        elif self._metric == 'precomputed':
dist_matrix, Y, domain = data.X, None, None
clf = self.fit(dist_matrix, Y=Y)
else:
data = self.preprocess(data)
X, Y, domain = data.X, data.Y, data.domain
clf = self.fit(X, Y=Y)
clf.domain = domain
return clf
def fit(self, X, init=None, Y=None):
proj = self.__wraps__(**self.params)
return proj.fit(X, init=init, y=Y)
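# Illustrative sketch, not part of Orange's code base: typical use of the MDS
# projector on an Orange data table; "iris" is assumed to be one of the
# bundled example data sets.
#
#     from Orange.data import Table
#     mds = MDS(n_components=2)
#     mds_model = mds(Table("iris"))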
class Isomap(SklProjector):
__wraps__ = skl_manifold.Isomap
name = 'isomap'
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
max_iter=None, path_method='auto',
neighbors_algorithm='auto', preprocessors=None):
super().__init__(preprocessors=preprocessors)
self.params = vars()
class LocallyLinearEmbedding(SklProjector):
__wraps__ = skl_manifold.LocallyLinearEmbedding
name = 'lle'
def __init__(self, n_neighbors=5, n_components=2, reg=0.001,
eigen_solver='auto', tol=1e-06 , max_iter=100,
method='standard', hessian_tol=0.0001,
modified_tol=1e-12, neighbors_algorithm='auto',
random_state=None, preprocessors=None):
super().__init__(preprocessors=preprocessors)
self.params = vars()
| bsd-2-clause |
mihaic/brainiak | tests/fcma/test_voxel_selection.py | 1 | 4725 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from brainiak.fcma.voxelselector import VoxelSelector
from scipy.stats.mstats import zscore
from sklearn import svm
from sklearn.linear_model import LogisticRegression
import numpy as np
import math
from mpi4py import MPI
from numpy.random import RandomState
# specify the random state to fix the random numbers
prng = RandomState(1234567890)
def create_epoch():
row = 12
col = 5
mat = prng.rand(row, col).astype(np.float32)
mat = zscore(mat, axis=0, ddof=0)
# if zscore fails (standard deviation is zero),
# set all values to be zero
mat = np.nan_to_num(mat)
mat = mat / math.sqrt(mat.shape[0])
return mat
def test_voxel_selection():
fake_raw_data = [create_epoch() for i in range(8)]
labels = [0, 1, 0, 1, 0, 1, 0, 1]
# 2 subjects, 4 epochs per subject
vs = VoxelSelector(labels, 4, 2, fake_raw_data, voxel_unit=1)
# test scipy normalization
fake_corr = prng.rand(1, 4, 5).astype(np.float32)
fake_corr = vs._correlation_normalization(fake_corr)
if MPI.COMM_WORLD.Get_rank() == 0:
expected_fake_corr = [[[1.06988919, 0.51641309, -0.46790636,
-1.31926763, 0.2270218],
[-1.22142744, -1.39881694, -1.2979387,
1.05702305, -0.6525566],
[0.89795232, 1.27406132, 0.36460185,
0.87538344, 1.5227468],
[-0.74641371, -0.39165771, 1.40124381,
-0.61313909, -1.0972116]]]
assert np.allclose(fake_corr, expected_fake_corr), \
'within-subject normalization does not provide correct results'
# for cross validation, use SVM with precomputed kernel
# no shrinking, set C=1
clf = svm.SVC(kernel='precomputed', shrinking=False, C=1, gamma='auto')
results = vs.run(clf)
if MPI.COMM_WORLD.Get_rank() == 0:
output = [None] * len(results)
for tuple in results:
output[tuple[0]] = int(8*tuple[1])
expected_output = [7, 4, 6, 4, 4]
assert np.allclose(output, expected_output, atol=1), \
'voxel selection via SVM does not provide correct results'
# for cross validation, use logistic regression
clf = LogisticRegression()
results = vs.run(clf)
if MPI.COMM_WORLD.Get_rank() == 0:
output = [None] * len(results)
for tuple in results:
output[tuple[0]] = int(8*tuple[1])
expected_output = [6, 3, 6, 4, 4]
assert np.allclose(output, expected_output, atol=1), (
"voxel selection via logistic regression does not provide correct "
"results")
def test_voxel_selection_with_two_masks():
fake_raw_data1 = [create_epoch() for i in range(8)]
fake_raw_data2 = [create_epoch() for i in range(8)]
labels = [0, 1, 0, 1, 0, 1, 0, 1]
# 2 subjects, 4 epochs per subject
vs = VoxelSelector(labels, 4, 2, fake_raw_data1,
raw_data2=fake_raw_data2, voxel_unit=1)
# for cross validation, use SVM with precomputed kernel
# no shrinking, set C=1
clf = svm.SVC(kernel='precomputed', shrinking=False, C=1, gamma='auto')
results = vs.run(clf)
if MPI.COMM_WORLD.Get_rank() == 0:
output = [None] * len(results)
for tuple in results:
output[tuple[0]] = int(8*tuple[1])
expected_output = [3, 3, 3, 6, 6]
assert np.allclose(output, expected_output, atol=1), \
'voxel selection via SVM does not provide correct results'
# for cross validation, use logistic regression
clf = LogisticRegression()
results = vs.run(clf)
if MPI.COMM_WORLD.Get_rank() == 0:
output = [None] * len(results)
for tuple in results:
output[tuple[0]] = int(8*tuple[1])
expected_output = [3, 4, 4, 6, 6]
assert np.allclose(output, expected_output, atol=1), (
"voxel selection via logistic regression does not provide correct "
"results")
if __name__ == '__main__':
test_voxel_selection()
test_voxel_selection_with_two_masks()
| apache-2.0 |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/tests/test_dummy.py | 2 | 10844 | import warnings
import numpy as np
from sklearn.base import clone
from sklearn.externals.six.moves import xrange
from sklearn.utils.testing import (assert_array_equal,
assert_equal,
assert_almost_equal,
assert_raises)
from sklearn.dummy import DummyClassifier, DummyRegressor
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in xrange(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
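# Illustrative note, not part of the original test suite: the helper above
# exercises probability output such as
#     clf = DummyClassifier(strategy="most_frequent").fit([[0], [0]], [1, 1])
#     clf.predict_proba([[0]])   # -> array([[ 1.]]) for the single class seen
# where the toy inputs are made-up values.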
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
def test_most_frequent_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in xrange(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in xrange(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
| apache-2.0 |
SeldonIO/seldon-ucl | dcs/testing/test_clean_module.py | 1 | 18381 | import numpy as np
import pandas as pd
import pytest
from dcs import clean
d = {'col1' : pd.Series([1., 2., 3., 4.], index=[0, 1, 3, 4]),
'col2' : pd.Series([1., 2., 2., 4.], index=[0, 1, 2, 4]),
'col3' : pd.Series([1., 2., 3., 4., 5.])}
df = pd.DataFrame(d)
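# For reference (added note, not in the original test module), the fixture
# frame above looks like this, with NaN where an index is missing from a series:
#
#    col1  col2  col3
# 0   1.0   1.0   1.0
# 1   2.0   2.0   2.0
# 2   NaN   2.0   3.0
# 3   3.0   NaN   4.0
# 4   4.0   4.0   5.0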
#-----------------------------(fillDown)-----------------------------
#fill all columns of the dataframe
def test_fill_pad1():
testing_df = df.copy()
clean.fillDown(testing_df, 0, 1, 'pad')
res = {'col1' : pd.Series([1., 2., 2., 3., 4.], index=[0, 1, 2, 3, 4]),
'col2' : pd.Series([1., 2., 2., 2., 4.], index=[0, 1, 2, 3, 4]),
'col3' : pd.Series([1., 2., 3., 4., 5.])}
df_res = pd.DataFrame(res)
assert (((testing_df.fillna(0) == df_res.fillna(0)).all()).all()) == True
#fill a single column of the dataframe
def test_fill_pad2():
testing_df = df.copy()
clean.fillDown(testing_df, 0, 0, 'pad')
res = {'col1' : pd.Series([1., 2., 2., 3., 4.], index=[0, 1, 2, 3, 4]),
'col2' : pd.Series([1., 2., 2., 4.], index=[0, 1, 2, 4]),
'col3' : pd.Series([1., 2., 3., 4., 5.,])}
df_res = pd.DataFrame(res)
assert (((testing_df.fillna(0) == df_res.fillna(0)).all()).all()) == True
#dataframe column does not exist
def test_fill_pad3():
	testing_df = df.copy()
with pytest.raises(Exception):
clean.fillDown(testing_df, 4, 4, 'pad')
#dataframe columnFrom > columnTo
def test_fill_pad4():
testing_df = df.copy()
clean.fillDown(testing_df, 1, 0,'pad')
with pytest.raises(Exception):
clean.fillDown(testing_df, 3, 3, 'pad')
#fill all columns of the dataframe
def test_fill_back1():
testing_df =df.copy()
clean.fillDown(testing_df, 0, 1, 'bfill')
res = {'col1' : pd.Series([1., 2., 3., 3., 4.], index=[0, 1, 2, 3, 4]),
'col2' : pd.Series([1., 2., 2., 4., 4.], index=[0, 1, 2, 3, 4]),
'col3' : pd.Series([1., 2., 3., 4., 5.,])}
df_res = pd.DataFrame(res)
assert (((testing_df.fillna(0) == df_res.fillna(0)).all()).all()) == True
#fill a column that has no missing values
def test_fill_back2():
testing_df = df.copy()
clean.fillDown(testing_df, 2, 2, 'bfill')
assert (((testing_df.fillna(0) == df.fillna(0)).all()).all()) == True
#-------------------------(invalid values)----------------------------------
#test for index positions of invalid values in a dataframe
def test_invalid1():
testing_df = df.copy()
result = clean.invalidValuesInDataFrame(testing_df)
assert (result['col1']['hasInvalidValues'] == True)
assert (result['col1']['invalidIndices'] == [2])
assert (result['col2']['hasInvalidValues'] == True)
assert (result['col2']['invalidIndices'] == [3])
assert (result['col3']['hasInvalidValues'] == False)
#test for missing/invalid values in a dataframe without any
def test_invalid2():
testing_df = df.copy()
	result = clean.invalidValuesInDataFrame(testing_df.dropna())
assert (result['col1']['hasInvalidValues'] == False)
assert (result['col2']['hasInvalidValues'] == False)
assert (result['col3']['hasInvalidValues'] == False)
#test for multiple missing values in a single column
def test_invalid3():
testing_df = df.copy()
testing_df['col4'] = pd.Series([1., 2], index = [0, 4])
result = clean.invalidValuesInDataFrame(testing_df)
assert (result['col4']['hasInvalidValues'] == True)
assert (result['col4']['invalidIndices'] == [1, 2, 3])
#---------------------------(interpolation)--------------------------------
#fill a single column of dataframe
def test_interpolate_polynomial1():
testing_df = df.copy()
result_df = df.copy()
clean.fillByInterpolation(testing_df, 0, 'polynomial', 2)
result_df['col1'].interpolate(method='polynomial', order=2, inplace=True)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#fill a column that does not exist
def test_interpolate_polynomial2():
testing_df = df.copy()
with pytest.raises(Exception):
clean.fillByInterpolation(testing_df, 4, 'polynomial', 2)
#fill a single column of a dataframe
def test_interpolate_spline1():
testing_df = df.copy()
result_df = df.copy()
clean.fillByInterpolation(testing_df, 1, 'spline', 1)
result_df['col2'].interpolate(method='spline', order=1, inplace=True)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#fill a single column of a dataframe
def test_interpolate_linear1():
testing_df = df.copy()
result_df = df.copy()
clean.fillByInterpolation(testing_df, 1, 'linear', 6)
result_df['col2'].interpolate(method='linear', inplace=True)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#fill a single column of a dataframe
def test_interpolate_pchip1():
testing_df = df.copy()
result_df = df.copy()
clean.fillByInterpolation(testing_df, 0, 'PCHIP', 4)
result_df['col1'].interpolate(method='pchip',inplace=True)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#apply to a column that has no missing values
def test_interpolate_all1():
testing_df = df.copy()
clean.fillByInterpolation(testing_df, 2, 'linear', 4)
assert (((testing_df.fillna(0) == df.fillna(0)).all()).all()) == True
#-----------------------------(custom value)-------------------------------------
#fill missing values in a single column with a custom value
def test_custom_value1():
testing_df = df.copy()
result_df = df.copy()
clean.fillWithCustomValue(testing_df, 1, 'testValue')
result_df['col2'].fillna(value='testValue', inplace=True)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#select a column that does not exist
def test_custom_value2():
testing_df = df.copy()
with pytest.raises(Exception):
clean.fillWithCustomValue(testing_df, 6, 'testValue')
#-----------------------------(fill with average)----------------------------------
#fill missng values in a single column with mean value
def test_average_mean1():
testing_df = df.copy()
result_df = df.copy()
clean.fillWithAverage(testing_df, 1, 'mean')
average = result_df['col2'].mean()
result_df['col2'].fillna(value=average, inplace=True)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#fill missng values in a single column with median value
def test_average_median1():
testing_df = df.copy()
result_df = df.copy()
clean.fillWithAverage(testing_df, 1, 'median')
average = result_df['col2'].median()
result_df['col2'].fillna(value=average, inplace=True)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#fill missng values in a single column with mode value
def test_average_mode1():
testing_df = df.copy()
result_df = df.copy()
clean.fillWithAverage(testing_df, 1, 'mode')
average = 2
result_df['col2'].fillna(value=average, inplace=True)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#fill missng values in a single column with mode when there are multiple modes
def test_average_mode2():
testing_df = df.copy()
testing_df['col4'] = pd.Series([2., 2., 3., 3.], index=[0, 1, 4, 5])
result_df = testing_df.copy()
clean.fillWithAverage(testing_df, 3, 'mode')
average = 2
result_df['col4'].fillna(value=average, inplace=True)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#select a column that is already full
def test_average_all2():
testing_df = df.copy()
clean.fillWithAverage(testing_df, 2, 'mean')
assert (((testing_df.fillna(0) == df.fillna(0)).all()).all()) == True
#select a column that does not exist
def test_average_all3():
testing_df = df.copy()
with pytest.raises(Exception):
clean.fillWithAverage(testing_df, 10, 'mean')
#---------------------------------(normalize)-----------------------------------------
#test normalistaion on a single column using a positive range
def test_normalize1():
testing_df = df.copy()
result_df = df.copy()
clean.normalize(testing_df, 1, 0 ,18)
result_df['col2'] = 0 + ((result_df['col2'] - result_df['col2'].min()) * (18 - 0)) / (result_df['col2'].max() - result_df['col2'].min())
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#test normalistaion on a single column using a negative range
def test_normalize2():
testing_df = df.copy()
result_df = df.copy()
clean.normalize(testing_df, 1, -3 , -18)
result_df['col2'] = -3 + ((result_df['col2'] - result_df['col2'].min()) * (-18 - (-3))) / (result_df['col2'].max() - result_df['col2'].min())
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#nomalize a column that has a range = 0
def test_normalize3():
testing_df = df.copy()
testing_df['col4'] = pd.Series([1, 1, 1], index = [0, 1, 2])
result_df = testing_df.copy()
clean.normalize(testing_df, 3, 0 , 18)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#nomalize a column that does not exist
def test_normalize4():
testing_df = df.copy()
with pytest.raises(Exception):
clean.normalize(testing_df, 20, 0 , 18)
#-----------------------------------(standardize)---------------------------------------
#test standardisation on a single column
def test_standardize1():
testing_df = df.copy()
result_df = df.copy()
clean.standardize(testing_df, 0)
result_df['col1'] = (result_df['col1'] - result_df['col1'].mean()) / result_df['col1'].std()
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#test on a column index that does not exist
def test_standardize2():
testing_df = df.copy()
with pytest.raises(Exception):
clean.standardize(testing_df, 6,)
#standardize a column that has a standard deviation = 0
def test_standardize3():
testing_df = df.copy()
testing_df['col4'] = pd.Series([1, 1, 1], index = [0, 1, 2])
result_df = testing_df.copy()
clean.standardize(testing_df, 3)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#-------------------------------(delete rows with na)----------------------------------
#test deleting all rows in a single column that have NA values
def test_delete_na1():
testing_df = df.copy()
result_df = df.copy()
clean.deleteRowsWithNA(testing_df, 0)
result_df.dropna(subset=['col1'], inplace=True)
result_df.reset_index(drop=True, inplace=True)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#test deleting rows that have na on a column with no NA values
def test_delete_na2():
testing_df = df.copy()
result_df = df.copy()
clean.deleteRowsWithNA(testing_df, 2)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#select a column that does not exist
def test_delete_na3():
testing_df = df.copy()
with pytest.raises(Exception):
clean.deleteRowsWithNA(testing_df, 16)
#----------------------------------(find and replace)-------------------------------
#test replacing a single type of value in a single column (Numeric)
def test_find_replace1():
testing_df = df.copy()
result_df = df.copy()
clean.findReplace(result_df, 0, [2], [8], False)
testing_df['col1'].replace(to_replace=2, value=8, regex=False, inplace=True)
result_df = result_df.astype(float)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#test replacing multiple values in a single column (Numeric)
def test_find_replace2():
testing_df = df.copy()
result_df = df.copy()
clean.findReplace(result_df, 0, [2, 3], [8, 10], False)
testing_df['col1'].replace(to_replace=2, value=8, regex=False, inplace=True)
testing_df['col1'].replace(to_replace=3, value=10, regex=False, inplace=True)
result_df = result_df.astype(float)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#test replacing a value that does not exist in a column (Numeric)
def test_find_replace3():
testing_df = df.copy()
result_df = df.copy()
clean.findReplace(result_df, 0, [16], [32], False)
result_df = result_df.astype(float)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#test replacing a single type of value in a column (String)
def test_find_replace4():
testing_df = df.copy()
testing_df['col4'] = pd.Series(['hello', 'world', 'wild'], index = [0, 1, 2])
result_df = testing_df.copy()
clean.findReplace(result_df, 0, ['hello'], ['bello'], False)
testing_df['col1'].replace(to_replace='hello', value='bello', regex=False, inplace=True)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#test replacing a multiple values in a column (String)
def test_find_replace5():
testing_df = df.copy()
testing_df['col4'] = pd.Series(['hello', 'world', 'wild'], index = [0, 1, 2])
result_df = testing_df.copy()
clean.findReplace(result_df, 0, ['hello', 'wild', 'world'], ['bello', 'bello', 'bello'], False)
testing_df['col1'].replace(to_replace='hello', value='bello', regex=False, inplace=True)
testing_df['col1'].replace(to_replace='wild', value='bello', regex=False, inplace=True)
testing_df['col1'].replace(to_replace='world', value='bello', regex=False, inplace=True)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#test replacing all number values using regex in a column (Numeric)
def test_find_replace6():
testing_df = df.copy()
result_df = df.copy()
clean.findReplace(result_df, 0, ["[0-9]+\.?[0-9]*"], [32], True)
testing_df['col1'].replace(to_replace='[0-9]+\.?[0-9]*', value='32', regex=True, inplace=True)
result_df = result_df.astype(float)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#test replacing all string values using regex in a column (String)
def test_find_replace7():
testing_df = df.copy()
testing_df['col4'] = pd.Series(['hello', 'world', 'wild'], index = [0, 1, 2])
result_df = testing_df.copy()
clean.findReplace(result_df, 3, [".*"], ['hello'], True)
testing_df['col4'].replace(to_replace='.*', value='hello', regex=True, inplace=True)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#select a column that does not exist
def test_find_replace8():
testing_df = df.copy()
with pytest.raises(Exception):
clean.findReplace(testing_df, 30, [".*"], ['hello'], True)
#------------------------------------(Geneate Dummies)----------------------------------------
#test genearting dummies on a column with NA values
def test_generate_dummies1():
testing_df = df.copy()
result_df = df.copy()
clean.generateDummies(testing_df, 0 , False)
result_df.insert(1, 'col1_1.0', [1,0,0,0,0], allow_duplicates=True)
result_df.insert(2, 'col1_2.0', [0,1,0,0,0], allow_duplicates=True)
result_df.insert(3, 'col1_3.0', [0,0,0,1,0], allow_duplicates=True)
result_df.insert(4, 'col1_4.0', [0,0,0,0,1], allow_duplicates=True)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#test generating dummies on a column with a single repeated value which is a string
def test_generate_dummies2():
testing_df = df.copy()
testing_df['col4'] = pd.Series(['hello'], index = [2])
result_df = testing_df.copy()
clean.generateDummies(testing_df, 3 , False)
result_df.insert(4, 'col4_hello', [0,0,1,0,0], allow_duplicates=True)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#select a column that does not exist
def test_generate_dummies3():
testing_df = df.copy()
with pytest.raises(Exception):
clean.generateDummies(testing_df, 30 , False)
#----------------------------------(insert Duplicate Column)----------------------------------
#test adding a duplicate column to a dataframe
def test_duplicate_column1():
testing_df = df.copy()
result_df = df.copy()
clean.insertDuplicateColumn(testing_df, 0)
result_df.insert(1, 'col1_copy', result_df['col1'], allow_duplicates=True)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#test adding a duplicate column using a column index that does not exist
def test_duplicate_column2():
testing_df = df.copy()
with pytest.raises(Exception):
clean.insertDuplicateColumn(testing_df, 6)
#----------------------------------------(Split Column)---------------------------------------
#split a column using '.' as a delimiter
def test_split_delimiter():
testing_df = df.copy()
testing_df['col4'] = pd.Series(['hello.world', 'world.hello', 'p.4q', 'r', 's'])
result_df = testing_df.copy()
clean.splitColumn(testing_df, 3, '.', regex=False)
result_df['col4_0'] = pd.Series(['hello', 'world', 'p','r', 's'])
result_df['col4_1'] = pd.Series(['world', 'hello', '4q'])
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#split a column using a delimiter that is not present in the column
def test_split_delimiter2():
testing_df = df.copy()
testing_df['col4'] = pd.Series(['hello.world', 'world.hello', 'p.4q', 'r', 's'])
result_df = testing_df.copy()
clean.splitColumn(testing_df, 3, '-', regex=False)
result_df['col4_0'] = result_df['col4']
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#split a column using a regex as the delimiter
def test_split_delimiter3():
testing_df = df.copy()
testing_df['col4'] = pd.Series(['hello.world', 'world.hello', 'p.4q', 'r', 's'])
result_df = testing_df.copy()
clean.splitColumn(testing_df, 3, '\.', regex=True)
result_df['col4_0'] = pd.Series(['hello', 'world', 'p','r', 's'])
result_df['col4_1'] = pd.Series(['world', 'hello', '4q'])
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#test splitting a column that does not exist
def test_split_delimiter4():
testing_df = df.copy()
with pytest.raises(Exception):
clean.splitColumn(testing_df, 7, '.')
#--------------------------------------(Combine Columns)---------------------------------------
#test combining two columns that contain some NA values
def test_combine1():
testing_df = df.copy()
result_df = df.copy()
clean.combineColumns(testing_df, ['col1', 'col2'], seperator=' ', newName="combined")
result_df.insert(0, 'combined', ['1.0 1.0', '2.0 2.0', '2.0', '3.0', '4.0 4.0'], allow_duplicates=True)
assert (((testing_df.fillna(0) == result_df.fillna(0)).all()).all()) == True
#test combining less than two columns
def test_combine2():
testing_df = df.copy()
result_df = df.copy()
with pytest.raises(Exception):
clean.combineColumns(testing_df, ['col1'], seperator=' ', newName="combined")
#test combining a column that exists with one that does not
def test_combine3():
testing_df = df.copy()
result_df = df.copy()
with pytest.raises(Exception):
clean.combineColumns(testing_df, ['col1', 'col16'], seperator=' ', newName="combined")
| apache-2.0 |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Padova_cont/padova_cont_4/IR.py | 33 | 7344 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
numplots = 12
def add_sub_plot(sub_num):
plt.subplot(3,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 9:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 12:
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
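# Worked illustration of the scaling above (added note, not in the original
# script): a line exactly as strong as the 4860 reference gives a ratio of 1,
# so the stored value is log10(4860*1) ~= 3.69; ratios below ~1/4860 give a
# non-positive log and the entry keeps its initialised value of 0.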
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [75, #AR 3 7135
76, #TOTL 7325
78, #AR 3 7751
79, #6LEV 8446
80, #CA2X 8498
81, #CA2Y 8542
82, #CA2Z 8662
83, #CA 2 8579A
84, #S 3 9069
85, #H 1 9229
86, #S 3 9532
87] #H 1 9546
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("IR Lines", fontsize=14)
# ---------------------------------------------------
for i in range(12):
	add_sub_plot(i+1)
ax1 = plt.subplot(3,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Near_IR.pdf')
plt.clf()
| gpl-2.0 |
NelisVerhoef/scikit-learn | sklearn/cross_validation.py | 13 | 66570 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
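# Illustrative sketch, not part of scikit-learn: a custom CV iterator only
# needs to subclass _PartitionIterator and implement _iter_test_indices, e.g.
#
#     class HoldOutLastK(_PartitionIterator):
#         """Hypothetical iterator that tests on the last k samples."""
#         def __init__(self, n, k):
#             super(HoldOutLastK, self).__init__(n)
#             self.k = k
#
#         def _iter_test_indices(self):
#             yield np.arange(self.n - self.k, self.n)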
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
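        # number of test sets is "n choose p": n! / (p! * (n - p)!)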
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling by default).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
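        # shuffle the sample indices once up front; each fold is then a
        # contiguous block of the shuffled order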
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
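        # the first n % n_folds folds each get one extra sample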
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
    The folds are approximately balanced in the sense that the number of
    samples is approximately the same in each fold.
Parameters
----------
labels : array-like with shape (n_samples, )
Contains a label for each sample.
The folds are built so that the same label does not appear in two
different folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(labels, n_folds=2)
>>> len(label_kfold)
2
>>> print(label_kfold)
sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2)
>>> for train_index, test_index in label_kfold:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def __init__(self, labels, n_folds=3, shuffle=False, random_state=None):
super(LabelKFold, self).__init__(len(labels), n_folds, shuffle,
random_state)
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if n_folds > n_labels:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of labels: {1}.").format(n_folds,
n_labels))
        # Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(n_folds)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
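        # Map each sample to the fold assigned to its label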
self.idxs = label_to_fold[labels]
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
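        # Despite the name, this yields a boolean mask per fold; the boolean
        # assignment in _iter_test_masks treats it like integer indices.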
for i in range(self.n_folds):
yield (self.idxs == i)
def __repr__(self):
return '{0}.{1}(n_labels={2}, n_folds={3})'.format(
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one has the
    complementary size.
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
                # the test split can be too big because we used
                # KFold(max(c, self.n_folds), self.n_folds) instead of
                # KFold(c, self.n_folds) so that the construction does not
                # crash even if the data is not 100% stratifiable for all the
                # labels (a warning is issued instead of raising an exception).
                # If this is the case, trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
    The difference between LeavePLabelOut and LeaveOneLabelOut is that
    the former builds the test sets with all the samples assigned to
    ``p`` different values of the labels while the latter builds each test
    set from the samples assigned to a single label.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
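        # p_i: class proportions; n_i / t_i: per-class train / test counts,
        # with t_i capped so that train + test never exceed the class size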
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
            # Because of rounding issues (n_train and n_test are not exact
            # multiples of the per-class counts), we may end up here with
            # fewer samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
                # Complete the split by randomly assigning the missing indices
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
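        # a test_fold value of -1 marks samples that never appear in a test set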
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
'''Shuffle-Labels-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(labels, p=10)`` would be
``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
    Note: unlike ShuffleSplit, the parameters ``test_size`` and ``train_size``
    refer to labels, not to samples.
Parameters
----------
labels : array, [n_samples]
Labels of samples
n_iter : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
classes, label_indices = np.unique(labels, return_inverse=True)
super(LabelShuffleSplit, self).__init__(
len(classes),
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.labels = labels
self.classes = classes
self.label_indices = label_indices
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.labels,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _iter_indices(self):
for label_train, label_test in super(LabelShuffleSplit,
self)._iter_indices():
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(self.label_indices, label_train))
test = np.flatnonzero(np.in1d(self.label_indices, label_test))
yield train, test
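# A minimal usage sketch for LabelShuffleSplit (hypothetical labels; the exact
# splits depend on the RNG):
#
#     labels = [0, 0, 1, 1, 2, 2]
#     cv = LabelShuffleSplit(labels, n_iter=4, test_size=0.5, random_state=0)
#     for train, test in cv:
#         ...  # train/test are sample indices; no label straddles the split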
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : integer or cross-validation generator, optional, default=3
A cross-validation generator to use. If int, determines the number
of folds in StratifiedKFold if estimator is a classifier and the
target y is binary or multiclass, or the number of folds in KFold
otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
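    # Map the concatenated out-of-fold predictions back to the original
    # sample order.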
inv_locs = np.empty(len(locs), dtype=int)
inv_locs[locs] = np.arange(len(locs))
# Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
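    # Pass the train indices as well so that precomputed kernels are sliced
    # as (test rows) x (train columns) in _safe_split.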
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional, default=3
A cross-validation generator to use. If int, determines the number
of folds in StratifiedKFold if estimator is a classifier and the
target y is binary or multiclass, or the number of folds in KFold
otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
Return parameters that has been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
        None, in which case 3-fold cross-validation is used, or another
        object that will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
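    # An integer cv selects StratifiedKFold for classifiers with binary or
    # multiclass targets, and plain KFold otherwise.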
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional, default=3
A cross-validation generator to use. If int, determines the number
of folds in StratifiedKFold if estimator is a classifier and the
target y is binary or multiclass, or the number of folds in KFold
otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
a same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
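    # The +1 in numerator and denominator counts the unpermuted score itself,
    # so the reported p-value is never exactly zero.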
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| bsd-3-clause |
sinkpoint/dipy | doc/examples/simulate_multi_tensor.py | 16 | 2546 | """
======================
MultiTensor Simulation
======================
In this example we show how to simulate the signal and the ODF of a single
voxel using a MultiTensor.
"""
import numpy as np
from dipy.sims.voxel import (multi_tensor,
multi_tensor_odf,
single_tensor_odf,
all_tensor_evecs)
from dipy.data import get_sphere
"""
For the simulation we will need a GradientTable with the b-values and
b-vectors. Here we use the one we created in :ref:`example_gradients_spheres`.
"""
from gradients_spheres import gtab
"""
In ``mevals`` we save the eigenvalues of each tensor.
"""
mevals = np.array([[0.0015, 0.0003, 0.0003],
[0.0015, 0.0003, 0.0003]])
"""
In ``angles`` we save in polar coordinates (:math:`\theta, \phi`) the principal
axis of each tensor.
"""
angles = [(0, 0), (60, 0)]
"""
In ``fractions`` we save the percentage of the contribution of each tensor.
"""
fractions = [50, 50]
"""
The function ``multi_tensor`` will return the simulated signal and an array
with the principal axes of the tensors in cartesian coordinates.
"""
signal, sticks = multi_tensor(gtab, mevals, S0=100, angles=angles,
fractions=fractions, snr=None)
"""
We can also add Rician noise with a specific SNR.
"""
signal_noisy, sticks = multi_tensor(gtab, mevals, S0=100, angles=angles,
fractions=fractions, snr=20)
import matplotlib.pyplot as plt
plt.plot(signal, label='noiseless')
plt.plot(signal_noisy, label='with noise')
plt.legend()
plt.savefig('simulated_signal.png')
plt.show()
"""
.. figure:: simulated_signal.png
:align: center
**Simulated MultiTensor signal**
"""
"""
For the ODF simulation we will need a sphere. Because we are interested in a
simulation of only a single voxel, we can use a sphere with very high
resolution. We generate that by subdividing the triangles of one of Dipy's
cached spheres, which we can read in the following way.
"""
sphere = get_sphere('symmetric724')
sphere = sphere.subdivide(2)
odf = multi_tensor_odf(sphere.vertices, mevals, angles, fractions)
from dipy.viz import fvtk
ren = fvtk.ren()
odf_actor = fvtk.sphere_funcs(odf, sphere)
odf_actor.RotateX(90)
fvtk.add(ren, odf_actor)
print('Saving illustration as multi_tensor_simulation')
fvtk.record(ren, out_path='multi_tensor_simulation.png', size=(300, 300))
"""
.. figure:: multi_tensor_simulation.png
:align: center
**Simulating a MultiTensor ODF**
"""
| bsd-3-clause |
pkaspa/trading-with-python | historicDataDownloader/historicDataDownloader.py | 77 | 4526 | '''
Created on 4 aug. 2012
Copyright: Jev Kuznetsov
License: BSD
a module for downloading historic data from IB
'''
import ib
import pandas
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from time import sleep
import tradingWithPython.lib.logger as logger
from pandas import DataFrame, Index
import datetime as dt
from timeKeeper import TimeKeeper
import time
timeFormat = "%Y%m%d %H:%M:%S"
class DataHandler(object):
''' handles incoming messages '''
def __init__(self,tws):
self._log = logger.getLogger('DH')
tws.register(self.msgHandler,message.HistoricalData)
self.reset()
def reset(self):
self._log.debug('Resetting data')
self.dataReady = False
self._timestamp = []
self._data = {'open':[],'high':[],'low':[],'close':[],'volume':[],'count':[],'WAP':[]}
def msgHandler(self,msg):
#print '[msg]', msg
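        # IB marks the end of a historical data request with a row whose
        # date field starts with 'finished'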
if msg.date[:8] == 'finished':
            self._log.debug('Data received')
self.dataReady = True
return
self._timestamp.append(dt.datetime.strptime(msg.date,timeFormat))
for k in self._data.keys():
self._data[k].append(getattr(msg, k))
@property
def data(self):
''' return downloaded data as a DataFrame '''
df = DataFrame(data=self._data,index=Index(self._timestamp))
return df
class Downloader(object):
def __init__(self,debug=False):
self._log = logger.getLogger('DLD')
        self._log.debug('Initializing data downloader. Pandas version={0}, ibpy version:{1}'.format(pandas.__version__,ib.version))
self.tws = ibConnection()
self._dataHandler = DataHandler(self.tws)
if debug:
self.tws.registerAll(self._debugHandler)
self.tws.unregister(self._debugHandler,message.HistoricalData)
self._log.debug('Connecting to tws')
self.tws.connect()
self._timeKeeper = TimeKeeper() # keep track of past requests
self._reqId = 1 # current request id
def _debugHandler(self,msg):
print '[debug]', msg
def requestData(self,contract,endDateTime,durationStr='1800 S',barSizeSetting='1 secs',whatToShow='TRADES',useRTH=1,formatDate=1):
self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol,endDateTime))
while self._timeKeeper.nrRequests(timeSpan=600) > 59:
print 'Too many requests done. Waiting... '
time.sleep(1)
self._timeKeeper.addRequest()
self._dataHandler.reset()
self.tws.reqHistoricalData(self._reqId,contract,endDateTime,durationStr,barSizeSetting,whatToShow,useRTH,formatDate)
self._reqId+=1
#wait for data
startTime = time.time()
timeout = 3
while not self._dataHandler.dataReady and (time.time()-startTime < timeout):
sleep(2)
if not self._dataHandler.dataReady:
self._log.error('Data timeout')
print self._dataHandler.data
return self._dataHandler.data
def getIntradayData(self,contract, dateTuple ):
''' get full day data on 1-s interval
date: a tuple of (yyyy,mm,dd)
'''
openTime = dt.datetime(*dateTuple)+dt.timedelta(hours=16)
closeTime = dt.datetime(*dateTuple)+dt.timedelta(hours=22)
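        # download the day in 30-minute blocks of 1-second bars and
        # concatenate them below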
timeRange = pandas.date_range(openTime,closeTime,freq='30min')
datasets = []
for t in timeRange:
datasets.append(self.requestData(contract,t.strftime(timeFormat)))
return pandas.concat(datasets)
def disconnect(self):
self.tws.disconnect()
if __name__=='__main__':
dl = Downloader(debug=True)
c = Contract()
c.m_symbol = 'SPY'
c.m_secType = 'STK'
c.m_exchange = 'SMART'
c.m_currency = 'USD'
df = dl.getIntradayData(c, (2012,8,6))
df.to_csv('test.csv')
# df = dl.requestData(c, '20120803 22:00:00')
# df.to_csv('test1.csv')
# df = dl.requestData(c, '20120803 21:30:00')
# df.to_csv('test2.csv')
dl.disconnect()
print 'Done.' | bsd-3-clause |
rbalda/neural_ocr | env/lib/python2.7/site-packages/scipy/integrate/odepack.py | 62 | 9420 | # Author: Travis Oliphant
from __future__ import division, print_function, absolute_import
__all__ = ['odeint']
from . import _odepack
from copy import copy
import warnings
class ODEintWarning(Warning):
pass
_msgs = {2: "Integration successful.",
1: "Nothing was done; the integration time was 0.",
-1: "Excess work done on this call (perhaps wrong Dfun type).",
-2: "Excess accuracy requested (tolerances too small).",
-3: "Illegal input detected (internal error).",
-4: "Repeated error test failures (internal error).",
-5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
-6: "Error weight became zero during problem.",
-7: "Internal workspace insufficient to finish (internal error)."
}
def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,
ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
mxords=5, printmessg=0):
"""
Integrate a system of ordinary differential equations.
Solve a system of ordinary differential equations using lsoda from the
FORTRAN library odepack.
Solves the initial value problem for stiff or non-stiff systems
    of first order ODEs::
dy/dt = func(y, t0, ...)
where y can be a vector.
*Note*: The first two arguments of ``func(y, t0, ...)`` are in the
opposite order of the arguments in the system definition function used
by the `scipy.integrate.ode` class.
Parameters
----------
func : callable(y, t0, ...)
Computes the derivative of y at t0.
y0 : array
Initial condition on y (can be a vector).
t : array
A sequence of time points for which to solve for y. The initial
value point should be the first element of this sequence.
args : tuple, optional
Extra arguments to pass to function.
Dfun : callable(y, t0, ...)
Gradient (Jacobian) of `func`.
col_deriv : bool, optional
True if `Dfun` defines derivatives down columns (faster),
otherwise `Dfun` should define derivatives across rows.
full_output : bool, optional
        True if a dictionary of optional outputs should be returned as the
        second output
printmessg : bool, optional
Whether to print the convergence message
Returns
-------
y : array, shape (len(t), len(y0))
Array containing the value of y for each desired time in t,
with the initial value `y0` in the first row.
infodict : dict, only returned if full_output == True
Dictionary containing additional output information
======= ============================================================
key meaning
======= ============================================================
'hu' vector of step sizes successfully used for each time step.
'tcur' vector with the value of t reached for each time step.
(will always be at least as large as the input times).
'tolsf' vector of tolerance scale factors, greater than 1.0,
computed when a request for too much accuracy was detected.
'tsw' value of t at the time of the last method switch
(given for each time step)
'nst' cumulative number of time steps
'nfe' cumulative number of function evaluations for each time step
'nje' cumulative number of jacobian evaluations for each time step
'nqu' a vector of method orders for each successful step.
'imxer' index of the component of largest magnitude in the
weighted local error vector (e / ewt) on an error return, -1
otherwise.
'lenrw' the length of the double work array required.
'leniw' the length of integer work array required.
'mused' a vector of method indicators for each successful time step:
1: adams (nonstiff), 2: bdf (stiff)
======= ============================================================
Other Parameters
----------------
    ml, mu : int, optional
        If either of these are not None, the Jacobian is assumed to be
        banded. These give the number of lower and upper non-zero
        diagonals in this banded matrix.
For the banded case, `Dfun` should return a matrix whose
rows contain the non-zero bands (starting with the lowest diagonal).
Thus, the return matrix `jac` from `Dfun` should have shape
``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``.
The data in `jac` must be stored such that ``jac[i - j + mu, j]``
holds the derivative of the `i`th equation with respect to the `j`th
state variable. If `col_deriv` is True, the transpose of this
`jac` must be returned.
rtol, atol : float, optional
The input parameters `rtol` and `atol` determine the error
control performed by the solver. The solver will control the
vector, e, of estimated local errors in y, according to an
inequality of the form ``max-norm of (e / ewt) <= 1``,
where ewt is a vector of positive error weights computed as
``ewt = rtol * abs(y) + atol``.
rtol and atol can be either vectors the same length as y or scalars.
Defaults to 1.49012e-8.
tcrit : ndarray, optional
Vector of critical points (e.g. singularities) where integration
care should be taken.
h0 : float, (0: solver-determined), optional
The step size to be attempted on the first step.
hmax : float, (0: solver-determined), optional
The maximum absolute step size allowed.
hmin : float, (0: solver-determined), optional
The minimum absolute step size allowed.
ixpr : bool, optional
Whether to generate extra printing at method switches.
mxstep : int, (0: solver-determined), optional
Maximum number of (internally defined) steps allowed for each
integration point in t.
mxhnil : int, (0: solver-determined), optional
Maximum number of messages printed.
mxordn : int, (0: solver-determined), optional
Maximum order to be allowed for the non-stiff (Adams) method.
mxords : int, (0: solver-determined), optional
Maximum order to be allowed for the stiff (BDF) method.
See Also
--------
ode : a more object-oriented integrator based on VODE.
quad : for finding the area under a curve.
Examples
--------
The second order differential equation for the angle `theta` of a
pendulum acted on by gravity with friction can be written::
theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0
where `b` and `c` are positive constants, and a prime (') denotes a
derivative. To solve this equation with `odeint`, we must first convert
it to a system of first order equations. By defining the angular
velocity ``omega(t) = theta'(t)``, we obtain the system::
theta'(t) = omega(t)
omega'(t) = -b*omega(t) - c*sin(theta(t))
Let `y` be the vector [`theta`, `omega`]. We implement this system
in python as:
>>> def pend(y, t, b, c):
... theta, omega = y
... dydt = [omega, -b*omega - c*np.sin(theta)]
... return dydt
...
We assume the constants are `b` = 0.25 and `c` = 5.0:
>>> b = 0.25
>>> c = 5.0
For initial conditions, we assume the pendulum is nearly vertical
with `theta(0)` = `pi` - 0.1, and it initially at rest, so
`omega(0)` = 0. Then the vector of initial conditions is
>>> y0 = [np.pi - 0.1, 0.0]
We generate a solution 101 evenly spaced samples in the interval
0 <= `t` <= 10. So our array of times is:
>>> t = np.linspace(0, 10, 101)
Call `odeint` to generate the solution. To pass the parameters
`b` and `c` to `pend`, we give them to `odeint` using the `args`
argument.
>>> from scipy.integrate import odeint
>>> sol = odeint(pend, y0, t, args=(b, c))
The solution is an array with shape (101, 2). The first column
is `theta(t)`, and the second is `omega(t)`. The following code
plots both components.
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, sol[:, 0], 'b', label='theta(t)')
>>> plt.plot(t, sol[:, 1], 'g', label='omega(t)')
>>> plt.legend(loc='best')
>>> plt.xlabel('t')
>>> plt.grid()
>>> plt.show()
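
    As a brief aside, the optional outputs described above can be retrieved
    by passing ``full_output=True``:

    >>> sol2, info = odeint(pend, y0, t, args=(b, c), full_output=True)
    >>> hu = info['hu']    # step sizes actually used
    >>> nfe = info['nfe']  # cumulative number of function evaluations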
"""
if ml is None:
ml = -1 # changed to zero inside function call
if mu is None:
mu = -1 # changed to zero inside function call
t = copy(t)
y0 = copy(y0)
output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,
full_output, rtol, atol, tcrit, h0, hmax, hmin,
ixpr, mxstep, mxhnil, mxordn, mxords)
if output[-1] < 0:
warning_msg = _msgs[output[-1]] + " Run with full_output = 1 to get quantitative information."
warnings.warn(warning_msg, ODEintWarning)
elif printmessg:
warning_msg = _msgs[output[-1]]
warnings.warn(warning_msg, ODEintWarning)
if full_output:
output[1]['message'] = _msgs[output[-1]]
output = output[:-1]
if len(output) == 1:
return output[0]
else:
return output
| mit |
hetland/xray | xray/core/formatting.py | 4 | 9115 | from datetime import datetime, timedelta
import functools
import numpy as np
import pandas as pd
from .options import OPTIONS
from .pycompat import (OrderedDict, iteritems, itervalues, unicode_type,
bytes_type, dask_array_type)
def pretty_print(x, numchars):
"""Given an object `x`, call `str(x)` and format the returned string so
that it is numchars long, padding with trailing spaces or truncating with
ellipses as necessary
"""
s = str(x)
if len(s) > numchars:
return s[:(numchars - 3)] + '...'
else:
return s + ' ' * (numchars - len(s))
def wrap_indent(text, start='', length=None):
if length is None:
length = len(start)
indent = '\n' + ' ' * length
return start + indent.join(x for x in text.splitlines())
def _get_indexer_at_least_n_items(shape, n_desired):
assert 0 < n_desired <= np.prod(shape)
cum_items = np.cumprod(shape[::-1])
n_steps = np.argmax(cum_items >= n_desired)
stop = int(np.ceil(float(n_desired) / np.r_[1, cum_items][n_steps]))
indexer = ((0,) * (len(shape) - 1 - n_steps) + (slice(stop),)
+ (slice(None),) * n_steps)
return indexer
def first_n_items(x, n_desired):
"""Returns the first n_desired items of an array"""
# Unfortunately, we can't just do x.flat[:n_desired] here because x might
# not be a numpy.ndarray. Moreover, access to elements of x could be very
# expensive (e.g. if it's only available over DAP), so go out of our way to
# get them in a single call to __getitem__ using only slices.
if n_desired < 1:
raise ValueError('must request at least one item')
if x.size == 0:
# work around for https://github.com/numpy/numpy/issues/5195
return []
if n_desired < x.size:
indexer = _get_indexer_at_least_n_items(x.shape, n_desired)
x = x[indexer]
return np.asarray(x).flat[:n_desired]
def format_timestamp(t):
"""Cast given object to a Timestamp and return a nicely formatted string"""
datetime_str = str(pd.Timestamp(t))
try:
date_str, time_str = datetime_str.split()
except ValueError:
# catch NaT and others that don't split nicely
return datetime_str
else:
if time_str == '00:00:00':
return date_str
else:
return '%sT%s' % (date_str, time_str)
def format_timedelta(t, timedelta_format=None):
"""Cast given object to a Timestamp and return a nicely formatted string"""
timedelta_str = str(pd.Timedelta(t))
try:
days_str, time_str = timedelta_str.split(' days ')
except ValueError:
# catch NaT and others that don't split nicely
return timedelta_str
else:
if timedelta_format == 'date':
return days_str + ' days'
elif timedelta_format == 'time':
return time_str
else:
return timedelta_str
def format_item(x, timedelta_format=None):
"""Returns a succinct summary of an object as a string"""
if isinstance(x, (np.datetime64, datetime)):
return format_timestamp(x)
if isinstance(x, (np.timedelta64, timedelta)):
return format_timedelta(x, timedelta_format=timedelta_format)
elif isinstance(x, (unicode_type, bytes_type)):
return repr(x)
elif isinstance(x, (float, np.float)):
return '{0:.4}'.format(x)
else:
return str(x)
def format_items(x):
"""Returns a succinct summaries of all items in a sequence as strings"""
x = np.asarray(x)
timedelta_format = 'datetime'
if np.issubdtype(x.dtype, np.timedelta64):
x = np.asarray(x, dtype='timedelta64[ns]')
day_part = (x[~pd.isnull(x)]
.astype('timedelta64[D]')
.astype('timedelta64[ns]'))
time_needed = x != day_part
day_needed = day_part != np.timedelta64(0, 'ns')
if np.logical_not(day_needed).all():
timedelta_format = 'time'
elif np.logical_not(time_needed).all():
timedelta_format = 'date'
formatted = [format_item(xi, timedelta_format) for xi in x]
return formatted
def format_array_flat(items_ndarray, max_width):
"""Return a formatted string for as many items in the flattened version of
items_ndarray that will fit within max_width characters
"""
# every item will take up at least two characters, but we always want to
# print at least one item
max_possibly_relevant = max(int(np.ceil(max_width / 2.0)), 1)
relevant_items = first_n_items(items_ndarray, max_possibly_relevant)
pprint_items = format_items(relevant_items)
cum_len = np.cumsum([len(s) + 1 for s in pprint_items]) - 1
if (max_possibly_relevant < items_ndarray.size
or (cum_len > max_width).any()):
end_padding = ' ...'
count = max(np.argmax((cum_len + len(end_padding)) > max_width), 1)
pprint_items = pprint_items[:count]
else:
end_padding = ''
pprint_str = ' '.join(pprint_items) + end_padding
return pprint_str
def _summarize_var_or_coord(name, var, col_width, show_values=True,
marker=' ', max_width=None):
if max_width is None:
max_width = OPTIONS['display_width']
first_col = pretty_print(' %s %s ' % (marker, name), col_width)
dims_str = '(%s) ' % ', '.join(map(str, var.dims)) if var.dims else ''
front_str = first_col + dims_str + ('%s ' % var.dtype)
if show_values:
values_str = format_array_flat(var, max_width - len(front_str))
else:
values_str = '...'
return front_str + values_str
def _not_remote(var):
"""Helper function to identify if array is positively identifiable as
coming from a remote source.
"""
source = var.encoding.get('source')
if source and source.startswith('http') and not var._in_memory:
return False
return True
def summarize_var(name, var, col_width):
show_values = _not_remote(var)
return _summarize_var_or_coord(name, var, col_width, show_values)
def summarize_coord(name, var, col_width):
is_index = name in var.dims
show_values = is_index or _not_remote(var)
marker = '*' if is_index else ' '
return _summarize_var_or_coord(name, var, col_width, show_values, marker)
def _maybe_truncate(obj, maxlen=500):
s = str(obj)
if len(s) > maxlen:
s = s[:(maxlen - 3)] + '...'
return s
def summarize_attr(key, value, col_width=None):
# ignore col_width for now to more clearly distinguish attributes
return ' %s: %s' % (key, _maybe_truncate(value))
EMPTY_REPR = ' *empty*'
def _calculate_col_width(mapping):
max_name_length = max(len(str(k)) for k in mapping) if mapping else 0
col_width = max(max_name_length, 7) + 6
return col_width
def _mapping_repr(mapping, title, summarizer, col_width=None):
if col_width is None:
col_width = _calculate_col_width(mapping)
summary = ['%s:' % title]
if mapping:
summary += [summarizer(k, v, col_width) for k, v in mapping.items()]
else:
summary += [EMPTY_REPR]
return '\n'.join(summary)
coords_repr = functools.partial(_mapping_repr, title='Coordinates',
summarizer=summarize_coord)
vars_repr = functools.partial(_mapping_repr, title='Data variables',
summarizer=summarize_var)
attrs_repr = functools.partial(_mapping_repr, title='Attributes',
summarizer=summarize_attr)
def indexes_repr(indexes):
summary = []
for k, v in indexes.items():
summary.append(wrap_indent(repr(v), '%s: ' % k))
return '\n'.join(summary)
def array_repr(arr):
# used for DataArray, Variable and Coordinate
if hasattr(arr, 'name') and arr.name is not None:
name_str = '%r ' % arr.name
else:
name_str = ''
dim_summary = ', '.join('%s: %s' % (k, v) for k, v
in zip(arr.dims, arr.shape))
summary = ['<xray.%s %s(%s)>'% (type(arr).__name__, name_str, dim_summary)]
if isinstance(getattr(arr, 'variable', arr)._data, dask_array_type):
summary.append(repr(arr.data))
elif arr._in_memory or arr.size < 1e5:
summary.append(repr(arr.values))
else:
summary.append('[%s values with dtype=%s]' % (arr.size, arr.dtype))
if hasattr(arr, 'coords'):
if arr.coords:
summary.append(repr(arr.coords))
if arr.attrs:
summary.append(attrs_repr(arr.attrs))
return '\n'.join(summary)
def dataset_repr(ds):
summary = ['<xray.%s>' % type(ds).__name__]
col_width = _calculate_col_width(ds)
dims_start = pretty_print('Dimensions:', col_width)
all_dim_strings = ['%s: %s' % (k, v) for k, v in iteritems(ds.dims)]
summary.append('%s(%s)' % (dims_start, ', '.join(all_dim_strings)))
summary.append(coords_repr(ds.coords, col_width=col_width))
summary.append(vars_repr(ds.data_vars, col_width=col_width))
if ds.attrs:
summary.append(attrs_repr(ds.attrs))
return '\n'.join(summary)
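

if __name__ == '__main__':
    # minimal, self-contained demo of two of the helpers above (illustrative
    # only; the inputs are arbitrary)
    print(pretty_print('a-longer-than-ten-chars-label', 10))  # truncated with '...'
    print(format_array_flat(np.arange(1000), 40))  # as many items as fit in 40 chars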
| apache-2.0 |
shnizzedy/SM_openSMILE | openSMILE_analysis/barh.py | 1 | 11126 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
barh.py
Script to build horizontal bar charts to explore weighted feature rankings
produced by random forests in trees.py
Authors:
– Jon Clucas, 2017 (jon.clucas@childmind.org)
@author: jon.clucas
"""
import os, sys
if os.path.abspath('../../') not in sys.path:
if os.path.isdir(os.path.join(os.path.abspath('../..'), 'SM_openSMILE')):
sys.path.append(os.path.abspath('../..'))
elif os.path.isdir(os.path.join(os.path.abspath('..'), 'SM_openSMILE')):
sys.path.append(os.path.abspath('..'))
elif os.path.isdir('SM_openSMILE'):
sys.path.append(os.path.abspath('.'))
from SM_openSMILE.utilities.cmi_color_pallette import cmi_colors
import math, pandas as pd, matplotlib.pyplot as plt
from SM_openSMILE.cfg import topdir
def main():
configs = ['emobase', 'ComParE_2016']
replacements = ['removed', 'replaced_clone', 'replaced_pink', 'timeshifted'
]
for i, replacement in enumerate(replacements):
replacements[i] = '_'.join(['adults', replacement])
for config in configs:
dfs, ltd_dfs, adults_dfs, unmod = collect_dfs(config, replacements)
for dlist in [dfs, ltd_dfs, adults_dfs]:
dlist.append(unmod)
build_barh(get_features(dfs), config, replacements)
build_barh(get_features(ltd_dfs), config, replacements, 'ltd')
build_barh(get_features(adults_dfs), config, ['adults', 'unmodified'],
'adults')
def collect_dfs(config, replacements):
"""
Function to collect dataframes to build relevant plots.
Parameters
----------
config : string
openSMILE config file
replacements : list of strings
list of replacement methods
Returns
-------
dfs : list of pandas dataframes
list of cleaned dataframes
ltd_dfs : list of pandas dataframes
list of limited dataframes excluding unmodified files
adults_dfs : list of pandas dataframes
list of dataframes in which only adults speak
unmod : pandas dataframe
dataframe of unmodified files
"""
dfs, ltd_dfs, adults_dfs = [], [], []
for replacement in replacements:
dfs.append(get_df_from_file(get_filepath(config, replacement),
replacement))
for replacement in replacements:
ltd_dfs.append(get_df_from_file(get_filepath(config, replacement,
'ltd'), '/'.join(['ltd', replacement])))
unmod = get_df_from_file(get_filepath(config, 'unmodified','unmodified'),
'unmodified')
adults_dfs.append(get_df_from_file(get_filepath(config, 'adults','ltd'),
'/'.join(['ltd', 'adults'])))
return(dfs, ltd_dfs, adults_dfs, unmod)
def build_barh(df, config, replacements, special=None):
"""
Function to prepare a dataframe for plotting and to send that prepared
    dataframe to the plot function (plot_barh) for plotting and saving in as
many forms as is appropriate.
Parameters
----------
df : pandas dataframe
dataframe to prepare for plotting
config : string
openSMILE config file
replacements : list
noise replacement methods
special : string or None
'ltd' or None
Returns
-------
None
"""
conditions = ['button_w', 'button_no', 'vocal_w', 'vocal_no']
# plot each condition with replacements as colors
for condition in conditions:
sdf = df.xs(condition, axis=1)
# get rid of non-predictive features
sdf = sdf[sdf > 0].dropna(how='all')
if special:
out_path = os.path.join(topdir, config, 'feature_summary', special,
'_'.join([condition, 'complete.svg']))
else:
out_path = os.path.join(topdir, config, 'feature_summary', ''.join(
[condition, '.svg']))
if not os.path.exists(os.path.dirname(out_path)):
os.makedirs(os.path.dirname(out_path))
title = " :\n".join(["weighted random forest values", config, condition
])
plot_barh(sdf, title, out_path)
# plot conditions in which all replacements returned values above the
# median
top_all = sdf[sdf > sdf.sum(axis=1).median()].dropna(how='any')
        out_path_all = out_path.replace('.svg', '_top_all.svg')
plot_barh(top_all, title, out_path_all)
# plot conditions in which any replacements returned values above the
# median
top_any = sdf[sdf > sdf.sum(axis=1).median()].dropna(how='all')
        out_path_any = out_path.replace('.svg', '_top_any.svg')
plot_barh(top_any, title, out_path_any)
if special:
for i, replacement in enumerate(replacements):
if replacement != 'unmodified':
replacements[i] = '/'.join(['ltd', replacement])
# plot each replacement with conditions as colors
for replacement in replacements:
sdf = df.xs(replacement, axis=1, level=1)
out_path = os.path.join(topdir, config, 'feature_summary', ''.join(
[replacement, '.svg']))
if not os.path.exists(os.path.dirname(out_path)):
os.makedirs(os.path.dirname(out_path))
title = " :\n".join(["weighted random forest values", config,
replacement])
plot_barh(sdf, title, out_path)
# plot conditions in which all conditions returned values above the
# median
top_all = sdf[sdf > sdf.sum(axis=1).median()].dropna(how='any')
        out_path_all = out_path.replace('.svg', '_top_all.svg')
plot_barh(top_all, title, out_path_all)
# plot conditions in which any conditions returned values above the
# median
top_any = sdf[sdf > sdf.sum(axis=1).median()].dropna(how='all')
        out_path_any = out_path.replace('.svg', '_top_any.svg')
plot_barh(top_any, title, out_path_any)
# plot each replacement and condition combination individually
for condition in conditions:
tdf = sdf.xs(condition, axis=1)
tdf = tdf[tdf > 0]
out_path = os.path.join(topdir, config, 'feature_summary',
''.join([replacement, '_', condition, '.svg']))
if not os.path.exists(os.path.dirname(out_path)):
os.makedirs(os.path.dirname(out_path))
title = " :\n".join(["weighted random forest values", config,
replacement, condition])
plot_barh(tdf, title, out_path)
# plot conditions in which replacement and condition returned
# values above the median
top_any = tdf[tdf > tdf.median()]
            out_path_any = out_path.replace('.svg', '_top_any.svg')
plot_barh(top_any, title, out_path_any)
def plot_barh(sdf, title, out_path):
"""
Function to plot a horizontal barplot and save said plot.
Parameters
----------
sdf : pandas dataframe
dataframe to plot
title : string
plot title
out_path : string
path of image file save location
Returns
-------
None
Output
------
out_path : image
image file
"""
print(title.replace("\n"," "))
print(sdf.shape)
plt.figure()
if sdf.shape[0] > 0:
if len(sdf.shape) == 2:
# color per condition and/or replacement method
color = cmi_colors()
# plot dimensions: f(maximum value) × f(# of features)
dim = (abs(sdf.sum(axis=0)).max()*25, math.log(sdf.shape[0])**3)
else:
# all bars one color
color = cmi_colors()[0]
# plot dimensions: f(maximum value) × f(# of features)
dim = (abs(sdf.max())*500, math.log(sdf.shape[0])**3)
ax = sdf.plot.barh(figsize=dim, color=color, stacked=True, title=title
)
ax.legend(loc=3, fancybox=True, shadow=True, bbox_to_anchor=(-0.01,
-0.01))
plt.savefig(out_path, bbox_inches="tight")
plt.close()
def get_features(dfs):
"""
Function to cross-tabulate feature dataframes and sum by features.
Parameters
----------
dfs : list of pandas dataframes
a list of dataframes to compare
Returns
-------
p_t : pandas dataframe
a multi-indexed dataframe of ['base_feature' × 'coefficient' ×
'summary_type'] × ['replacement' × 'condition'] predictive counts
"""
df = pd.concat(dfs)
conditions = ['button_w', 'button_no', 'vocal_w', 'vocal_no']
features = ['base_feature', 'coefficient', 'summary_type']
return pd.pivot_table(df, values=conditions, index=features,
columns=['replacement'], aggfunc='sum', fill_value=0)
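# A minimal illustration of the pivot performed in get_features(), using
# made-up rows (the feature names and weights below are arbitrary
# placeholders, not real openSMILE output)
def _pivot_table_example():
    toy = pd.DataFrame({'base_feature': ['mfcc', 'mfcc', 'mfcc', 'energy'],
                        'coefficient': ['1', '1', '2', '0'],
                        'summary_type': ['amean', 'amean', 'amean', 'stddev'],
                        'button_w': [0.2, 0.1, 0.0, 0.4],
                        'button_no': [0.0, 0.3, 0.2, 0.1],
                        'vocal_w': [0.1, 0.0, 0.5, 0.0],
                        'vocal_no': [0.0, 0.0, 0.1, 0.2],
                        'replacement': ['adults_removed', 'unmodified',
                                        'unmodified', 'adults_removed']})
    # rows sharing the same index triple are summed within each replacement
    return pd.pivot_table(toy,
                          values=['button_w', 'button_no', 'vocal_w',
                                  'vocal_no'],
                          index=['base_feature', 'coefficient',
                                 'summary_type'],
                          columns=['replacement'], aggfunc='sum',
                          fill_value=0)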
def get_df_from_file(filepath, replacement="unmodified"):
"""
Function to get weighted summary table from filepath.
Parameters
----------
filepath : string
absolute path to the weighted dataframe csv
replacement : string
adult replacement method
Returns
-------
df : pandas dataframe
a features × ['base_feature', 'coefficient', 'summary_type',
'button_w', 'button_no', 'vocal_w', 'vocal_no'] pandas dataframe
"""
df = pd.read_csv(filepath)
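    # the extracts below are intended to split an openSMILE-style feature
    # name into its base name (the text before '_sma'), its coefficient
    # index (the digits), and its summary statistic; e.g. a hypothetical
    # "mfcc_sma[2]_amean" would give base_feature 'mfcc' and coefficient '2'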
df['base_feature'] = df['feature'].str.extract('(.*(?=_sma))', expand=True)
    df['coefficient'] = df['feature'].str.extract(r'(\d+)', expand=True)
df['summary_type'] = df['feature'].str.extract('((?<=_).*?)*(?=\s)',
expand=True)
df['replacement'] = replacement
df = df.ix[:, ['base_feature', 'coefficient', 'summary_type', 'button_w',
'button_no', 'vocal_w', 'vocal_no', 'replacement']]
return df
def get_filepath(config, replacement, special=None):
"""
Function to get filepaths for weighted summary tables.
Parameters
----------
config : string
openSMILE config file basename
replacement : string
adult replacement method
special : string or None
['ltd', 'unmodified', None] if we're looking at something other than
the cleaned data with the unmodified data filled in
Returns
-------
filepath : string
absolute path to the weighted dataframe csv
"""
if special:
special_path = os.path.join(topdir, config, 'feature_summary', special,
'_'.join([replacement, 'weighted.csv']))
if os.path.exists(special_path):
return special_path
return os.path.join(topdir, config, 'feature_summary', '_'.join([
replacement, 'weighted.csv']))
# ============================================================================
if __name__ == '__main__':
main() | apache-2.0 |
SujitKRay/Listener_Classification_Public | modeling/model_refinement-extracted_features/code/fit_and_evaluate.py | 2 | 2309 | """
Use scikit-learn to fit and evaluate models.
Code originally created by Benjamin Bengfort.
modified by Nicole Donnelly 20160513
"""
import os
import time
import pickle
import pandas as pd
from sklearn import metrics
from sklearn import cross_validation
from sklearn.cross_validation import KFold
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
def fit_and_evaluate(root, name, dataset, model, label, **kwargs):
"""
Because of the Scikit-Learn API, we can create a function to
do all of the fit and evaluate work on our behalf!
"""
start = time.time()
#create empty lists for scoring variables
scores = {'precision':[], 'recall':[], 'accuracy':[], 'f1':[]}
for train, test in KFold(dataset.data.shape[0], n_folds=12, shuffle=True):
X_train, X_test = dataset.data[train], dataset.data[test]
y_train, y_test = dataset.target[train], dataset.target[test]
estimator = model(**kwargs)
estimator.fit(X_train, y_train)
expected = y_test
predicted = estimator.predict(X_test)
scores['precision'].append(metrics.precision_score(expected, predicted, average='binary'))
scores['recall'].append(metrics.recall_score(expected, predicted, average='binary'))
scores['accuracy'].append(metrics.accuracy_score(expected, predicted))
scores['f1'].append(metrics.f1_score(expected, predicted, average='binary'))
# Report
print "Build and Validation of {} took {:0.3f} seconds".format(label, time.time()-start)
print "Validation scores are as follows:\n"
print pd.DataFrame(scores).mean()
#print feature weights. these have been computed separately for tree models.
if label == "LogisticRegression":
print estimator.coef_
if label == "GaussianNB":
print estimator.class_prior_
# Write official estimator to disk
estimator = model(**kwargs)
estimator.fit(dataset.data, dataset.target)
outpath = os.path.join(root + "/", name + "_" + label.lower().replace(" ", "-") + ".pickle")
with open(outpath, 'w') as f:
pickle.dump(estimator, f)
print "\nFitted model written to:\n{}".format(os.path.abspath(outpath))
| mit |
smorton2/think-stats | code/populations.py | 68 | 2609 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import csv
import logging
import sys
import numpy as np
import pandas
import thinkplot
import thinkstats2
def ReadData(filename='PEP_2012_PEPANNRES_with_ann.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
df = pandas.read_csv(filename, header=None, skiprows=2,
encoding='iso-8859-1')
populations = df[7]
populations.replace(0, np.nan, inplace=True)
return populations.dropna()
def MakeFigures():
"""Plots the CDF of populations in several forms.
On a log-log scale the tail of the CCDF looks like a straight line,
which suggests a Pareto distribution, but that turns out to be misleading.
On a log-x scale the distribution has the characteristic sigmoid of
a lognormal distribution.
The normal probability plot of log(sizes) confirms that the data fit the
lognormal model very well.
Many phenomena that have been described with Pareto models can be described
as well, or better, with lognormal models.
"""
pops = ReadData()
print('Number of cities/towns', len(pops))
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label='data')
cdf_log = thinkstats2.Cdf(log_pops, label='data')
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(xlabel='log10 population',
ylabel='CCDF',
yscale='log')
thinkplot.Save(root='populations_pareto')
# lognormal plot
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel='log10 population',
ylabel='CDF')
thinkplot.SubPlot(2)
thinkstats2.NormalProbabilityPlot(log_pops, label='data')
thinkplot.Config(xlabel='z',
ylabel='log10 population',
xlim=[-5, 5])
thinkplot.Save(root='populations_normal')
def main():
thinkstats2.RandomSeed(17)
MakeFigures()
if __name__ == "__main__":
main()
| gpl-3.0 |
aflaxman/scikit-learn | examples/mixture/plot_gmm_sin.py | 103 | 6101 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example demonstrates the behavior of Gaussian mixture models fit on data
that was not sampled from a mixture of Gaussian random variables. The dataset
is formed by 100 points loosely spaced following a noisy sine curve. There is
therefore no ground truth value for the number of Gaussian components.
The first model is a classical Gaussian Mixture Model with 10 components fit
with the Expectation-Maximization algorithm.
The second model is a Bayesian Gaussian Mixture Model with a Dirichlet process
prior fit with variational inference. The low value of the concentration prior
makes the model favor a lower number of active components. This models
"decides" to focus its modeling power on the big picture of the structure of
the dataset: groups of points with alternating directions modeled by
non-diagonal covariance matrices. Those alternating directions roughly capture
the alternating nature of the original sine signal.
The third model is also a Bayesian Gaussian mixture model with a Dirichlet
process prior but this time the value of the concentration prior is higher
giving the model more liberty to model the fine-grained structure of the data.
The result is a mixture with a larger number of active components that is
similar to the first model where we arbitrarily decided to fix the number of
components to 10.
Which model is the best is a matter of subjective judgement: do we want to
favor models that only capture the big picture to summarize and explain most of
the structure of the data while ignoring the details or do we prefer models
that closely follow the high density regions of the signal?
The last two panels show how we can sample from the last two models. The
resulting samples distributions do not look exactly like the original data
distribution. The difference primarily stems from the approximation error we
made by using a model that assumes that the data was generated by a finite
number of Gaussian components instead of a continuous noisy sine curve.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
print(__doc__)
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
def plot_results(X, Y, means, covariances, index, title):
splot = plt.subplot(5, 1, 1 + index)
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
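        # eigh gives the covariance's eigenvalues (v) and eigenvectors (w);
        # the next lines rescale the axis lengths to 2*sqrt(2)*sigma (~2.45
        # standard deviations) and use the first eigenvector to set the
        # ellipse orientation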
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y == i):
continue
plt.scatter(X[Y == i, 0], X[Y == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6., 4. * np.pi - 6.)
plt.ylim(-5., 5.)
plt.title(title)
plt.xticks(())
plt.yticks(())
def plot_samples(X, Y, n_components, index, title):
plt.subplot(5, 1, 4 + index)
for i, color in zip(range(n_components), color_iter):
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y == i):
continue
plt.scatter(X[Y == i, 0], X[Y == i, 1], .8, color=color)
plt.xlim(-6., 4. * np.pi - 6.)
plt.ylim(-5., 5.)
plt.title(title)
plt.xticks(())
plt.yticks(())
# Parameters
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4. * np.pi / n_samples
for i in range(X.shape[0]):
x = i * step - 6.
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3. * (np.sin(x) + np.random.normal(0, .2))
plt.figure(figsize=(10, 10))
plt.subplots_adjust(bottom=.04, top=0.95, hspace=.2, wspace=.05,
left=.03, right=.97)
# Fit a Gaussian mixture with EM using ten components
gmm = mixture.GaussianMixture(n_components=10, covariance_type='full',
max_iter=100).fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
'Expectation-maximization')
dpgmm = mixture.BayesianGaussianMixture(
n_components=10, covariance_type='full', weight_concentration_prior=1e-2,
weight_concentration_prior_type='dirichlet_process',
mean_precision_prior=1e-2, covariance_prior=1e0 * np.eye(2),
init_params="random", max_iter=100, random_state=2).fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
"Bayesian Gaussian mixture models with a Dirichlet process prior "
r"for $\gamma_0=0.01$.")
X_s, y_s = dpgmm.sample(n_samples=2000)
plot_samples(X_s, y_s, dpgmm.n_components, 0,
"Gaussian mixture with a Dirichlet process prior "
r"for $\gamma_0=0.01$ sampled with $2000$ samples.")
dpgmm = mixture.BayesianGaussianMixture(
n_components=10, covariance_type='full', weight_concentration_prior=1e+2,
weight_concentration_prior_type='dirichlet_process',
mean_precision_prior=1e-2, covariance_prior=1e0 * np.eye(2),
init_params="kmeans", max_iter=100, random_state=2).fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 2,
"Bayesian Gaussian mixture models with a Dirichlet process prior "
r"for $\gamma_0=100$")
X_s, y_s = dpgmm.sample(n_samples=2000)
plot_samples(X_s, y_s, dpgmm.n_components, 1,
"Gaussian mixture with a Dirichlet process prior "
r"for $\gamma_0=100$ sampled with $2000$ samples.")
plt.show()
| bsd-3-clause |
genn-team/genn | userproject/PotjansMicrocircuit_project/plot.py | 1 | 2257 | import csv
import matplotlib.pyplot as plt
import numpy as np
import re
import sys
from os import path
N_full = {
'23': {'E': 20683, 'I': 5834},
'4' : {'E': 21915, 'I': 5479},
'5' : {'E': 4850, 'I': 1065},
'6' : {'E': 14395, 'I': 2948}
}
assert len(sys.argv) >= 2
data_dir = sys.argv[1] + "_output"
N_scaling = float(sys.argv[2]) if len(sys.argv) >= 3 else 0.5
def load_spikes(filename):
# Parse filename and use to get population name and size
match = re.match(".*\.([0-9]+)([EI])\.st", filename)
name = match.group(1) + match.group(2)
num = int(N_full[match.group(1)][match.group(2)] * N_scaling)
# Read CSV spikes
spikes = np.loadtxt(filename, dtype={"names": ("time", "neuron_id"),
"formats": (np.float, np.int)})
return spikes["time"], spikes["neuron_id"], name, num
pop_spikes = [load_spikes(path.join(data_dir, sys.argv[1] + ".6I.st")),
load_spikes(path.join(data_dir, sys.argv[1] + ".6E.st")),
load_spikes(path.join(data_dir, sys.argv[1] + ".5I.st")),
load_spikes(path.join(data_dir, sys.argv[1] + ".5E.st")),
load_spikes(path.join(data_dir, sys.argv[1] + ".4I.st")),
load_spikes(path.join(data_dir, sys.argv[1] + ".4E.st")),
load_spikes(path.join(data_dir, sys.argv[1] + ".23I.st")),
load_spikes(path.join(data_dir, sys.argv[1] + ".23E.st"))]
# Find the maximum spike time and convert to seconds
duration_s = max(np.amax(t) for t, _, _, _ in pop_spikes) / 1000.0
# Create plot
figure, axes = plt.subplots(1, 2)
start_id = 0
bar_y = 0.0
for t, i, name, num in pop_spikes:
# Plot spikes
actor = axes[0].scatter(t, i + start_id, s=2, edgecolors="none")
# Plot bar showing rate in matching colour
axes[1].barh(bar_y, len(t) / (float(num) * duration_s), align="center", color=actor.get_facecolor(), ecolor="black")
# Update offset
start_id += num
# Update bar pos
bar_y += 1.0
axes[0].set_xlabel("Time [ms]")
axes[0].set_ylabel("Neuron number")
axes[1].set_xlabel("Mean firing rate [Hz]")
axes[1].set_yticks(np.arange(0.0, len(pop_spikes) * 1.0, 1.0))
axes[1].set_yticklabels(list(zip(*pop_spikes))[2])
# Show plot
plt.show()
| gpl-2.0 |
sgranade/python-image-processing-intro | overview/image_processing_sample_code.py | 1 | 20069 | # -*- coding: utf-8 -*-
"""
Examples from the "Image Processing with Python" presentation.
@author Stephen Granade <stephen@granades.com>
"""
from __future__ import division
from __future__ import print_function
#####
# IMAGE PROCESSING WITH SCIPY
# I'm often going to show images side-by-side, so here's a helper
# function to do that
def compare_images(imgs, title=None, subtitles=None, cmaps=None):
"""Plots multiple images side by side for comparison.
Args
----
imgs : sequence of ndarrays
The images to be plotted.
title : string
The overall plot's title, if any.
subtitles : sequence of strings
Titles for the sub-plots, if any.
cmaps : sequence of color maps
The color maps to use with the sub-plots, if any.
If None, then all sub-plots default to grey.
"""
fig, ax = plt.subplots(1, len(imgs))
if title:
plt.suptitle(title)
for ix, img in enumerate(imgs):
cmapstr = 'gray'
titlestr = None
try:
if cmaps:
cmapstr = cmaps[ix]
except:
pass
try:
if subtitles:
titlestr = subtitles[ix]
except:
pass
ax[ix].imshow(img, cmap=cmapstr)
ax[ix].set_axis_off()
if titlestr:
ax[ix].set_title(titlestr)
plt.tight_layout()
return fig, ax
#####
# SIMPLE IMAGE LOADING AND DISPLAYING
import numpy as np
from scipy import ndimage
from scipy import misc
import matplotlib.pyplot as plt
img = misc.imread('Schroedinger.jpg')
plt.imshow(img)
# Why's it all rainbow like that? Because SCIENCE. We can fix that, though
plt.close('all')
plt.imshow(img, cmap='gray')
# You can also specify a color map object to use instead of a string
# For matplotlib color maps, see http://matplotlib.org/examples/color/colormaps_reference.html
plt.close('all')
plt.imshow(img, cmap=plt.cm.gray)
# Save with imsave, which lets you auto-specify the file type by extension
misc.imsave('Schroedinger-2.png', img)
# The PPT has more information about the Python Imaging Libray (PIL) that
# scipy uses for image reading and saving.
# To set image-type-specific options, convert an ndarray image to a PIL
# image object and use its save() function
pil_img = misc.toimage(img) # Gets a PIL Image
pil_img.save('Schroedinger-3.jpg', quality=30)
# You can adjust the image's luminance contrast when you show it
plt.close('all')
plt.figure()
plt.subplot(1,2,1)
plt.imshow(img, cmap='gray')
plt.subplot(1,2,2)
plt.imshow(img, cmap='gray', vmin=30, vmax=150)
# What if you wanted to adjust it dynamically? A library called guiqwt can help
# See https://code.google.com/p/guiqwt/
import guidata
import guiqwt
import guiqwt.image
import guiqwt.plot
_app = guidata.qapplication() # Required to start up guidata
imageitem = guiqwt.builder.make.image(img, colormap='gray')
win = guiqwt.plot.ImageDialog(edit=False, toolbar=True, wintitle="Contrast",
options=dict(show_contrast=True))
plot = win.get_plot()
plot.add_item(imageitem)
win.show()
win.exec_()
# Click on the plotted image to use the contrast enhancement panel
#####
# CROPPING, SCALING, ROTATING, FLIPPING
# Crop using standard array slicing notation
imgy, imgx = img.shape
cropped_img = img[0:imgy // 2, :]
plt.close('all')
plt.imshow(cropped_img, cmap='gray')
# Note that, with Matplotlib's default "jet" rainbow color map, you can see things
# like JPEG artifacts more easily
plt.close('all')
compare_images([cropped_img, cropped_img],
title="Check Out Those JPEG Artifacts",
cmaps=['gray', 'jet'])
# To scale, use the imresize() function from scipy.misc
resized_img = misc.imresize(img, 0.30)
plt.close('all')
compare_images([img, resized_img], title='Float-Resized Image',
subtitles=['Original', 'Resized'])
# imresize takes the size as a float (fraction), integer (percent), or
# tuple (final size)...or so it says. As far as I can tell, integer
# scaling is broken
resized_img = misc.imresize(img, 10)
# Tuples work, though
resized_img = misc.imresize(img, (img.shape[0] // 2, img.shape[1]))
plt.close('all')
compare_images([img, resized_img], title='Tuple-Resized Image')
# You can also define the interpolation method:
# interp='nearest' (Preserves hard images, so jaggy)
# interp='bilinear'
# interp='bicubic' (Good for smooth gradients)
# interp='cubic'
# To rotate, use the rotate() function from scipy.ndimage
rotated_img = ndimage.rotate(img, 30)
plt.close('all')
compare_images([img, rotated_img], title='Rotated Image')
# By default rotate will make the array big enough that nothing gets cut
# off. You can change that, of course
cropped_rotated_img = ndimage.rotate(img, 30, reshape=False)
plt.close('all')
fig, ax = compare_images([rotated_img, cropped_rotated_img],
title='Reshaped & Non-Reshaped Rotation',
subtitles=['Rotation w/Reshaping',
'Rotation w/o Reshaping'])
# Since the two graphs use different scales, re-scale the second, smaller
# plot to match the first
ax[1].set_xlim(ax[0].get_xlim())
ax[1].set_ylim(ax[0].get_ylim())
plt.draw()
# To flip, use the standard NumPy functions flipud() and fliprl()
flipped_img = np.flipud(img)
plt.close('all')
compare_images([img, flipped_img], title='Flipped Image')
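
# As a quick extra sketch using plain NumPy: fliplr() mirrors left-right,
# and rot90() handles exact quarter turns without any interpolation
mirrored_img = np.fliplr(img)
quarter_turned_img = np.rot90(img)
plt.close('all')
compare_images([img, mirrored_img, quarter_turned_img],
               title='Mirrored and Quarter-Turned Image',
               subtitles=['Original', 'np.fliplr', 'np.rot90'])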
#####
# FILTERING
# The presentation explains what filtering is, and why you might want to use a filter
# scipy.ndimage includes several common filters. For example, Gaussian
# filters, which soften and blur images, are in scipy.ndimage
blurred_img = ndimage.gaussian_filter(img, sigma=1)
plt.close('all')
compare_images([img, blurred_img], title='Blurred Image',
subtitles=['Original', "Gaussian Blurred $\sigma$=1"])
# The larger the Gaussian's sigma, the more it blurs
more_blurred_img = ndimage.gaussian_filter(img, sigma=3)
plt.close('all')
compare_images([img, blurred_img, more_blurred_img], title='Comparing Blurring',
subtitles=['Original', 'Gaussian $\sigma$=1',
'Gaussian $\sigma$=3'])
# What if you have a noisy image?
cropped_img = img[50:140, 90:180]
noisy_img = cropped_img + (cropped_img.std()*np.random.random(cropped_img.shape) -
(cropped_img.std()/2)*np.ones(cropped_img.shape))
plt.close('all')
compare_images([cropped_img, noisy_img], title="Noisy Image")
# You can use a Gaussian filter to de-noise the image
denoised_img = ndimage.gaussian_filter(noisy_img, sigma=1)
plt.close('all')
compare_images([cropped_img, noisy_img, denoised_img],
title="Gaussian Denoising",
subtitles=['Original', 'Noisy', 'Denoised'])
# Or you can use a median filter to better preserve edges
median_denoised_img = ndimage.median_filter(noisy_img, 3)
plt.close('all')
compare_images([noisy_img, denoised_img, median_denoised_img],
title="Gaussian vs Median Denoising",
subtitles=['Noisy', 'Gaussian', 'Median'])
#####
# READING IMAGES INTO DIFFERENT COLOR SPACES
# You can read in color images
color_img = ndimage.imread('Commodore-Grace-Hopper.jpg')
plt.close('all')
plt.imshow(color_img)
plt.title("Color Image")
print("The color image's dimensions are %s" % str(color_img.shape))
# You can read in a color image as greyscale
grey_img = ndimage.imread('Commodore-Grace-Hopper.jpg', flatten=True)
plt.close('all')
plt.imshow(grey_img, cmap='gray')
plt.title("Color Image Read In Greyscale")
print("The dimensions of the color image read in as greyscale are %s" %
str(grey_img.shape))
# By default, color images are read in using the RGB color space
# but you can change that
ycbcr_img = ndimage.imread('Commodore-Grace-Hopper.jpg', mode='YCbCr')
plt.close('all')
plt.imshow(ycbcr_img)
plt.title("Color Image Read In in YCbCr")
print("The YCbCr image's dimensions are %s" % str(ycbcr_img.shape))
# I'm not actually using these color maps, but I'm leaving them in as
# an example of how to make a gradient color map
import matplotlib.colors
cb_cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
'cb_cmap', ['yellow', 'blue'])
cr_cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
'cr_cmap', ['yellow', 'red'])
# Create RGB representations of each individual channel
ychannel = ycbcr_img.copy();
ychannel[:,:,1] = ychannel[:,:,0]
ychannel[:,:,2] = ychannel[:,:,0]
cbchannel = ycbcr_img.copy();
cbchannel[:,:,0] = 128
cbchannel[:,:,2] = 128
crchannel = ycbcr_img.copy();
crchannel[:,:,0] = 128
crchannel[:,:,1] = 128
plt.close('all')
compare_images([color_img, ychannel, cbchannel, crchannel],
title="YCbCr channels",
subtitles=['Color', 'Luminance (Y)', 'Chroma Blue (Cb)',
'Chroma Red (Cr)'])
#####
# IMAGE PROCESSING WITH SCIKIT-IMAGE
# The PPT lists a number of SciKits that may be of interest. Here we're going
# to work with scikit-image, or skimage.
import skimage
import skimage.color
import skimage.io
#####
# LOCAL FILTERING
# skimage has a lot of local filters available, like Sobel
import skimage.filter
img = misc.imread('Schroedinger.jpg')
schro_vsobel = skimage.filter.vsobel(img)
plt.close('all')
compare_images([img, schro_vsobel], title="Vertical Sobel Edge Detection",
cmaps=['gray', None])
print(schro_vsobel.dtype)
# For some of the processing skimage does, it converts images to floating
# point, scaled from [-1, 1]. *NOT* [0, 1]
# There are other edge-detecting local transforms as well
schro_vprewitt = skimage.filter.vprewitt(img)
schro_vscharr = skimage.filter.vscharr(img)
plt.close('all')
compare_images([schro_vsobel, schro_vprewitt, schro_vscharr],
title="Sobel, Prewitt, and Scharr Edge Detection",
cmaps=[None, None, None],
subtitles=['Sobel', 'Prewitt', 'Scharr'])
# Remember my noise reduction example earlier? skimage has better routines
cropped_img = img[50:140, 90:180]
noisy_img = cropped_img + (
(cropped_img.std()*np.random.random(cropped_img.shape) -
(cropped_img.std()/2)*np.ones(cropped_img.shape)))
median_denoised_img = ndimage.median_filter(noisy_img, 3)
total_var_denoised_img = skimage.filter.denoise_tv_chambolle(noisy_img,
weight=30)
plt.close('all')
compare_images([cropped_img, noisy_img, median_denoised_img,
total_var_denoised_img],
title="Denoised Image",
subtitles=['Original', 'Noisy', 'Median', 'Total Variation'])
# What if you want to create a binary image using a threshold?
sudoku = ndimage.imread('sudoku.jpg', flatten=True)
# We could do a simple global threshold using a blindly-chosen threshold
sudoku_global_thresh = sudoku >= 128
# or use a better method to find that threshold
otsu_thresh = skimage.filter.threshold_otsu(sudoku)
sudoku_otsu_thresh = sudoku >= otsu_thresh
# but skimage has an adaptive threshold function
sudoku_adaptive_thresh = skimage.filter.threshold_adaptive(sudoku,
block_size=91,
offset=2)
plt.close('all')
compare_images([sudoku, sudoku_global_thresh, sudoku_otsu_thresh,
sudoku_adaptive_thresh],
title="Global, Otsu's Method, and Adaptive Thresholding",
subtitles=['Original', 'Global Threshold', "Otsu's Method",
'Adaptive Threshold'])
#####
# ADJUSTING EXPOSURE
import skimage.exposure
# Using skimage, we can perform contrast enhancement automatically by
# equalizing the picture's histogram. The presentation has more information
# on histogram equalization
# Because of the flatten operation, "sudoku" is of type float
print("Sudoku is of type %s, with max value %f and min value of %f" %
(sudoku.dtype.name, np.max(sudoku), np.min(sudoku)))
# but it's not scaled from [-1, 1] like skimage wants. Fix that!
sudoku_scaled = (sudoku - 127.5)/256
sudoku_equalized = skimage.exposure.equalize_hist(sudoku_scaled)
plt.close('all')
compare_images([sudoku_scaled, sudoku_equalized], title="Equalizing Exposure",
subtitles=['Original', 'Equalized'])
#####
# MORPHOLOGICAL OPERATIONS
import skimage.morphology as mo
# To learn about morphological image processing like erosion, dilation,
# opening and closing, see
# https://www.cs.auckland.ac.nz/courses/compsci773s1c/lectures/ImageProcessing-html/topic4.htm
# Using mode='L' to read in greyscale prevents us from getting an array
# of floats back
squares = ndimage.imread('squares.png', mode='L')
# Erosion eats away at bright areas
squares_eroded = mo.erosion(squares, mo.square(3))
squares_diff = squares - squares_eroded
plt.close('all')
compare_images([squares, squares_eroded, squares_diff],
title="Morphological Erosion",
subtitles=['Original', 'Eroded', 'Difference'])
# Dilation expands bright areas
squares_dilated = mo.dilation(squares, mo.square(3))
squares_diff = squares_dilated - squares
plt.close('all')
compare_images([squares, squares_dilated, squares_diff],
title="Morphological Dilation",
subtitles=['Original', 'Dilated', 'Difference'])
# Opening erodes and then dilates, opening up dark gaps between features
squares_opened = mo.opening(squares, mo.square(3))
squares_diff = squares - squares_opened
plt.close('all')
compare_images([squares, squares_opened, squares_diff],
title="Morphological Opening",
subtitles=['Original', 'Opened', 'Difference'])
# Closing dilates and then erodes, filling in small dark gaps between features
squares_closed = mo.closing(squares, mo.square(3))
squares_diff = squares_closed - squares
plt.close('all')
compare_images([squares, squares_closed, squares_diff],
title="Morphological Closing",
subtitles=['Original', 'Closed', 'Difference'])
#####
# PARAMETRIC TRANSFORMATIONS
# Parametric transformations use matrices to describe translations,
# rotations, scaling, skew, and more. For more information, see
# http://www.willamette.edu/~gorr/classes/GeneralGraphics/Transforms/transforms2d.htm
# Early in the class, we rotated using scipy.ndimage.rotate and scaled
# using scipy.misc.imresize (see above)
# Alternatively: you can use the skikit-image routines, which have the
# advantage of being faster and also accepting transformation matrices
import skimage.transform as tf
img = misc.imread('Schroedinger.jpg')
# skimage.transform includes rotation
rotated_img = tf.rotate(img, 30)
uncropped_rotated_img = tf.rotate(img, 30, resize=True)
plt.close('all')
compare_images([img, rotated_img, uncropped_rotated_img],
title='Unresized & Resized scikit-image Rotation',
subtitles=['Original', 'Rotation w/o Resizing',
'Rotation w/Resizing'])
# Note that this is opposite of how scipy.ndimage.rotate() works: by
# default, the image isn't resized (or reshaped, to use ndimage.rotate()'s
# language
# skimage.transform also includes rescale()
rescaled_img = tf.rescale(img, scale=.30)
plt.close('all')
compare_images([img, rescaled_img], title='Float-Rescaled Image',
subtitles=['Original', 'Resized'])
# skimage.transform.rescale() will let you pass a tuple to scale it
# by different percentages in each direction
rescaled_img = tf.rescale(img, scale=(.3, .5))
plt.close('all')
compare_images([img, rescaled_img], title='Tuple-Rescaled Image')
# If you want to specify the final shape of it, use
# skimage.transform.resize()
resized_img = tf.resize(img, (img.shape[0] // 2, img.shape[1]))
plt.close('all')
compare_images([img, resized_img], title='Resized Image')
# You can define the interpolation method with the "order" parameter:
# order = 0 (nearest neighbor; preserves hard images, so jaggy)
# order = 1 (bilinear, the default)
# order = 2 (biquadratic)
# order = 3 (bicubic, good for smooth gradients)
# order = 4 (biquartic)
# order = 5 (biquintic)
# skimage.transform includes several transformations as classes
# The SimilarityTransform is for translation, rotation, and scale
shiftright = tf.SimilarityTransform(translation=(-20, 0))
plt.close('all')
compare_images([img, tf.warp(img, shiftright)],
title='Translation with scikit-image',
subtitles=['Original', 'Translated'])
rotccw = tf.SimilarityTransform(rotation=np.pi / 4)
plt.close('all')
compare_images([img, tf.warp(img, rotccw)],
title='Rotation with scikit-image',
subtitles=['Original', 'Rotated'])
upsize = tf.SimilarityTransform(scale=0.9)
plt.close('all')
compare_images([img, tf.warp(img, upsize)],
title='Scaling with scikit-image',
subtitles=['Original', 'Scaled'])
# AffineTransformation adds shearing, along with translation, rotation,
# and scale
skewhoriz = tf.AffineTransform(shear=np.pi/4)
skewvert = tf.AffineTransform(matrix=skewhoriz._matrix.T)
plt.close('all')
compare_images([img,
tf.warp(img, skewhoriz,
output_shape=(img.shape[0], img.shape[1] * 2)),
tf.warp(img, skewvert,
output_shape=(img.shape[0] * 2, img.shape[1]))],
title='Affine Skew with scikit-image',
subtitles=['Original', 'Skewed Horizontal',
'Skewed Vertical'])
#####
# LABELING REGIONS
# Let's generate some blobs to work with
points = np.zeros((256, 256))
num_pts = 20
point_array = (256*np.random.random((2, num_pts**2))).astype(np.int)
points[(point_array[0]), (point_array[1])] = 1
blurred_points = ndimage.gaussian_filter(points, sigma=256/(4.*num_pts))
blobs = blurred_points > np.mean(blurred_points)
plt.close('all')
compare_images([points, blurred_points, blobs], title='Generating Blobs',
subtitles=['Points', 'Blurred Points', 'Thresholded Blobs'])
# Label the connected regions
labels = skimage.morphology.label(blobs)
plt.close('all')
compare_images([blobs, labels], title="Blobs and Their Labels",
cmaps=['gray', 'jet'])
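
# A small follow-up sketch: count the labeled regions and their pixel sizes
# with scipy.ndimage (the background label may be 0 or -1 depending on the
# scikit-image version, so keep only the positive labels here)
region_ids = np.unique(labels)
region_ids = region_ids[region_ids > 0]
region_sizes = ndimage.sum(blobs, labels, region_ids)
print("Found %d blobs with a mean size of %.1f pixels"
      % (len(region_ids), np.mean(region_sizes)))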
#####
# FEATURE MATCHING
import skimage.transform as tf
from skimage.feature import (match_descriptors, ORB, plot_matches)
schroedinger = misc.imread('Schroedinger.jpg')
# Transform the image using the skimage.transform library
# "rotate" does what you might expect
schroedinger_rotate = tf.rotate(schroedinger, 180)
# This sets up a transformation that changes the image's scale, rotates it,
# and moves it. "warp" then applies that transformation to the image
tform = tf.AffineTransform(scale=(1.3, 1.1), rotation=0.5,
translation=(0, -200))
schroedinger_warped = tf.warp(schroedinger, tform)
# ORB is an algorithm that detects good features in an image and then
# describes them in a compact way. The descriptions can then be matched
# across multiple images.
descriptor_extractor = ORB(n_keypoints=200)
# Apply the ORB algorithm to our images
descriptor_extractor.detect_and_extract(schroedinger)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors
descriptor_extractor.detect_and_extract(schroedinger_rotate)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors
descriptor_extractor.detect_and_extract(schroedinger_warped)
keypoints3 = descriptor_extractor.keypoints
descriptors3 = descriptor_extractor.descriptors
# See which descriptors match across the images
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)
fig, ax = plt.subplots(nrows=2, ncols=1)
plot_matches(ax[0], schroedinger, schroedinger_warped, keypoints1, keypoints2,
matches12)
ax[0].axis('off')
plot_matches(ax[1], schroedinger, schroedinger_warped, keypoints1, keypoints3,
matches13)
ax[1].axis('off')
plt.show()
plt.gray()
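
# A common follow-up (sketched here, and assuming skimage.measure.ransac is
# available in this scikit-image version): robustly estimate the warp between
# the original and warped image from the matched keypoints, discarding
# outlier matches
from skimage.measure import ransac

src = keypoints1[matches13[:, 0]][:, ::-1]  # (row, col) -> (x, y)
dst = keypoints3[matches13[:, 1]][:, ::-1]
model_robust, inliers = ransac((src, dst), tf.AffineTransform,
                               min_samples=3, residual_threshold=2,
                               max_trials=500)
print("RANSAC kept %d of %d matches as inliers"
      % (inliers.sum(), len(matches13)))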
| unlicense |
mne-tools/mne-tools.github.io | stable/_downloads/bbc4594eea14cf3d0473ec5148e21b09/30_mne_dspm_loreta.py | 6 | 5666 | """
.. _tut-inverse-methods:
Source localization with MNE/dSPM/sLORETA/eLORETA
=================================================
The aim of this tutorial is to teach you how to compute and apply a linear
minimum-norm inverse method on evoked/raw/epochs data.
"""
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
###############################################################################
# Process MEG data
data_path = sample.data_path()
raw_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(raw_fname) # already has an average reference
events = mne.find_events(raw, stim_channel='STI 014')
event_id = dict(aud_l=1) # event trigger and conditions
tmin = -0.2 # start of each epoch (200ms before the trigger)
tmax = 0.5 # end of each epoch (500ms after the trigger)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
baseline = (None, 0) # means from the first instant to t = 0
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=('meg', 'eog'), baseline=baseline, reject=reject)
###############################################################################
# Compute regularized noise covariance
# ------------------------------------
# For more details see :ref:`tut_compute_covariance`.
noise_cov = mne.compute_covariance(
epochs, tmax=0., method=['shrunk', 'empirical'], rank=None, verbose=True)
fig_cov, fig_spectra = mne.viz.plot_cov(noise_cov, raw.info)
###############################################################################
# Compute the evoked response
# ---------------------------
# Let's just use the MEG channels for simplicity.
evoked = epochs.average().pick('meg')
evoked.plot(time_unit='s')
evoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type='mag',
time_unit='s')
###############################################################################
# It's also a good idea to look at whitened data:
evoked.plot_white(noise_cov, time_unit='s')
del epochs, raw # to save memory
###############################################################################
# Inverse modeling: MNE/dSPM on evoked and raw data
# -------------------------------------------------
# Here we first read the forward solution. You will likely need to compute
# one for your own data -- see :ref:`tut-forward` for information on how
# to do it.
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fwd = mne.read_forward_solution(fname_fwd)
###############################################################################
# Next, we make an MEG inverse operator.
inverse_operator = make_inverse_operator(
evoked.info, fwd, noise_cov, loose=0.2, depth=0.8)
del fwd
# You can write it to disk with::
#
# >>> from mne.minimum_norm import write_inverse_operator
# >>> write_inverse_operator('sample_audvis-meg-oct-6-inv.fif',
# inverse_operator)
###############################################################################
# Compute inverse solution
# ------------------------
# We can use this to compute the inverse solution and obtain source time
# courses:
method = "dSPM"
snr = 3.
lambda2 = 1. / snr ** 2
stc, residual = apply_inverse(evoked, inverse_operator, lambda2,
method=method, pick_ori=None,
return_residual=True, verbose=True)
###############################################################################
# Visualization
# -------------
# We can look at different dipole activations:
fig, ax = plt.subplots()
ax.plot(1e3 * stc.times, stc.data[::100, :].T)
ax.set(xlabel='time (ms)', ylabel='%s value' % method)
###############################################################################
# Examine the original data and the residual after fitting:
fig, axes = plt.subplots(2, 1)
evoked.plot(axes=axes)
for ax in axes:
ax.texts = []
for line in ax.lines:
line.set_color('#98df81')
residual.plot(axes=axes)
###############################################################################
# Here we use peak getter to move visualization to the time point of the peak
# and draw a marker at the maximum peak vertex.
# sphinx_gallery_thumbnail_number = 9
vertno_max, time_max = stc.get_peak(hemi='rh')
subjects_dir = data_path + '/subjects'
surfer_kwargs = dict(
hemi='rh', subjects_dir=subjects_dir,
clim=dict(kind='value', lims=[8, 12, 15]), views='lateral',
initial_time=time_max, time_unit='s', size=(800, 800), smoothing_steps=10)
brain = stc.plot(**surfer_kwargs)
brain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue',
scale_factor=0.6, alpha=0.5)
brain.add_text(0.1, 0.9, 'dSPM (plus location of maximal activation)', 'title',
font_size=14)
# The documentation website's movie is generated with:
# brain.save_movie(..., tmin=0.05, tmax=0.15, interpolation='linear',
# time_dilation=20, framerate=10, time_viewer=True)
###############################################################################
# There are many other ways to visualize and work with source data, see
# for example:
#
# - :ref:`tut-viz-stcs`
# - :ref:`ex-morph-surface`
# - :ref:`ex-morph-volume`
# - :ref:`ex-vector-mne-solution`
# - :ref:`tut-dipole-orientations`
# - :ref:`tut-mne-fixed-free`
# - :ref:`examples using apply_inverse
# <sphx_glr_backreferences_mne.minimum_norm.apply_inverse>`.
| bsd-3-clause |
detrout/debian-statsmodels | statsmodels/sandbox/tsa/diffusion2.py | 38 | 13366 | """ Diffusion 2: jump diffusion, stochastic volatility, stochastic time
Created on Tue Dec 08 15:03:49 2009
Author: josef-pktd following Meucci
License: BSD
contains:
CIRSubordinatedBrownian
Heston
IG
JumpDiffusionKou
JumpDiffusionMerton
NIG
VG
References
----------
Attilio Meucci, Review of Discrete and Continuous Processes in Finance: Theory and Applications
Bloomberg Portfolio Research Paper No. 2009-02-CLASSROOM July 1, 2009
http://papers.ssrn.com/sol3/papers.cfm?abstract_id=1373102
this is currently mostly a translation from matlab of
http://www.mathworks.com/matlabcentral/fileexchange/23554-review-of-discrete-and-continuous-processes-in-finance
license BSD:
Copyright (c) 2008, Attilio Meucci
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
TODO:
* vectorize where possible
* which processes are exactly simulated by finite differences ?
* include or exclude (now) the initial observation ?
* convert to and merge with diffusion.py (part 1 of diffusions)
* which processes can be easily estimated ?
loglike or characteristic function ?
* tests ? check for possible index errors (random indices), graphs look ok
* adjust notation, variable names, more consistent, more pythonic
* delete a few unused lines, cleanup
* docstrings
random bug (showed up only once, need fuzz-testing to replicate)
File "...\diffusion2.py", line 375, in <module>
x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
File "...\diffusion2.py", line 129, in simulate
jumps_ts[n] = CumS[Events]
IndexError: index out of bounds
CumS is empty array, Events == -1
"""
import numpy as np
#from scipy import stats # currently only uses np.random
import matplotlib.pyplot as plt
class JumpDiffusionMerton(object):
'''
Example
-------
mu=.00 # deterministic drift
sig=.20 # Gaussian component
    lambd=3.45 # Poisson process arrival rate
a=0 # drift of log-jump
D=.2 # st.dev of log-jump
X = JumpDiffusionMerton().simulate(mu,sig,lambd,a,D,ts,nrepl)
plt.figure()
plt.plot(X.T)
plt.title('Merton jump-diffusion')
'''
def __init__(self):
pass
def simulate(self, m,s,lambd,a,D,ts,nrepl):
T = ts[-1] # time points
# simulate number of jumps
n_jumps = np.random.poisson(lambd*T, size=(nrepl, 1))
jumps=[]
nobs=len(ts)
jumps=np.zeros((nrepl,nobs))
for j in range(nrepl):
# simulate jump arrival time
t = T*np.random.rand(n_jumps[j])#,1) #uniform
t = np.sort(t,0)
# simulate jump size
S = a + D*np.random.randn(n_jumps[j],1)
# put things together
CumS = np.cumsum(S)
jumps_ts = np.zeros(nobs)
for n in range(nobs):
Events = np.sum(t<=ts[n])-1
#print n, Events, CumS.shape, jumps_ts.shape
jumps_ts[n]=0
if Events > 0:
jumps_ts[n] = CumS[Events] #TODO: out of bounds see top
#jumps = np.column_stack((jumps, jumps_ts)) #maybe wrong transl
jumps[j,:] = jumps_ts
D_Diff = np.zeros((nrepl,nobs))
for k in range(nobs):
Dt=ts[k]
if k>1:
Dt=ts[k]-ts[k-1]
D_Diff[:,k]=m*Dt + s*np.sqrt(Dt)*np.random.randn(nrepl)
x = np.hstack((np.zeros((nrepl,1)),np.cumsum(D_Diff,1)+jumps))
return x
class JumpDiffusionKou(object):
def __init__(self):
pass
def simulate(self, m,s,lambd,p,e1,e2,ts,nrepl):
T=ts[-1]
# simulate number of jumps
N = np.random.poisson(lambd*T,size =(nrepl,1))
jumps=[]
nobs=len(ts)
jumps=np.zeros((nrepl,nobs))
for j in range(nrepl):
# simulate jump arrival time
t=T*np.random.rand(N[j])
t=np.sort(t)
# simulate jump size
ww = np.random.binomial(1, p, size=(N[j]))
S = ww * np.random.exponential(e1, size=(N[j])) - \
(1-ww) * np.random.exponential(e2, N[j])
# put things together
CumS = np.cumsum(S)
jumps_ts = np.zeros(nobs)
for n in range(nobs):
Events = sum(t<=ts[n])-1
jumps_ts[n]=0
                if Events > 0:
jumps_ts[n]=CumS[Events]
jumps[j,:] = jumps_ts
D_Diff = np.zeros((nrepl,nobs))
for k in range(nobs):
Dt=ts[k]
if k>1:
Dt=ts[k]-ts[k-1]
D_Diff[:,k]=m*Dt + s*np.sqrt(Dt)*np.random.normal(size=nrepl)
x = np.hstack((np.zeros((nrepl,1)),np.cumsum(D_Diff,1)+jumps))
return x
class VG(object):
'''variance gamma process
'''
def __init__(self):
pass
def simulate(self, m,s,kappa,ts,nrepl):
T=len(ts)
dXs = np.zeros((nrepl,T))
for t in range(T):
dt=ts[1]-0
if t>1:
dt = ts[t]-ts[t-1]
#print dt/kappa
#TODO: check parameterization of gamrnd, checked looks same as np
d_tau = kappa * np.random.gamma(dt/kappa,1.,size=(nrepl))
#print s*np.sqrt(d_tau)
# this raises exception:
#dX = stats.norm.rvs(m*d_tau,(s*np.sqrt(d_tau)))
# np.random.normal requires scale >0
dX = np.random.normal(loc=m*d_tau, scale=1e-6+s*np.sqrt(d_tau))
dXs[:,t] = dX
x = np.cumsum(dXs,1)
return x
class IG(object):
'''inverse-Gaussian ??? used by NIG
'''
def __init__(self):
pass
def simulate(self, l,m,nrepl):
N = np.random.randn(nrepl,1)
Y = N**2
X = m + (.5*m*m/l)*Y - (.5*m/l)*np.sqrt(4*m*l*Y+m*m*(Y**2))
U = np.random.rand(nrepl,1)
ind = U>m/(X+m)
X[ind] = m*m/X[ind]
return X.ravel()
class NIG(object):
'''normal-inverse-Gaussian
'''
def __init__(self):
pass
def simulate(self, th,k,s,ts,nrepl):
T = len(ts)
DXs = np.zeros((nrepl,T))
for t in range(T):
Dt=ts[1]-0
if t>1:
Dt=ts[t]-ts[t-1]
l = 1/k*(Dt**2)
m = Dt
DS = IG().simulate(l,m,nrepl)
N = np.random.randn(nrepl)
DX = s*N*np.sqrt(DS) + th*DS
#print DS.shape, DX.shape, DXs.shape
DXs[:,t] = DX
x = np.cumsum(DXs,1)
return x
class Heston(object):
'''Heston Stochastic Volatility
'''
def __init__(self):
pass
def simulate(self, m, kappa, eta,lambd,r, ts, nrepl,tratio=1.):
T = ts[-1]
nobs = len(ts)
dt = np.zeros(nobs) #/tratio
dt[0] = ts[0]-0
dt[1:] = np.diff(ts)
DXs = np.zeros((nrepl,nobs))
dB_1 = np.sqrt(dt) * np.random.randn(nrepl,nobs)
dB_2u = np.sqrt(dt) * np.random.randn(nrepl,nobs)
dB_2 = r*dB_1 + np.sqrt(1-r**2)*dB_2u
vt = eta*np.ones(nrepl)
v=[]
dXs = np.zeros((nrepl,nobs))
vts = np.zeros((nrepl,nobs))
for t in range(nobs):
dv = kappa*(eta-vt)*dt[t]+ lambd*np.sqrt(vt)*dB_2[:,t]
dX = m*dt[t] + np.sqrt(vt*dt[t]) * dB_1[:,t]
vt = vt + dv
vts[:,t] = vt
dXs[:,t] = dX
x = np.cumsum(dXs,1)
return x, vts
class CIRSubordinatedBrownian(object):
'''CIR subordinated Brownian Motion
'''
def __init__(self):
pass
def simulate(self, m, kappa, T_dot,lambd,sigma, ts, nrepl):
T = ts[-1]
nobs = len(ts)
dtarr = np.zeros(nobs) #/tratio
dtarr[0] = ts[0]-0
dtarr[1:] = np.diff(ts)
DXs = np.zeros((nrepl,nobs))
dB = np.sqrt(dtarr) * np.random.randn(nrepl,nobs)
yt = 1.
dXs = np.zeros((nrepl,nobs))
dtaus = np.zeros((nrepl,nobs))
y = np.zeros((nrepl,nobs))
for t in range(nobs):
dt = dtarr[t]
dy = kappa*(T_dot-yt)*dt + lambd*np.sqrt(yt)*dB[:,t]
yt = np.maximum(yt+dy,1e-10) # keep away from zero ?
dtau = np.maximum(yt*dt, 1e-6)
dX = np.random.normal(loc=m*dtau, scale=sigma*np.sqrt(dtau))
y[:,t] = yt
dtaus[:,t] = dtau
dXs[:,t] = dX
tau = np.cumsum(dtaus,1)
x = np.cumsum(dXs,1)
return x, tau, y
def schout2contank(a,b,d):
th = d*b/np.sqrt(a**2-b**2)
k = 1/(d*np.sqrt(a**2-b**2))
s = np.sqrt(d/np.sqrt(a**2-b**2))
return th,k,s
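# Hedged worked example (not part of the original module): for the Schoutens
# parameters used in the __main__ block below (a=2.1, b=0, d=1) the conversion
# gives th = 0.0, k = 1/2.1 ~ 0.476 and s = sqrt(1/2.1) ~ 0.690, i.e.
#   >>> schout2contank(2.1, 0, 1)   # doctest-style sketch
#   (0.0, 0.476..., 0.690...)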
if __name__ == '__main__':
#Merton Jump Diffusion
#^^^^^^^^^^^^^^^^^^^^^
# grid of time values at which the process is evaluated
#("0" will be added, too)
nobs = 252.#1000 #252.
ts = np.linspace(1./nobs, 1., nobs)
nrepl=5 # number of simulations
mu=.010 # deterministic drift
sigma = .020 # Gaussian component
lambd = 3.45 *10 # Poisson process arrival rate
a=0 # drift of log-jump
D=.2 # st.dev of log-jump
jd = JumpDiffusionMerton()
x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('Merton jump-diffusion')
sigma = 0.2
lambd = 3.45
x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('Merton jump-diffusion')
#Kou jump diffusion
#^^^^^^^^^^^^^^^^^^
mu=.0 # deterministic drift
lambd=4.25 # Poisson process arrival rate
p=.5 # prob. of up-jump
e1=.2 # parameter of up-jump
e2=.3 # parameter of down-jump
sig=.2 # Gaussian component
x = JumpDiffusionKou().simulate(mu,sig,lambd,p,e1,e2,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('double exponential (Kou jump diffusion)')
#variance-gamma
#^^^^^^^^^^^^^^
mu = .1 # deterministic drift in subordinated Brownian motion
kappa = 1. #10. #1 # inverse for gamma shape parameter
sig = 0.5 #.2 # s.dev in subordinated Brownian motion
x = VG().simulate(mu,sig,kappa,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo
plt.title('variance gamma')
#normal-inverse-Gaussian
#^^^^^^^^^^^^^^^^^^^^^^^
# (Schoutens notation)
al = 2.1
be = 0
de = 1
# convert parameters to Cont-Tankov notation
th,k,s = schout2contank(al,be,de)
x = NIG().simulate(th,k,s,ts,nrepl)
plt.figure()
plt.plot(x.T) #Todo x-axis
plt.title('normal-inverse-Gaussian')
#Heston Stochastic Volatility
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^
m=.0
kappa = .6 # 2*Kappa*Eta>Lambda^2
eta = .3**2
lambd =.25
r = -.7
T = 20.
nobs = 252.*T#1000 #252.
tsh = np.linspace(T/nobs, T, nobs)
x, vts = Heston().simulate(m,kappa, eta,lambd,r, tsh, nrepl, tratio=20.)
plt.figure()
plt.plot(x.T)
plt.title('Heston Stochastic Volatility')
plt.figure()
plt.plot(np.sqrt(vts).T)
plt.title('Heston Stochastic Volatility - CIR Vol.')
plt.figure()
plt.subplot(2,1,1)
plt.plot(x[0])
plt.title('Heston Stochastic Volatility process')
plt.subplot(2,1,2)
plt.plot(np.sqrt(vts[0]))
plt.title('CIR Volatility')
#CIR subordinated Brownian
#^^^^^^^^^^^^^^^^^^^^^^^^^
m=.1
sigma=.4
kappa=.6 # 2*Kappa*T_dot>Lambda^2
T_dot=1
lambd=1
#T=252*10
#dt=1/252
#nrepl=2
T = 10.
nobs = 252.*T#1000 #252.
tsh = np.linspace(T/nobs, T, nobs)
x, tau, y = CIRSubordinatedBrownian().simulate(m, kappa, T_dot,lambd,sigma, tsh, nrepl)
plt.figure()
plt.plot(tsh, x.T)
plt.title('CIRSubordinatedBrownian process')
plt.figure()
plt.plot(tsh, y.T)
plt.title('CIRSubordinatedBrownian - CIR')
plt.figure()
plt.plot(tsh, tau.T)
plt.title('CIRSubordinatedBrownian - stochastic time ')
plt.figure()
plt.subplot(2,1,1)
plt.plot(tsh, x[0])
plt.title('CIRSubordinatedBrownian process')
plt.subplot(2,1,2)
plt.plot(tsh, y[0], label='CIR')
plt.plot(tsh, tau[0], label='stoch. time')
plt.legend(loc='upper left')
plt.title('CIRSubordinatedBrownian')
#plt.show()
| bsd-3-clause |
sheabrown/faraday_complexity | final/plots.py | 1 | 15250 | # Use inception class to access these
import sys
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from keras.utils import plot_model
from sklearn.metrics import confusion_matrix, f1_score, roc_curve
from keras.models import load_model
class plots:
"""
Class for making plots for the inception model.
Functions
_plotCNN
_plotF1
_plotParamProb
_plotROC
"""
def _plotCNN(self, to_file='graph.png'):
plot_model(self.model_, to_file=to_file)
def _plotROC(self, data='test', save=False, to_file='roc.pdf', fontsize=20):
"""
Function for plotting the ROC curve.
        To call:
            _plotROC(data, save=False, to_file, fontsize=20)
"""
try:
self.fpr_
self.tpr_
except:
self._getROC(data)
plt.figure(1)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.plot(self.fpr_, self.tpr_)
plt.xlabel(r'$\rm FPR$', fontsize=fontsize)
plt.ylabel(r'$\rm TPR$', fontsize=fontsize)
plt.tight_layout()
if save:
plt.savefig(to_file)
plt.close('all')
else:
plt.show()
def _plotF1(self, step=0.025, save=False, to_file='f1_score.pdf', fontsize=20):
"""
Function for plotting the F1 score as a function
of the threshold probability.
To call:
_plotF1(step, save=False, to_file, fontsize=20)
Parameters:
step stepsize to take (0.5 to 1.0)
save (boolean) save image
to_file file to save image to
fontsize fontsize of axis labels
"""
try:
self.threshold_
self.F1_
except:
self._getF1(step)
plt.figure(1)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.plot(self.threshold_, self.F1_)
plt.xlabel(r'$p_\mathrm{cutoff}$', fontsize=fontsize)
plt.ylabel(r'$F_{1} \, \mathrm{score}$', fontsize=fontsize)
plt.tight_layout()
if save:
plt.savefig(to_file)
plt.close('all')
else:
plt.show()
def _plotParamProb(self, param, kind='kde', gridsize=50, save=False, to_file="FluxProb.pdf", fontscale=1.25):
"""
Function for plotting a parameter of the second
component against its probability of being
complex, as measured by the model.
To call:
            _plotParamProb(param, kind, gridsize, save, to_file, fontscale)
Parameters:
param column name in self.dfComplex_
kind seaborn jointplot params: "kde", "hex", etc.
gridsize smoothing parameter
save (boolean) save image
            to_file    filepath to save image
fontscale axes label scaling
"""
try:
self.dfComplex_
except:
self._getComplexParams()
# ===================================================
# Dictionary for x-axis label
# ===================================================
label = {
"flux": r'$F_{2}$',
"depth": r'$\Delta \phi$',
"chi": r'$\Delta \chi$',
"sig": r'$\sigma_\mathrm{noise}$'
}
# ===================================================
# 1) Retrieve the flux of the second component
# 2) Retrieve the model's probability that the
# source is complex
# ===================================================
valu = pd.Series(self.dfComplex_[param], name=label[param])
prob = pd.Series(self.dfComplex_["prob"], name=r'$p_\mathrm{complex}$')
# ===================================================
# Create the plot
# ===================================================
sns.set(font_scale=fontscale)
sns.jointplot(valu, prob, kind=kind, gridsize=gridsize)
# ===================================================
# Save or display the image
# ===================================================
if save:
plt.savefig(to_file)
plt.close('all')
else:
plt.show()
def _plotBinaryParamProb(self, param, save=False, to_file='param_binary.pdf', fontsize=20,
s=10, alpha=0.05, cComplex='darkorange', cSimple='dodgerblue'):
plt.figure()
plt.scatter(self.dfSimple_[param], self.dfSimple_['prob'], color=cSimple, alpha=alpha, s=s)
plt.scatter(self.dfComplex_[param], self.dfComplex_['prob'], color=cComplex, alpha=alpha, s=s)
plt.xlabel(r'$\sigma$', fontsize=fontsize)
plt.ylabel(r'$p_\mathrm{complex}$', fontsize=fontsize)
if save:
plt.savefig(to_file)
plt.close('all')
else:
plt.show()
def _plotLoss(self, logfile=None, save=False, to_file='loss_vs_epoch.pdf', fontsize=20):
# ===================================================
# Load in the logfile or test to see if a
# logfile has already been loaded
# ===================================================
if logfile == None:
try:
self.dfLog_
except:
print('Please pass in the name of a logfile')
sys.exit(1)
else:
try:
self._loadLog(logfile)
except:
print('Failed to load logfile')
sys.exit(1)
# -------------- Initialize the Graph ---------
fig = plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.xlabel(r'$\rm Epoch$', fontsize=fontsize)
plt.ylabel(r'$\rm Loss$', fontsize=fontsize)
plt.plot(self.dfLog_.index, self.dfLog_['loss'], label='Training Loss')
plt.plot(self.dfLog_.index, self.dfLog_['val_loss'], label='Validation Loss')
plt.legend(loc='best', fontsize=15)
if save:
plt.savefig(to_file)
plt.close()
else:
plt.show()
plt.close()
def _plotAcc(self, logfile=None, save=False, to_file='acc_vs_epoch.pdf', fontsize=20):
"""
Function for plotting the accuracy as a function of epoch.
To call:
            _plotAcc(logfile, save, to_file, fontsize)
        Parameters:
            logfile     training log file to load (optional if a log has already been loaded)
            save        (boolean) save image
            to_file     file to save image to
            fontsize    fontsize of axis labels
"""
# ===================================================
# Load in the logfile or test to see if a
# logfile has already been loaded
# ===================================================
if logfile == None:
try:
self.dfLog_
except:
print('Please pass in the name of a logfile')
sys.exit(1)
else:
try:
self._loadLog(logfile)
except:
print('Failed to load logfile')
sys.exit(1)
# ===================================================
# Plot accuracy vs epoch
# ===================================================
fig = plt.figure()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.plot(self.dfLog_.index, self.dfLog_['binary_accuracy'], label='Training Binary Accuracy')
plt.plot(self.dfLog_.index, self.dfLog_['val_binary_accuracy'], label='Validation Binary Accuracy')
plt.xlabel('Epoch', fontsize=fontsize)
plt.ylabel('Binary Accuracy ', fontsize=fontsize)
plt.legend(loc='best', fontsize=15)
if save:
plt.savefig(to_file)
plt.close()
else:
plt.show()
plt.close()
'''
def _loadData(self, directory):
"""
Function for loading data arrays from a directory.
To call:
_loadModel(directory)
Parameters:
directory
"""
self.X_data = np.load(directory+'X_data.npy')
self.Y_data = np.load(directory+'label.npy')
#------ creation params --------
self.chi_data = np.load(directory+'chi.npy')
self.depth_data = np.load(directory+'depth.npy')
self.flux_data = np.load(directory+'flux.npy')
self.q_data = np.load(directory+'Q_data.npy')
self.s_data = np.load(directory+'S_data.npy')
self.sig_data = np.load(directory+'sig.npy')
self.u_data = np.load(directory+'U_data.npy')
'''
def _format_param_name(self, param_name):
"""
Function for formatting a string parameter name (chi, depth, etc....) to LateX
form for plot labels.
To call:
_format_param_name(param_name)
Parameters:
param_name
"""
if param_name == 'sigma':
return r'$\sigma$'
elif param_name == 'chi':
return r'$\Delta\chi$'
elif param_name == 'flux':
return r'$\Delta F$'
elif param_name == 'depth':
return r'$\Delta \phi$'
else:
return param_name
def _make_cut(self, param_array, param_name,num_cut=10,prob=0.5, save=False):
"""
Function for cutting along a single parameter value to test the model's performance over
a parameter range. For aid in finding parameter space that model works with certainty within.
Makes a plot showing the True Positive (TP) and True Negative (TN) rates as a function of the
supplied parameter.
To call:
_make_cut(param_array, param_name,num_cut, prob, save)
Parameters:
param_array
param_name
OPTIONAL:
num_cut -- number of cuts to make along the parameter
prob -- probability cutoff to classify as complex or simple
save -- True if want to save a .pdf
"""
cut_array = param_array
# ----------- sigma and other params are formatted differently, this handles either case ------
try:
cut_vals = np.linspace(0.,np.max(cut_array)[0]*.9,num_cut)
oned =False
except:
cut_vals = np.linspace(0.,np.max(cut_array)*.9,num_cut)
oned = True
matrix_vals = []
# --------- make a series of cuts and save results for plotting ----------
for c in cut_vals:
print (c)
#do the cut
float_check = type(0.1); tuple_check = type((0,1))
postcut = [];kept=[]
for i in range(len(cut_array)):
val = cut_array[i]
# ---------- once again handle tuples or floats depending on parameter format ----------
if type(val) == tuple_check:
if abs(val[0]-val[1]) >= c:
postcut.append(abs(val[0]-val[1]))
kept.append(i)
else:
if val >= c:
postcut.append(val)
kept.append(i)
try:
# -------- the subset of data --------------
X_new=np.array([self.X_data[k] for k in kept])
Y_new=np.array([self.Y_data[k] for k in kept])
# ----------- do predictions on the subset ----------
probs = self.model.predict(X_new)[:,1]
# --------- probability cutoff for simple vs complex -------------
predictions = np.where(probs > prob, 1, 0)
'''
#------------ Confusion Matrix -------------
[simple marked as simple simple marked as complex]
[complex marked as simple complex marked as complex]
'''
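                # Worked illustration (hypothetical counts, not from the data): if the kept
                # subset had 2 simple and 3 complex sources, all classified correctly,
                # confusion_matrix(Y_new, predictions) would return [[2, 0], [0, 3]];
                # the row-normalized diagonal entries give the True Simple / True Complex
                # percentages plotted below.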
cm = confusion_matrix(Y_new, predictions)
print(cm)
matrix_vals.append(cm)
except:
print ('Nothing in that cutoff, continuing...')
fstring = self._format_param_name(param_name)
fig = plt.figure(1)
try:
plt.scatter(cut_vals,[float(matrix_vals[i][0,0])/(matrix_vals[i][0,0]+matrix_vals[i][0,1])*100. for i in range(len(matrix_vals))],label='True Simple',c='g')
except:
print ('No simple sources in subsample...')
try:
plt.scatter(cut_vals,[float(matrix_vals[i][1,1])/(matrix_vals[i][1,0]+matrix_vals[i][1,1])*100. for i in range(len(matrix_vals))],label='True Complex',c='b')
except:
print ('No complex sources in subsample...')
plt.xlabel(fstring)
plt.ylabel('Percent Correct')
plt.title(r'Percent Correct over '+fstring)
plt.legend(loc=(0.3,0.8),fontsize=5)
if save:
            plt.savefig(param_name+'_plot.png', bbox_inches='tight')
else:
plt.show()
plt.close()
def _make_2d_cut(self, param_arr1, arr_name1, param_arr2, arr_name2,num_cut=10,prob=0.5,save=False):
"""
Function for cutting along two parameter values to test the model's performance over
a parameter space. For aid in finding parameter space that model works with certainty within.
Makes a plot showing the True Positive (TP) and True Negative (TN) rates as a function of the
supplied parameters. Functions similarly to _make_cut() above.
To call:
_make_2d_cut(param_arr1, arr_name1, param_arr2, arr_name2, num_cut, prob, save)
Parameters:
param_arr1
arr_name1
param_arr2
arr_name2
OPTIONAL:
num_cut -- number of cuts to make along the parameter
prob -- probability cutoff to classify as complex or simple
save -- True if want to save a .pdf
"""
# ----------- sigma and other params are formatted differently, this handles either case ------
try:
cut_vals1 = np.linspace(0.,np.max(param_arr1)[0]*.9,num_cut)
except:
cut_vals1 = np.linspace(0.,np.max(param_arr1)*.9,num_cut)
try:
cut_vals2 = np.linspace(0.,np.max(param_arr2)[0]*.9,num_cut)
except:
cut_vals2 = np.linspace(0.,np.max(param_arr2)*.9,num_cut)
matrix_vals_c = np.zeros((len(cut_vals1),len(cut_vals2)))
matrix_vals_s = np.zeros((len(cut_vals1),len(cut_vals2)))
# --------- make a series of cuts and save results for plotting ----------
for i in range(len(cut_vals1)):
for j in range(len(cut_vals2)):
#do the cut
c1 = cut_vals1[i]; c2 = cut_vals2[j]
float_check = type(0.1); tuple_check = type((0,1))
postcut = [];kept=[]
for k in range(len(param_arr1)):
val1 = param_arr1[k]
val2 = param_arr2[k]
# ---------- once again handle tuples or floats depending on parameter format ----------
if type(val1) == tuple_check:
if abs(val1[0]-val1[1]) >= c1 and abs(val2[0]-val2[1]) >= c2:
kept.append(k)
else:
if val1 >= c1 and val2 >= c2:
kept.append(k)
try:
# -------- the subset of data --------------
X_new=np.array([self.X_data[k] for k in kept])
Y_new=np.array([self.Y_data[k] for k in kept])
# ----------- do predictions on the subset ----------
probs = self.model.predict(X_new)[:,1]
# --------- probability cutoff for simple vs complex -------------
predictions = np.where(probs > prob, 1, 0)
'''
#------------ Confusion Matrix -------------
[simple marked as simple simple marked as complex]
[complex marked as simple complex marked as complex]
'''
cm = confusion_matrix(Y_new, predictions)
print(cm)
matrix_vals_c[i,j] = float(cm[1,1])/(cm[1,0] +cm[1,1])*100.
matrix_vals_s[i,j] = float(cm[0,0])/(cm[0,0] +cm[0,1])*100
except:
print ('Nothing in that cutoff, continuing...')
fstring1 = self._format_param_name(arr_name1)
fstring2 = self._format_param_name(arr_name2)
xv,yv = np.meshgrid(cut_vals1,cut_vals2)
zv_complex = matrix_vals_c
zv_simple = matrix_vals_s
#------- show data as an image with z-axis being the TP/TN rates ----
fig,ax = plt.subplots(1,2,sharey=True,figsize=(12,7))
cax = ax[0].imshow(zv_complex,vmin=50., vmax=100.,cmap='seismic')#,origin='lower')
sax = ax[1].imshow(zv_simple,vmin=50., vmax=100.,cmap='seismic')#,origin='lower')
# ---- set the axis labels ------
ax[0].set_xlabel(fstring1)
ax[0].set_ylabel(fstring2)
ax[1].set_xlabel(fstring1)
ax[1].set_ylabel(fstring2)
# ---------- set the tick labels ---------
ax[0].set_xticks([n for n in range(len(cut_vals1))])
ax[0].set_yticks(range(len(cut_vals2)))
ax[1].set_xticks([n for n in range(len(cut_vals2))])
xlabels = ['%.2f'%(c) for c in cut_vals1]
ylabels = ['%.2f'%(c) for c in cut_vals2]
ax[0].set_xticklabels(xlabels)
ax[0].set_yticklabels(ylabels)
ax[1].set_xticklabels(xlabels)
ax[1].set_yticklabels(ylabels)
#-------- adjust plot sizing and add colorbar ----------
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(cax, cax=cbar_ax)
ax[0].set_title('Complex Sources')
ax[1].set_title('Simple Sources')
plt.suptitle(r'Percent Correct over '+fstring1+' and '+fstring2)
if save:
            plt.savefig(arr_name1+'_'+arr_name2+'_plot.png', bbox_inches='tight')
else:
plt.show()
plt.close()
if __name__ == '__main__':
testing = plots()
#testing._loadLog('train.log')
#testing._plotLoss('train.log',save=False)
#testing._plotAcc('train.log',save=False)
testing._loadModel('../regularized/model_V1.h5')
testing._loadData('../data/test/')
#testing._make_cut(testing.chi_data, 'chi')
testing._make_2d_cut(testing.chi_data[:1000], 'chi',testing.flux_data[:1000], 'flux', num_cut=25)
| mit |
malayaleecoder/servo | tests/heartbeats/process_logs.py | 139 | 16143 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from os import path
import sys
import warnings
HB_LOG_IDX_START_TIME = 7
HB_LOG_IDX_END_TIME = HB_LOG_IDX_START_TIME + 1
HB_LOG_IDX_START_ENERGY = 14
HB_LOG_IDX_END_ENERGY = HB_LOG_IDX_START_ENERGY + 1
ENERGY_PROFILER_NAME = 'ApplicationHeartbeat'
SUMMARY_OUTPUT = "summary.txt"
SUMMARY_TIME_IDX = 8
SUMMARY_ENERGY_IDX = SUMMARY_TIME_IDX + 1
SUMMARY_POWER_IDX = SUMMARY_ENERGY_IDX + 1
def autolabel(rects, ax):
"""Attach some text labels.
"""
for rect in rects:
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * rect.get_height(), '', ha='center', va='bottom')
def plot_raw_totals(config, plot_data, max_time, max_time_std, max_energy, max_energy_std, output_dir, normalize):
"""Plot the raw totals for a configuration.
Keyword arguments:
config -- configuration name
plot_data -- (profiler name, total_time, total_time_std, total_energy, total_energy_std)
max_time, max_time_std, max_energy, max_energy_std -- single values
normalize -- True/False
"""
plot_data = sorted(plot_data)
keys = [p for (p, tt, tts, te, tes) in plot_data]
total_times = [tt for (p, tt, tts, te, tes) in plot_data]
total_times_std = [tts for (p, tt, tts, te, tes) in plot_data]
total_energies = [te for (p, tt, tts, te, tes) in plot_data]
total_energies_std = [tes for (p, tt, tts, te, tes) in plot_data]
fig, ax1 = plt.subplots()
ind = np.arange(len(keys)) # the x locations for the groups
width = 0.35 # the width of the bars
# add some text for labels, title and axes ticks
ax1.set_title('Time/Energy Data for Configuration ' + config)
ax1.set_xticks(ind + width)
ax1.set_xticklabels(keys, rotation=45)
fig.set_tight_layout(True)
fig.set_size_inches(len(plot_data) / 1.5, 8)
ax2 = ax1.twinx()
# Normalize
if normalize:
total_times_std /= np.sum(total_times)
total_times /= np.sum(total_times)
total_energies_std /= np.sum(total_energies)
total_energies /= np.sum(total_energies)
ax1.set_ylabel('Time (Normalized)')
ax2.set_ylabel('Energy (Normalized)')
else:
# set time in us instead of ns
total_times_std /= np.array(1000000.0)
total_times /= np.array(1000000.0)
total_energies_std /= np.array(1000000.0)
total_energies /= np.array(1000000.0)
ax1.set_ylabel('Time (ms)')
ax2.set_ylabel('Energy (Joules)')
rects1 = ax1.bar(ind, total_times, width, color='r', yerr=total_times_std)
rects2 = ax2.bar(ind + width, total_energies, width, color='y', yerr=total_energies_std)
ax1.legend([rects1[0], rects2[0]], ['Time', 'Energy'])
# set axis
x1, x2, y1, y2 = plt.axis()
if normalize:
ax1.set_ylim(ymin=0, ymax=1)
ax2.set_ylim(ymin=0, ymax=1)
else:
ax1.set_ylim(ymin=0, ymax=((max_time + max_time_std) * 1.25 / 1000000.0))
ax2.set_ylim(ymin=0, ymax=((max_energy + max_energy_std) * 1.25 / 1000000.0))
autolabel(rects1, ax1)
autolabel(rects2, ax2)
# plt.show()
plt.savefig(path.join(output_dir, config + ".png"))
plt.close(fig)
def create_raw_total_data(config_data):
"""Get the raw data to plot for a configuration
Return: [(profiler, time_mean, time_stddev, energy_mean, energy_stddev)]
Keyword arguments:
config_data -- (trial, trial_data)
"""
# We can't assume that the same number of heartbeats are always issued across trials
# key: profiler name; value: list of timing sums for each trial
profiler_total_times = {}
# key: profiler name; value: list of energy sums for each trial
profiler_total_energies = {}
for (t, td) in config_data:
for (profiler, ts, te, es, ee) in td:
# sum the total times and energies for each profiler in this trial
total_time = np.sum(te - ts)
total_energy = np.sum(ee - es)
# add to list to be averaged later
time_list = profiler_total_times.get(profiler, [])
time_list.append(total_time)
profiler_total_times[profiler] = time_list
energy_list = profiler_total_energies.get(profiler, [])
energy_list.append(total_energy)
profiler_total_energies[profiler] = energy_list
# Get mean and stddev for time and energy totals
return [(profiler,
np.mean(profiler_total_times[profiler]),
np.std(profiler_total_times[profiler]),
np.mean(profiler_total_energies[profiler]),
np.std(profiler_total_energies[profiler]))
for profiler in profiler_total_times.keys()]
def plot_all_raw_totals(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
raw_total_norm_out_dir = path.join(output_dir, 'raw_totals_normalized')
os.makedirs(raw_total_norm_out_dir)
raw_total_out_dir = path.join(output_dir, 'raw_totals')
os.makedirs(raw_total_out_dir)
# (name, (profiler, (time_mean, time_stddev, energy_mean, energy_stddev)))
raw_totals_data = [(config, create_raw_total_data(config_data)) for (config, config_data) in config_list]
mean_times = []
mean_times_std = []
mean_energies = []
mean_energies_std = []
for profiler_tup in [config_tup[1] for config_tup in raw_totals_data]:
for (p, tt, tts, te, tes) in profiler_tup:
mean_times.append(tt)
mean_times_std.append(tts)
mean_energies.append(te)
mean_energies_std.append(tes)
# get consistent max time/energy values across plots
max_t = np.max(mean_times)
max_t_std = np.max(mean_times_std)
max_e = np.max(mean_energies)
max_e_std = np.max(mean_energies_std)
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_norm_out_dir, True)
for data in raw_totals_data]
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_out_dir, False)
for data in raw_totals_data]
def plot_trial_time_series(config, trial, trial_data, max_end_time, max_power, output_dir):
"""Plot time series for a single trial.
Keyword arguments:
config -- the config name
trial -- the trial name
trial_data -- [(profiler, [start times], [end times], [start energies], [end energies])]
max_end_time -- single value to use as max X axis value (for consistency across trials)
output_dir -- the output directory
"""
# TODO: Some profilers may have parallel tasks - need to identify this on plots
max_end_time = max_end_time / 1000000.0
trial_data = sorted(trial_data)
fig, ax1 = plt.subplots()
keys = [p for (p, ts, te, es, ee) in trial_data]
# add some text for labels, title and axes ticks
ax1.set_title('Profiler Activity for ' + config + ', ' + trial)
ax1.set_xlabel('Time (ms)')
ax1.grid(True)
width = 8 # the width of the bars
ax1.set_yticks(10 * np.arange(1, len(keys) + 2))
ax1.set_yticklabels(keys)
ax1.set_ylim(ymin=0, ymax=((len(trial_data) + 1) * 10))
ax1.set_xlim(xmin=0, xmax=max_end_time)
fig.set_tight_layout(True)
fig.set_size_inches(16, len(trial_data) / 3)
i = 10
for (p, ts, te, es, ee) in trial_data:
xranges = [(ts[j] / 1000000.0, (te[j] - ts[j]) / 1000000.0) for j in xrange(len(ts))]
ax1.broken_barh(xranges, (i - 0.5 * width, width))
i += 10
# place a vbar at the final time for this trial
last_profiler_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in trial_data]))
plt.axvline(np.max(last_profiler_times) / 1000000.0, color='black')
power_times = []
power_values = []
for (p, ts, te, es, ee) in trial_data:
if p == ENERGY_PROFILER_NAME:
power_times = te / 1000000.0
power_values = (ee - es) / ((te - ts) / 1000.0)
ax2 = ax1.twinx()
ax2.set_xlim(xmin=0, xmax=max_end_time)
ax2.set_ylim(ymin=0, ymax=max_power)
ax2.set_ylabel('Power (Watts)')
ax2.plot(power_times, power_values, color='r')
# plt.show()
plt.savefig(path.join(output_dir, "ts_" + config + "_" + trial + ".png"))
plt.close(fig)
def hb_energy_times_to_power(es, ee, ts, te):
"""Compute power from start and end energy and times.
Return: power values
"""
return (ee - es) / ((te - ts) / 1000.0)
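# Rough worked example (assumption: the heartbeat logs record time in
# nanoseconds and energy in microjoules, which is consistent with the ms/Watt
# axis scaling used in the plotting code above): with es=0, ee=5e6 uJ, ts=0,
# te=1e9 ns (one second), hb_energy_times_to_power gives
#   5e6 / (1e9 / 1000.0) = 5.0 Watts.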
def plot_all_time_series(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
time_series_out_dir = path.join(output_dir, 'time_series')
os.makedirs(time_series_out_dir)
max_end_times = []
max_power_values = []
for (c, cd) in config_list:
for (t, td) in cd:
trial_max_end_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in td]))
max_end_times.append(np.nanmax(trial_max_end_times))
for (p, ts, te, es, ee) in td:
# We only care about the energy profiler (others aren't reliable for instant power anyway)
if p == ENERGY_PROFILER_NAME and len(te) > 0:
max_power_values.append(np.nanmax(hb_energy_times_to_power(es, ee, ts, te)))
max_time = np.nanmax(max_end_times)
max_power = np.nanmax(np.array(max_power_values)) * 1.2 # leave a little space at the top
for (config, config_data) in config_list:
[plot_trial_time_series(config, trial, trial_data, max_time, max_power, time_series_out_dir)
for (trial, trial_data) in config_data]
def read_heartbeat_log(profiler_hb_log):
"""Read a heartbeat log file.
Return: (profiler name, [start times], [end times], [start energies], [end energies], [instant powers])
Keyword arguments:
profiler_hb_log -- the file to read
"""
with warnings.catch_warnings():
try:
warnings.simplefilter("ignore")
time_start, time_end, energy_start, energy_end = \
np.loadtxt(profiler_hb_log,
dtype=np.dtype('uint64'),
skiprows=1,
usecols=(HB_LOG_IDX_START_TIME,
HB_LOG_IDX_END_TIME,
HB_LOG_IDX_START_ENERGY,
HB_LOG_IDX_END_ENERGY),
unpack=True,
ndmin=1)
except ValueError:
time_start, time_end, energy_start, energy_end = [], [], [], []
name = path.split(profiler_hb_log)[1].split('-')[1].split('.')[0]
return (name,
np.atleast_1d(time_start),
np.atleast_1d(time_end),
np.atleast_1d(energy_start),
np.atleast_1d(energy_end))
def process_trial_dir(trial_dir):
"""Process trial directory.
Return: [(profiler name, [start times], [end times], [start energies], [end energies])]
Time and energy are normalized to 0 start values.
Keyword arguments:
trial_dir -- the directory for this trial
"""
log_data = map(lambda h: read_heartbeat_log(path.join(trial_dir, h)),
filter(lambda f: f.endswith(".log"), os.listdir(trial_dir)))
# Find the earliest timestamps and energy readings
min_t = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [ts for (profiler, ts, te, es, ee) in log_data])))
min_e = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [es for (profiler, ts, te, es, ee) in log_data])))
# Normalize timing/energy data to start values of 0
return [(profiler, ts - min_t, te - min_t, es - min_e, ee - min_e) for (profiler, ts, te, es, ee) in log_data]
def process_config_dir(config_dir):
"""Process a configuration directory.
Return: [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])]
Keyword arguments:
config_dir -- the directory for this configuration - contains subdirectories for each trial
"""
return [(trial_dir, process_trial_dir(path.join(config_dir, trial_dir))) for trial_dir in os.listdir(config_dir)]
def process_logs(log_dir):
"""Process log directory.
Return: [(config, [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])])]
Keyword arguments:
log_dir -- the log directory to process - contains subdirectories for each configuration
"""
return [((config_dir.split('_')[1], process_config_dir(path.join(log_dir, config_dir))))
for config_dir in os.listdir(log_dir)]
def find_best_executions(log_dir):
"""Get the best time, energy, and power from the characterization summaries.
Return: ((config, trial, min_time), (config, trial, min_energy), (config, trial, min_power))
Keyword arguments:
    log_dir -- the log directory to process - contains subdirectories for each configuration
"""
DEFAULT = ('', '', 1000000000.0)
min_time = DEFAULT
min_energy = DEFAULT
min_power = DEFAULT
for config_dir in os.listdir(log_dir):
for trial_dir in os.listdir(path.join(log_dir, config_dir)):
with open(path.join(log_dir, config_dir, trial_dir, SUMMARY_OUTPUT), "r") as s:
lines = s.readlines()
time = float(lines[SUMMARY_TIME_IDX].split(':')[1])
energy = int(lines[SUMMARY_ENERGY_IDX].split(':')[1])
power = float(lines[SUMMARY_POWER_IDX].split(':')[1])
if time < min_time[2]:
min_time = (config_dir, trial_dir, time)
if energy < min_energy[2]:
min_energy = (config_dir, trial_dir, energy)
                if power < min_power[2]:
min_power = (config_dir, trial_dir, power)
return (min_time, min_energy, min_power)
def main():
"""This script processes the log files from the "characterize.py" script and produces visualizations.
"""
# Default log directory
directory = 'heartbeat_logs'
# Default output directory
output_dir = 'plots'
# Default android
android = False
# Parsing the input of the script
parser = argparse.ArgumentParser(description="Process Heartbeat log files from characterization")
parser.add_argument("-d", "--directory",
default=directory,
help="Heartbeat log directory \"-d heartbeat_logs\"")
parser.add_argument("-o", "--output",
default=output_dir,
help="Specify the log output directory, for example \"-o plots\"")
parser.add_argument("--android",
action="store_true",
dest="android",
default=False,
help="Specify if processing results from Android")
args = parser.parse_args()
if args.directory:
directory = args.directory
if args.output:
output_dir = args.output
if args.android:
android = args.android
if not os.path.exists(directory):
print "Input directory does not exist: " + directory
sys.exit(1)
if os.path.exists(output_dir):
print "Output directory already exists: " + output_dir
sys.exit(1)
res = process_logs(directory)
if not android:
best = find_best_executions(directory)
print 'Best time:', best[0]
print 'Best energy:', best[1]
print 'Best power:', best[2]
os.makedirs(output_dir)
plot_all_raw_totals(res, output_dir)
plot_all_time_series(res, output_dir)
if __name__ == "__main__":
main()
| mpl-2.0 |
dsullivan7/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm does not assume any parametric form of the data
distribution and can therefore model the complex shape of the data much better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
trabucayre/gnuradio | gr-filter/examples/synth_to_chan.py | 6 | 3134 | #!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
def main():
N = 1000000
fs = 8000
freqs = [100, 200, 300, 400, 500]
nchans = 7
sigs = list()
fmtx = list()
for fi in freqs:
s = analog.sig_source_f(fs, analog.GR_SIN_WAVE, fi, 1)
fm = analog.nbfm_tx(fs, 4*fs, max_dev=10000, tau=75e-6, fh=0.925*(4*fs)/2.0)
sigs.append(s)
fmtx.append(fm)
syntaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print("Synthesis Num. Taps = %d (taps per filter = %d)" % (len(syntaps),
len(syntaps) / nchans))
chtaps = filter.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print("Channelizer Num. Taps = %d (taps per filter = %d)" % (len(chtaps),
len(chtaps) / nchans))
filtbank = filter.pfb_synthesizer_ccf(nchans, syntaps)
channelizer = filter.pfb.channelizer_ccf(nchans, chtaps)
noise_level = 0.01
head = blocks.head(gr.sizeof_gr_complex, N)
noise = analog.noise_source_c(analog.GR_GAUSSIAN, noise_level)
addnoise = blocks.add_cc()
snk_synth = blocks.vector_sink_c()
tb = gr.top_block()
tb.connect(noise, (addnoise,0))
tb.connect(filtbank, head, (addnoise, 1))
tb.connect(addnoise, channelizer)
tb.connect(addnoise, snk_synth)
snk = list()
for i,si in enumerate(sigs):
tb.connect(si, fmtx[i], (filtbank, i))
for i in range(nchans):
snk.append(blocks.vector_sink_c())
tb.connect((channelizer, i), snk[i])
tb.run()
if 1:
channel = 1
data = snk[channel].data()[1000:]
f1 = pyplot.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(data[10000:10200] )
s1.set_title(("Output Signal from Channel %d" % channel))
fftlen = 2048
winfunc = numpy.blackman
#winfunc = numpy.hamming
f2 = pyplot.figure(2)
s2 = f2.add_subplot(1,1,1)
s2.psd(data, NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen / 4,
window = lambda d: d*winfunc(fftlen))
s2.set_title(("Output PSD from Channel %d" % channel))
f3 = pyplot.figure(3)
s3 = f3.add_subplot(1,1,1)
s3.psd(snk_synth.data()[1000:], NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen / 4,
window = lambda d: d*winfunc(fftlen))
s3.set_title("Output of Synthesis Filter")
pyplot.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
JRosenfeldIntern/data-assistant | UnitTests/test_preview.py | 1 | 34329 | import functools
import pathlib
import sys
import traceback
import unittest
import xml.etree.ElementTree as ET
import zipfile
from inc_datasources import _XMLMethodNames, _localWorkspace, _outputDirectory, _daGPTools
sys.path.insert(0, _daGPTools)
import arcpy
import pandas as pd
import tempfile
from scripts import dla
from create import *
def clear_feature_classes(directory: str):
"""
    dla.gdb is the test workspace in which the test feature classes are created. This function clears all
    feature classes from that workspace so that the newly created feature class is the only one present.
:param directory:
:return:
"""
arcpy.env.workspace = directory
featureclasses = arcpy.ListFeatureClasses()
if featureclasses is not None:
for featureclass in featureclasses:
arcpy.Delete_management(os.path.join(directory, featureclass))
def build_correct_fields(xml_location: str, include_globalid: bool = False):
"""
    reads the xml file and builds the list of target field names that should be in the new feature class
:param xml_location: str
:param include_globalid: bool
:return:
"""
fields = dla.getXmlElements(xml_location, "Field")
correct_fields = []
for field in fields:
if not include_globalid and str.lower(dla.getNodeValue(field, "TargetName")) != "globalid":
correct_fields.append(dla.getNodeValue(field, "TargetName"))
return correct_fields
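# For reference, the function above expects Field elements shaped roughly like
# the following (hypothetical snippet; the actual schema comes from the Data
# Assistant SourceTarget configuration file, and SourceName is an assumption):
#   <Field>
#       <SourceName>FACILITYID</SourceName>
#       <TargetName>FacilityID</TargetName>
#   </Field>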
def make_copy(directory: str, lw: dict):
"""
Copies the target feature class into the dla.gdb for comparison in the tests
:param directory: str
:param lw : dict
:return:
"""
arcpy.env.workspace = lw["Target"]
arcpy.CopyFeatures_management(lw["TargetName"], os.path.join(directory, "copy"))
def xml_compare(x1: ET, x2: ET, reporter=None):
"""
taken from:
https://bitbucket.org/ianb/formencode/src/tip/formencode/doctest_xml_compare.py?fileviewer=file-view-default#cl-70
:param x1:
:param x2:
:param reporter:
:return:
"""
if x1.tag in ['Source', 'Target'] or x2.tag in ['Source', 'Target']:
# We skip asserting the data path is correct because our xml file data paths may not match
return True
if x1.tag != x2.tag:
if reporter:
reporter('Tags do not match: %s and %s' % (x1.tag, x2.tag))
return False
for name, value in x1.attrib.items():
if x2.attrib.get(name) != value:
if reporter:
reporter('Attributes do not match: %s=%r, %s=%r'
% (name, value, name, x2.attrib.get(name)))
return False
for name in x2.attrib.keys():
if name not in x1.attrib:
if reporter:
reporter('x2 has an attribute x1 is missing: %s'
% name)
return False
if not text_compare(x1.text, x2.text):
if reporter:
reporter('text: %r != %r' % (x1.text, x2.text))
return False
if not text_compare(x1.tail, x2.tail):
if reporter:
reporter('tail: %r != %r' % (x1.tail, x2.tail))
return False
cl1 = x1.getchildren()
cl2 = x2.getchildren()
if len(cl1) != len(cl2):
if reporter:
reporter('children length differs, %i != %i'
% (len(cl1), len(cl2)))
return False
i = 0
for c1, c2 in zip(cl1, cl2):
i += 1
if not xml_compare(c1, c2, reporter=reporter):
if reporter:
reporter('children %i do not match: %s'
% (i, c1.tag))
return False
return True
def text_compare(t1: str, t2: str):
"""
taken from:
https://bitbucket.org/ianb/formencode/src/tip/formencode/doctest_xml_compare.py?fileviewer=file-view-default#cl-70
:param t1:
:param t2:
:return:
"""
if not t1 and not t2:
return True
if t1 == '*' or t2 == '*':
return True
return (t1 or '').strip() == (t2 or '').strip()
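# Minimal usage sketch for xml_compare (hypothetical XML snippets, for
# illustration only):
#   >>> a = ET.fromstring('<Field><TargetName>ID</TargetName></Field>')
#   >>> b = ET.fromstring('<Field><TargetName>ID</TargetName></Field>')
#   >>> xml_compare(a, b, reporter=print)
#   True
# Mismatched tags, attributes, text, or child counts are reported through the
# optional reporter callable and cause xml_compare to return False.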
class UnitTests(unittest.TestCase):
"""
Runs the unit tests for the various functions for all test cases and data sources
"""
def __init__(self, test_object, *args, **kwargs):
super(UnitTests, self).__init__(*args, **kwargs)
self.testObject = test_object
self.local_workspace = self.testObject.local_workspace
self.localDirectory = _outputDirectory
self.sourceWorkspace = self.local_workspace["Source"]
self.targetWorkspace = self.local_workspace["Target"]
self.sourceFC = self.local_workspace["SourceName"]
self.targetFC = self.local_workspace["TargetName"]
self.localFC = list()
self.localDataPath = ""
self.localFields = tuple()
self.sourceDataPath = os.path.join(self.local_workspace["Source"], self.local_workspace["SourceName"])
self.targetDataPath = os.path.join(self.local_workspace["Target"], self.local_workspace["TargetName"])
self.sourceFields = tuple(arcpy.ListFields(self.sourceDataPath))
self.targetFields = tuple(arcpy.ListFields(self.targetDataPath))
self.methods = _XMLMethodNames
self.xmlLocation = self.local_workspace["xmlLocation"]
self.outXML = os.path.join(str(pathlib.Path(self.local_workspace["outXML"]).parent),
pathlib.Path(self.local_workspace["outXML"]).stem,
os.path.basename(self.local_workspace["outXML"]))
self.correctXML = self.local_workspace["correctXML"]
def test_create(self):
"""
Creates the feature class or xml file for testing
:return:
"""
clear_feature_classes(_outputDirectory)
self.testObject.main()
if self.testObject.title != "CreateConfig":
self.set_local_info()
def get_default_values(self):
"""
Returns a dictionary where the key is the field name and the value is that field's default value
:return: dict
"""
out_dict = dict()
for field in self.targetFields:
out_dict[field.name] = field.defaultValue
return out_dict
def set_local_info(self):
"""
Once the feature class being tested is created, sets the datapath and fields of that feature class
:return:
"""
arcpy.env.workspace = self.localDirectory
self.localFC = arcpy.ListFeatureClasses()[0]
arcpy.env.workspace = ""
self.localDataPath = os.path.join(_outputDirectory, self.localFC)
self.localFields = tuple(arcpy.ListFields(self.localDataPath))
@staticmethod
def build_data_frame(data_path: str, columns: tuple):
"""
Builds and caches a pandas DataFrame object containing the information from the specified feature class
:param data_path: str
        :param columns: tuple(str)
:return: pd.DataFrame object
"""
# creates a searchCursor for a given feature class and returns an array of that table
return pd.DataFrame(list(arcpy.da.SearchCursor(data_path, columns)), columns=columns)
@functools.lru_cache()
def get_xml_parse(self):
"""
        Returns and caches a SourceTargetParser object containing the information from the specified
        SourceTarget.xml file
:return: SourceTargetParser object
"""
return SourceTargetParser(self.xmlLocation)
def test_fields(self):
"""
Compares the xml file with the mutated file to ensure that the fields were correctly transferred over
and not tampered with
:return:
"""
if self.testObject.title not in ["Preview", "Stage", "Append", "Replace"]:
return
correct_fields = build_correct_fields(self.xmlLocation, self.testObject.globalIDCheck)
if self.testObject.title in ["Append", "Replace"]:
fields = arcpy.ListFields(self.targetDataPath)
else:
fields = arcpy.ListFields(self.localDataPath)
fieldnames = []
for field in fields:
if self.testObject.globalIDCheck:
if field.name.lower() not in ["", "objectid", "shape"]:
fieldnames.append(field.name)
else:
if field.name.lower() not in ["", "objectid", "shape", "globalid"]:
fieldnames.append(field.name)
for cfield in correct_fields:
self.assertIn(cfield, fieldnames)
def test_length(self):
"""
        Ensures that the mutated file, depending on which mode produced it, has the correct length
:return:
"""
if self.testObject.title not in ["Preview", "Stage", "Append", "Replace"]:
return
source_table = self.build_data_frame(self.sourceDataPath, tuple([field.name for field in self.sourceFields]))
local_table = self.build_data_frame(self.localDataPath, tuple([field.name for field in self.localFields]))
# target_table = (list(arcpy.da.SearchCursor(self.targetDataPath, "*")))
target_table = self.build_data_frame(self.targetDataPath, tuple([field.name for field in self.targetFields]))
mode = self.testObject.title # variable assignment to help with readability
if mode == "Preview":
if len(source_table) < self.testObject.RowLimit:
self.assertEqual(len(local_table), len(source_table))
else:
self.assertEqual(len(local_table), self.testObject.RowLimit)
elif mode == "Stage":
self.assertEqual(len(local_table), len(source_table))
elif mode == "Append":
self.assertEqual(len(target_table), len(local_table) + len(source_table))
elif mode == "Replace":
self.assertEqual(len(target_table), len(local_table))
else:
self.assertIn(mode, ["Preview", "Stage", "Append", "Replace"])
def test_replace_data(self):
"""
        Ensures the correct rows were removed and re-appended, and in the correct order
:return:
"""
replaced_rows_list = []
targetfields = list()
for field in self.targetFields:
if field.name.lower() not in ['globalid', 'objectid']:
targetfields.append(field.name)
localfields = list()
for field in self.localFields:
if field.name.lower() not in ['globalid', 'objectid']:
localfields.append(field.name)
copy = self.build_data_frame(self.localDataPath, tuple(localfields)).iterrows()
target = self.build_data_frame(self.targetDataPath, tuple(targetfields)).iterrows()
replace_dict = self.get_xml_parse().parse_replace()
for copy_row, targetRow in zip(copy, target): # will iterate through until all of the copy cursor is exhausted
copy_row = copy_row[1]
targetRow = targetRow[1]
while not targetRow.equals(copy_row):
replaced_rows_list.append(copy_row)
copy_row = next(copy)
copy_row = copy_row[1]
for targetRow, copy_row in zip(target, replaced_rows_list):
            # now iterates through the rows that should have been replaced and re-appended
targetRow = targetRow[1]
# these assertions make sure the targetRow SHOULD have been replaced
if replace_dict["Operator"] == "=":
self.assertEqual(targetRow[replace_dict["FieldName"]], replace_dict["Value"])
if replace_dict["Operator"] == "!=":
self.assertNotEqual(targetRow[replace_dict["FieldName"]], replace_dict["Value"])
if replace_dict["Operator"] == "Like":
self.assertIn(replace_dict["Value"], targetRow[replace_dict["FieldName"]])
self.assertTrue(targetRow.equals(copy_row))
# appended to ensure order and accuracy. Here the target cursor starts
# at where the beginning of the re-appended rows should be
def test_data(self):
"""
        Ensures that the mutated file has the correct data in each row, and that the data assistant actions were
performed correctly
:return:
"""
source_table = self.build_data_frame(self.sourceDataPath, tuple([field.name for field in self.sourceFields]))
local_table = self.build_data_frame(self.localDataPath, tuple([field.name for field in self.localFields]))
target_table = self.build_data_frame(self.targetDataPath, tuple([field.name for field in self.targetFields]))
parse_object = self.get_xml_parse()
parse_object.data = parse_object.parse()
xml_fields = parse_object.get_pairings()
method_dict = parse_object.get_methods()
xml_data = parse_object.get_data()
default_values = self.get_default_values()
if self.testObject.title in ["Preview", "Stage"]: # needed so that we can use the same function to test append
target = local_table
else:
if 'GLOBALID' in target_table.columns:
                target_table = target_table.drop('GLOBALID', 1) # TODO: Might need to omit other iterations of globalid
if 'GLOBALID' in local_table.columns:
                local_table = local_table.drop('GLOBALID', 1) # TODO: Might need to omit other iterations of globalid
# self.assertTrue(local_table.equals(target_table.head(len(local_table))))
self.assertTrue((local_table == target_table.head(len(local_table))).all().all())
target = target_table.drop(range(len(local_table))) # ensures we are only comparing the newly appended data
for field in xml_fields.keys():
if method_dict[field] == self.methods["None"]:
self.none_test(target[field], default_values[field])
elif method_dict[field] == self.methods["Copy"]:
self.copy_test(source_table[xml_fields[field]], target[field])
elif method_dict[field] == self.methods["Set Value"]:
self.set_value_test(target[field], xml_data[field][self.methods["Set Value"]])
elif method_dict[field] == self.methods["Value Map"]:
self.value_map_test(source_table[xml_fields[field]], target[field],
xml_data[field][self.methods["Value Map"]], xml_data[field]["Otherwise"])
elif method_dict[field] == self.methods["Change Case"]:
self.change_case_test(source_table[xml_fields[field]], target[field],
xml_data[field][self.methods["Change Case"]])
elif method_dict[field] == self.methods["Concatenate"]:
self.concatenate_test(target[field], xml_data[field]["Separator"],
xml_data[field]["Concatenate"])
elif method_dict[field] == self.methods["Left"]:
self.left_test(source_table[xml_fields[field]], target[field], xml_data[field]["Left"])
elif method_dict[field] == self.methods["Right"]:
self.right_test(source_table[xml_fields[field]], target[field], xml_data[field]["Right"])
elif method_dict[field] == self.methods["Substring"]:
self.substring_test(source_table[xml_fields[field]], target[field], xml_data[field]["Start"],
xml_data[field]["Length"])
elif method_dict[field] == self.methods["Split"]:
self.split_test(source_table[xml_fields[field]], target[field], xml_data[field]["SplitAt"],
xml_data[field]["Part"])
elif method_dict[field] == self.methods["Conditional Value"]:
self.conditional_value_test(source_table[xml_fields[field]], target[field],
xml_data[field]["Oper"], xml_data[field]["If"], xml_data[field]["Then"],
xml_data[field]["Else"])
elif method_dict[field] == self.methods["Domain Map"]:
self.domain_map_test(source_table[xml_fields[field]], target[field],
xml_data[field][self.methods["Domain Map"]])
else:
self.assertIn(method_dict[field], self.methods)
def none_test(self, target: pd.Series, defaultValue):
"""
        Ensures that the target column contains only None values (or the field's default value)
:param target:
:param defaultValue:
:return:
"""
self.assertTrue(len(target.unique()) == 1 and (
target.unique()[0] is None or target.unique()[0] == 'None' or target.unique()[0] == defaultValue),
target.to_string())
def copy_test(self, source: pd.Series, target: pd.Series):
"""
Ensures that the copy source got copied to the target. In other words, ensures that the two vectors are equal.
"""
self.assertTrue((source == target.astype(source.dtype)).all(),
"Mis-match bewteen these fields: " + source.name + " " + target.name)
def set_value_test(self, target: pd.Series, value: pd.Series):
"""
Ensures that the target values are all set properly
:param target:
:param value:
:return:
"""
self.assertTrue(len(target.unique()) == 1 and target.unique() == value)
def value_map_test(self, source: pd.Series, target: pd.Series, value_dict: dict, otherwise):
"""
Ensures the values are set to what they need to be based on the preset configuration in the value map
:param source:
:param target:
:param value_dict
:param otherwise
:return:
"""
for s, t in zip(source, target):
if s in value_dict:
self.assertTrue(str(t) == str(value_dict[s]), str(t) + " != " + str(value_dict[s]))
else:
self.assertTrue(str(t) == str(otherwise))
def change_case_test(self, source: pd.Series, target: pd.Series, manipulation: str):
"""
        Ensures the case of each row was correctly changed
:param source:
:param target:
:param manipulation: str
:return:
"""
if manipulation == "Uppercase":
self.assertTrue((source.str.upper() == target).all())
elif manipulation == "Lowercase":
self.assertTrue((source.str.lower() == target).all())
elif manipulation == "Capitalize":
self.assertTrue((source.str.capitalize() == target).all())
elif manipulation == "Title":
self.assertTrue((source.str.title() == target).all())
else:
self.assertIn(manipulation, ["Uppercase", "Lowercase", "Capitalize", "Title"])
def concatenate_test(self, target: pd.Series, seperator: str,
cfields: list):
"""
Ensures the row concatenates the correct field values
:param target:
:param seperator:
:param cfields:
:return:
"""
source_table = self.build_data_frame(self.sourceDataPath, tuple([field.name for field in self.sourceFields]))
if seperator == "(space)":
seperator = " "
compare_column = source_table[cfields.pop(0)]
for cfield in cfields:
right = source_table[cfield].replace("NaN", "").astype(str)
compare_column = compare_column.astype(str).str.cat(right, sep=seperator)
self.assertTrue((target == compare_column).all())
def left_test(self, source: pd.Series, target: pd.Series, number: int):
"""
        Ensures the correct number of characters from the left were mapped
:param source:
:param target
:param number: int
:return:
"""
self.assertTrue((source.astype(str).apply(lambda f: f[:number]) == target.astype(str)).all())
def right_test(self, source: pd.Series, target: pd.Series, number: int):
"""
Ensures the correct number of characters from the right were mapped
:param source:
:param target:
:param number:
:return:
"""
        self.assertTrue((source.astype(str).apply(lambda f: f[-number:]) == target.astype(str)).all())
def substring_test(self, source: pd.Series, target: pd.Series, start: int, length: int):
"""
Ensures the correct substring was pulled from each row
:param source:
:param target:
:param start:
:param length:
:return:
"""
self.assertTrue((source.astype(str).apply(lambda f: f[start:length + start]) == target.astype(str)).all())
def split_test(self, source: pd.Series, target: pd.Series, split_point: str, part: int):
"""
Ensures the correct split was made and the resulting data is correct
:param source:
:param target:
:param split_point:
:param part:
:return:
"""
for sfield, tfield in zip(source, target):
self.assertTrue(sfield.split(split_point)[part] == tfield)
def conditional_value_test(self, source: pd.Series, target: pd.Series, oper: str, if_value,
then_value, else_value):
"""
Ensures that the conditional value evaluates correctly in each row of the column
:param source:
:param target:
:param oper:
:param if_value:
:param then_value:
:param else_value:
:return:
"""
for sfield, tfield in zip(source, target):
if oper == "==":
if sfield == if_value:
self.assertEqual(then_value, tfield)
else:
self.assertEqual(else_value, tfield)
elif oper == "!'":
if sfield != if_value:
self.assertEqual(then_value, tfield)
else:
self.assertEqual(else_value, tfield)
elif oper == "<":
if sfield < if_value:
self.assertEqual(then_value, tfield)
else:
self.assertEqual(else_value, tfield)
elif oper == ">":
if sfield > if_value:
self.assertEqual(then_value, tfield)
else:
self.assertEqual(else_value, tfield)
else:
self.assertIn(oper, ["==", "!=", "<", ">"])
def domain_map_test(self, source: pd.Series, target: pd.Series, mappings: dict):
"""
Ensures the domain map pairings are correctly mapped in the target column
:param self:
:param source:
:param target:
:param mappings:
:return:
"""
for s, t in zip(source, target):
if s in mappings:
if mappings[s] == "(None)":
                    # In the event that a value is loaded in the xml but not mapped to any target domain, we want to
                    # make sure that the source and target values are the same
self.assertEqual(s, t)
self.assertEqual(mappings[s], t)
def test_xml(self):
"""
Tests to see that the newly created xml file is equal to a pre-determined correct file
:return:
"""
if self.testObject.title != "CreateConfig":
return
out_xml = ET.parse(self.outXML).getroot()
correct_xml = ET.parse(self.correctXML).getroot()
self.assertTrue(xml_compare(out_xml, correct_xml))
def destage(self):
"""
        After staging is done, the xml records that there is a staged feature class that append can use to append to the source.
        This function deletes that entry from the xml so the xml can be used again or so append can recreate the mapping.
:return:
"""
xml = ET.parse(self.xmlLocation)
root = xml.getroot()
datasets = root.getchildren()[0]
staged = datasets.getchildren()[len(datasets.getchildren()) - 1]
if staged.tag == "Staged":
datasets.remove(staged)
xml.write(self.xmlLocation)
def main(self):
"""
Runs all of the tests
:return:
"""
if self.testObject.title == "CreateConfig":
self.test_create()
self.test_xml()
return
else:
self.test_create()
self.test_length()
self.test_fields()
if self.testObject.title == 'Replace':
self.test_replace_data()
else:
self.test_data()
class SourceTargetParser(object):
"""
    Class designed to store the essential parts of the xml file in readable python data structures
"""
def __init__(self, xml_file: str):
self.xmlLocation = xml_file
self.xml = ET.parse(self.xmlLocation).getroot()
self.targetFields = []
self.methods = _XMLMethodNames # not actually the methods in this file, just the naming syntax for the xml
self.data = dict()
@functools.lru_cache()
def get_sourcefields(self):
"""
Returns and caches the source names as specified in the xml. Some might be None if there is no mapping to the
corresponding target field.
:return:
"""
sourcefields = []
fields = self.xml.find('Fields').getchildren()
for field in fields:
sourceName = field.find('SourceName').text
sourcefields.append(sourceName)
return sourcefields
def get_data(self):
"""
Returns the xml data
:return: dict
"""
return self.data
@functools.lru_cache()
def get_targetfields(self):
"""
Returns and caches the target field names as specified in the xml.
:return:
"""
targetfields = []
fields = self.xml.find('Fields').getchildren()
for field in fields:
targetName = field.find('TargetName').text
targetfields.append(targetName)
return targetfields
@functools.lru_cache()
def get_pairings(self) -> dict:
"""
Returns a dictionary where key is TargetName and value is SourceName for each field
:return: dict
"""
pairings = dict()
fields = self.xml.find('Fields').getchildren()
for field in fields:
sourcename = field.find('SourceName').text
targetname = field.find('TargetName').text
pairings[targetname] = sourcename
return pairings
@functools.lru_cache()
def get_methods(self) -> dict:
"""
        Returns and caches the methods in order of appearance in the xml file.
:return:
"""
method_dict = dict()
fields = self.xml.find('Fields').getchildren()
for field in fields:
targetname = field.find('TargetName').text
method = field.find('Method').text
method_dict[targetname] = method
return method_dict
@functools.lru_cache()
def parse_replace(self) -> dict:
"""
Returns a dictionary with the information used by Replace By Field Value
:return: dict
"""
datasets = self.xml.find('Datasets')
replace_by = datasets.find('ReplaceBy')
if len(replace_by.getchildren()) == 0:
raise (AssertionError("ReplaceBy is empty in the XML"))
outdict = dict()
outdict["FieldName"] = replace_by.find('FieldName').text
outdict['Operator'] = replace_by.find('Operator').text
outdict['Value'] = replace_by.find('Value').text
return outdict
def parse(self):
"""
Interprets the xml file and stores the information in appropriate places
:return:
"""
data = dict()
fields = self.xml.find('Fields').getchildren()
for field in fields:
target_name = field.find('TargetName').text
method = field.find('Method').text # added for visibility
if method == self.methods["Set Value"]:
data[target_name] = dict()
data[target_name][self.methods["Set Value"]] = field.find(self.methods["Set Value"]).text
elif method == self.methods["Domain Map"]:
domain_map = field.find(self.methods["Domain Map"]).getchildren()
data[target_name] = dict()
data[target_name][self.methods["Domain Map"]] = dict()
for tag in domain_map:
if tag.tag == "sValue":
svalue = tag.text
if tag.tag == "tValue":
data[target_name][self.methods["Domain Map"]][svalue] = tag.text
svalue = ""
elif method == self.methods["Value Map"]:
value_map = field.find(self.methods["Value Map"]).getchildren()
data[target_name] = dict()
data[target_name][self.methods["Value Map"]] = dict()
for tag in value_map:
if tag.tag == "sValue":
svalue = tag.text
elif tag.tag == "tValue":
data[target_name][self.methods["Value Map"]][svalue] = tag.text
svalue = ""
elif tag.tag == "Otherwise":
data[target_name]["Otherwise"] = tag.text
elif method == self.methods["Change Case"]:
data[target_name] = dict()
data[target_name][self.methods["Change Case"]] = field.find(self.methods["Change Case"]).text
elif method == self.methods["Concatenate"]:
data[target_name] = dict()
data[target_name][self.methods["Concatenate"]] = list()
data[target_name]["Separator"] = field.find("Separator").text
cfields = field.find("cFields").getchildren()
for cfield in cfields:
data[target_name][self.methods["Concatenate"]].append(cfield.find('Name').text)
elif method == self.methods["Left"]:
data[target_name] = dict()
data[target_name][self.methods["Left"]] = int(field.find(self.methods["Left"]).text)
elif method == self.methods["Right"]:
data[target_name] = dict()
data[target_name][self.methods["Right"]] = int(field.find(self.methods["Right"]).text)
elif method == self.methods["Substring"]:
data[target_name] = dict()
data[target_name]["Start"] = int(field.find('Start').text)
data[target_name]["Length"] = int(field.find('Length').text)
elif method == self.methods["Split"]:
data[target_name] = dict()
data[target_name]["SplitAt"] = field.find("SplitAt").text
data[target_name]["Part"] = int(field.find("Part").text)
elif method == self.methods["Conditional Value"]:
data[target_name] = dict()
data[target_name]["Oper"] = field.find("Oper").text.strip("\'").strip("\"")
data[target_name]["If"] = field.find("If").text.strip("\'").strip("\"")
data[target_name]["Then"] = field.find("Then").text.strip("\'").strip("\"")
data[target_name]["Else"] = field.find("Else").text.strip("\'").strip("\"")
else:
assert method in self.methods.values()
return data
def make_temp_file() -> tempfile.TemporaryDirectory:
"""
Returns a temporary directory that is used to store the local data for the tests
:return:
"""
localfolder = str(pathlib.Path(".\localData").absolute())
return tempfile.TemporaryDirectory(dir=localfolder)
def change_workspace(lw: list, tmp_name: str) -> list:
"""
Changes the data paths to reflect the new temporary file made
:param lw: list
:param tmp_name: str
:return:
"""
out_workspace = lw.copy()
for workspace in out_workspace:
the_path = ""
for part in pathlib.Path(workspace["Source"]).parts:
the_path = os.path.join(the_path, part)
if part == 'localData':
the_path = os.path.join(the_path, tmp_name)
workspace["Source"] = the_path
the_path = ""
for part in pathlib.Path(workspace["Target"]).parts:
the_path = os.path.join(the_path, part)
if part == 'localData':
the_path = os.path.join(the_path, tmp_name)
workspace["Target"] = the_path
return out_workspace
def set_up_data(tmpdir: str):
"""
Unzips all data into local directory
:param tmpdir:
:return:
"""
workspace = str(pathlib.Path(".\localData").absolute())
for file in os.listdir(workspace):
if ".zip" in file:
with zipfile.ZipFile(os.path.join(workspace, file)) as unzipper:
unzipper.extractall(tmpdir)
def change_xml_path(t_workspace: list):
"""
Changes the source and target path in the xml files for testing
:param t_workspace:
:return:
"""
for workspace in t_workspace:
xml = ET.parse(workspace["xmlLocation"])
root = xml.getroot()
datasets = root.find('Datasets').getchildren()
for field in datasets:
if field.tag == "Source":
field.text = os.path.join(workspace["Source"], workspace["SourceName"])
if field.tag == "Target":
field.text = os.path.join(workspace["Target"], workspace["TargetName"])
xml.write(workspace["xmlLocation"])
if __name__ == '__main__':
tmp = make_temp_file()
temp_workspace = change_workspace(_localWorkspace, pathlib.Path(tmp.name).stem)
set_up_data(tmp.name)
change_xml_path(temp_workspace)
try:
for local_workspace in temp_workspace:
UnitTests(Preview(local_workspace)).main()
except:
traceback.print_exc()
sys.exit(-1)
finally:
try:
tmp.cleanup()
except PermissionError:
print("Unable to delete temporary folder: Permission Error")
pass
| apache-2.0 |
funbaker/astropy | astropy/visualization/scripts/tests/test_fits2bitmap.py | 2 | 2337 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from ....io import fits
try:
import matplotlib # pylint: disable=W0611
import matplotlib.image as mpimg
HAS_MATPLOTLIB = True
from ..fits2bitmap import fits2bitmap, main
except ImportError:
HAS_MATPLOTLIB = False
@pytest.mark.skipif('not HAS_MATPLOTLIB')
class TestFits2Bitmap:
def setup_class(self):
self.filename = 'test.fits'
def test_function(self, tmpdir):
filename = tmpdir.join(self.filename).strpath
fits.writeto(filename, np.ones((128, 128)))
fits2bitmap(filename)
def test_script(self, tmpdir):
filename = tmpdir.join(self.filename).strpath
fits.writeto(filename, np.ones((128, 128)))
main([filename, '-e', '0'])
def test_exten_num(self, tmpdir):
filename = tmpdir.join(self.filename).strpath
data = np.ones((100, 100))
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU(data)
hdulist = fits.HDUList([hdu1, hdu2])
hdulist.writeto(filename)
main([filename, '-e', '1'])
def test_exten_name(self, tmpdir):
filename = tmpdir.join(self.filename).strpath
data = np.ones((100, 100))
hdu1 = fits.PrimaryHDU()
extname = 'SCI'
hdu2 = fits.ImageHDU(data)
hdu2.header['EXTNAME'] = extname
hdulist = fits.HDUList([hdu1, hdu2])
hdulist.writeto(filename)
main([filename, '-e', extname])
@pytest.mark.parametrize('file_exten', ['.gz', '.bz2'])
def test_compressed_fits(self, tmpdir, file_exten):
filename = tmpdir.join('test.fits' + file_exten).strpath
fits.writeto(filename, np.ones((128, 128)))
main([filename, '-e', '0'])
def test_orientation(self, tmpdir):
"""
Regression test to check the image vertical orientation/origin.
"""
filename = tmpdir.join(self.filename).strpath
out_filename = 'fits2bitmap_test.png'
out_filename = tmpdir.join(out_filename).strpath
data = np.zeros((32, 32))
data[0:16, :] = 1.
fits.writeto(filename, data)
main([filename, '-e', '0', '-o', out_filename])
img = mpimg.imread(out_filename)
assert img[0, 0, 0] == 0
assert img[31, 31, 0] == 1
| bsd-3-clause |
deepesch/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
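# e.g. (illustrative call) get_data(500, 32, 'digits') keeps the first 500 digit
# samples and the 32 columns ranked highest by the first sample's pixel values.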
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    X = get_data(N, D, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
deepgram/kur | examples/language-model/view_logs.py | 1 | 1159 | """
Copyright 2016 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from kur.loggers import BinaryLogger
import sys
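# The directory holding Kur's binary training logs is taken as the first
# command-line argument, e.g. (illustrative path): python view_logs.py my_log_dir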
log_dir_name = sys.argv[1]
training_loss = BinaryLogger.load_column(log_dir_name, 'training_loss_total')
validation_loss = BinaryLogger.load_column(log_dir_name, 'validation_loss_total')
plt.xlabel('Epoch')
plt.ylabel('Loss')
epoch = list(range(1, 1+len(training_loss)))
t_line, = plt.plot(epoch, training_loss, 'co-', label='Training Loss')
v_line, = plt.plot(epoch, validation_loss, 'mo-', label='Validation Loss')
plt.legend(handles=[t_line, v_line])
plt.savefig('loss.pdf')
| apache-2.0 |
pnedunuri/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
shahankhatch/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
KimCaleb421/Thermostat | plotting.py | 1 | 2801 | # plotting.py
# Tim Kerins 21-04-14
# A very simple datalogger script that parses data from the serial port and
# saves it as <time_stamp>.csv
#! /usr/bin/env python
import sys, serial,time, datetime
from matplotlib import pyplot as plt
# set the serial port the Arduino is connected to
serPort = '/dev/ttyACM0'
# open the file for writing
filename = time.strftime("%m-%d_%H-%M-%S")
dataFile = open("./Data/%s.csv" % filename, "w");
print "\n********************************************************"
print "\nLog Data"
print "\nAttempting to open Serial Port : ",serPort,"for logging\n"
# opens usb serial port for logging
ser = serial.Serial(serPort,9600,timeout=1)
# checks the port is open
if (ser.isOpen() == False):
print "ERROR : Unable to open serial port ",serPort,"\n"
exit(0);
else:
print "Port ",serPort," opened\n"
# force print to console
sys.stdout.flush()
# waits for signal from user to start logging
print "Hit return to start logging ..."
key = sys.stdin.readline()
start = time.time()
dataFile.write("Time, Temperature, Baby set T, Crit T, Bang-Bang Counter, Heater, Comments\n")
# sends signal to start logging
ser.write('1');
print "Logging Started. CTRL-C to stop\n\n"
print "Time, Temperature, Baby set T, Crit T, Bang-Bang Counter, Heater, Comments\n"
#def trunc(f,n):
# slen = len('%.*f' % (n,f))
# return str(f)[:slen]
while True:
try:
# read data from serial writes to stdio and dataFile
line = ser.readline()
#elapsed = time.time() - start
#print str("{:.2f}".format(elapsed)) + ',',
#print str(','),
# print str(datetime.timedelta(seconds=elapsed)), #trunc(elapsed,2))),
print line;
dataFile.write(line)
except KeyboardInterrupt: #CTRL-C triggered here
# sends signal to stop logging
ser.write('0')
print "Logging Stopped\n"
break;
# close the serial port
ser.flush()
ser.close()
# close the datafile
dataFile.close()
print "Port ",serPort," closed\n"
print "\n********************************************************\n"
#print "\nPlot graph (y/n) ?"
#key = sys.stdin.read(1)
#if key=='y':
# print "\nPlotting graph ...\n"
# now plot the graph
# read all of the data from the textfile
# f = open("loggedData.dat",'r')
# lines = f.readlines()
# f.close()
# intialize variables to be lists
# x = []
# y = []
# lines and add to lists
# for line in lines:
# p=line.split(',')
# x.append(float(p[0]))
# y.append(float(p[1]))
# fig=plt.figure()
# graph=fig.add_subplot(111)
# graph.set_title("Data logged from Arduino UNO")
# graph.plot(x,y,'ro')
# plt.show()
# print "Plot complete\n"
#else:
# print "Finishing\n"
# rest for 3 seconds
time.sleep(3)
| gpl-2.0 |
irit-melodi/attelo | attelo/io.py | 3 | 11799 | """
Saving and loading data or models
"""
from __future__ import print_function
from itertools import chain
import codecs
import copy
import csv
import json
import sys
import time
import traceback
from sklearn.datasets import load_svmlight_file
import educe # WIP
from .edu import (EDU, FAKE_ROOT_ID, FAKE_ROOT)
from .table import (DataPack, DataPackException,
UNKNOWN, UNRELATED,
get_label_string, groupings)
from .util import truncate
# pylint: disable=too-few-public-methods
class IoException(Exception):
"""
Exceptions related to reading/writing data
"""
def __init__(self, msg):
super(IoException, self).__init__(msg)
# ---------------------------------------------------------------------
# feedback
# ---------------------------------------------------------------------
# pylint: disable=redefined-builtin, invalid-name
class Torpor(object):
"""
Announce that we're about to do something, then do it,
then say we're done.
Usage: ::
with Torpor("doing a slow thing"):
some_slow_thing
Output (1): ::
doing a slow thing...
Output (2a): ::
doing a slow thing... done
Output (2b): ::
doing a slow thing... ERROR
<stack trace>
:param quiet: True to skip the message altogether
"""
def __init__(self, msg,
sameline=True,
quiet=False,
file=sys.stderr):
self._msg = msg
self._file = file
self._sameline = sameline
self._quiet = quiet
self._start = 0
self._end = 0
def __enter__(self):
# we grab the wall time instead of using time.clock() (A)
        # because we are not using this for profiling but just to
# get a rough idea what's going on, and (B) because we want
# to include things like IO into the mix
self._start = time.time()
if self._quiet:
return
elif self._sameline:
print(self._msg, end="... ", file=self._file)
else:
print("[start]", self._msg, file=self._file)
def __exit__(self, type, value, tb):
self._end = time.time()
if tb is None:
if not self._quiet:
done = "done" if self._sameline else "[-end-] " + self._msg
ms_elapsed = 1000 * (self._end - self._start)
final_msg = u"{} [{:.0f} ms]".format(done, ms_elapsed)
print(final_msg, file=self._file)
else:
if not self._quiet:
oops = "ERROR!" if self._sameline else "ERROR! " + self._msg
print(oops, file=self._file)
traceback.print_exception(type, value, tb)
sys.exit(1)
# pylint: redefined-builtin, invalid-name
# ---------------------------------------------------------------------
# tables
# ---------------------------------------------------------------------
def load_edus(edu_file):
"""
Read EDUs (see :doc:`../input`)
:rtype: [EDU]
.. _format: https://github.com/kowey/attelo/doc/inputs.rst
"""
def read_edu(row):
'interpret a single row'
expected_len = 6
if len(row) != expected_len:
oops = ('This row in the EDU file {efile} has {num} '
'elements instead of the expected {expected}: '
'{row}')
raise IoException(oops.format(efile=edu_file,
num=len(row),
expected=expected_len,
row=row))
[global_id, txt, grouping, subgrouping, start_str, end_str] = row
start = int(start_str)
end = int(end_str)
return EDU(global_id,
txt.decode('utf-8'),
start,
end,
grouping,
subgrouping)
with open(edu_file, 'rb') as instream:
reader = csv.reader(instream, dialect=csv.excel_tab)
return [read_edu(r) for r in reader if r]
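# Illustrative row (tab-separated, ids made up): d1_e1, "Hello there", d1, d1_s1,
# 0, 11 -> EDU('d1_e1', u'Hello there', 0, 11, 'd1', 'd1_s1')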
def load_pairings(edu_file):
"""
Read and return EDU pairings (see :doc:`../input`).
We assume the order is parent, child
:rtype: [(string, string)]
.. _format: https://github.com/kowey/attelo/doc/inputs.rst
"""
def read_pair(row):
'interpret a single row'
if len(row) < 2 or len(row) > 3:
oops = ('This row in the pairings file {efile} has '
'{num} elements instead of the expected 2 or 3')
raise IoException(oops.format(efile=edu_file,
num=len(row),
row=row))
return tuple(row[:2])
with open(edu_file, 'rb') as instream:
reader = csv.reader(instream, dialect=csv.excel_tab)
return [read_pair(r) for r in reader if r]
def load_labels(feature_file):
"""
Read the very top of a feature file and read the labels comment,
return the sequence of labels, else return None
:rtype: [string] or None
"""
with codecs.open(feature_file, 'r', 'utf-8') as stream:
line = stream.readline()
if line.startswith('#'):
seq = line[1:].split()
if seq[0] == 'labels:':
return seq[1:]
# fall-through case, no labels found
return None
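# For example (illustrative labels, not a real corpus), a feature file whose
# first line is "# labels: elaboration narration" makes load_labels return
# ['elaboration', 'narration'].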
def _process_edu_links(edus, pairings):
"""
Convert from the results of :py:method:load_edus: and
:py:method:load_pairings: to a sequence of edus and pairings
respectively
:rtype: ([EDU], [(EDU,EDU)])
"""
edumap = {e.id: e for e in edus}
enames = frozenset(chain.from_iterable(pairings))
if FAKE_ROOT_ID in enames:
edus2 = [FAKE_ROOT] + edus
edumap[FAKE_ROOT_ID] = FAKE_ROOT
else:
edus2 = copy.copy(edus)
naughty = [x for x in enames if x not in edumap]
if naughty:
oops = ('The pairings file mentions the following EDUs but the EDU '
'file does not actually include EDUs to go with them: {}')
raise DataPackException(oops.format(truncate(', '.join(naughty),
1000)))
pairings2 = [(edumap[e1], edumap[e2]) for e1, e2 in pairings]
return edus2, pairings2
def load_multipack(edu_file, pairings_file, feature_file, vocab_file,
corpus_path=None, # WIP
verbose=False):
"""Read EDUs and features for edu pairs.
Perform some basic sanity checks, raising
:py:class:`IoException` if they should fail
Parameters
----------
... TODO
corpus_path : string
Path to the labelled corpus, to retrieve the original gold
structures ; at the moment, only works with the RST corpus to
access gold RST constituency trees.
Returns
-------
mpack: Multipack
Multipack (= dict) from grouping to DataPack.
"""
vocab = load_vocab(vocab_file)
with Torpor("Reading edus and pairings", quiet=not verbose):
edus, pairings = _process_edu_links(load_edus(edu_file),
load_pairings(pairings_file))
with Torpor("Reading features", quiet=not verbose):
labels = [UNKNOWN] + load_labels(feature_file)
# pylint: disable=unbalanced-tuple-unpacking
data, targets = load_svmlight_file(feature_file,
n_features=len(vocab))
# pylint: enable=unbalanced-tuple-unpacking
# WIP augment DataPack with the gold structure for each grouping
if corpus_path is None:
ctargets = {}
else:
corpus_reader = educe.rst_dt.corpus.Reader(corpus_path)
# FIXME should be [v] so that it is adapted to forests (lists)
# of structures, e.g. produced by for_intra()
ctargets = {k.doc: v for k, v in corpus_reader.slurp().items()}
# TODO modify educe.rst_dt.corpus.Reader.slurp_subcorpus() to
# convert fine-grained to coarse-grained relations by default,
# e.g. add kwarg coarse_rels=True, then find all current callers
# but this one and call slurp* with coarse_rels=False
# end WIP
with Torpor("Build data packs", quiet=not verbose):
dpack = DataPack.load(edus, pairings, data, targets, ctargets,
labels, vocab)
mpack = {grp_name: dpack.selected(idxs)
for grp_name, idxs in groupings(pairings).items()}
return mpack
def load_vocab(filename):
"""Read feature vocabulary"""
features = []
with codecs.open(filename, 'r', 'utf-8') as stream:
for line in stream:
features.append(line.split('\t')[0])
return features
# ---------------------------------------------------------------------
# predictions
# ---------------------------------------------------------------------
def write_predictions_output(dpack, predicted, filename):
"""
Write predictions to an output file whose format
is documented in :doc:`../output`
"""
links = {}
for edu1, edu2, label in predicted:
links[(edu1, edu2)] = label
def mk_row(edu1, edu2):
'return a list of columns'
edu1_id = edu1.id
edu2_id = edu2.id
row = [edu1_id,
edu2_id,
links.get((edu1_id, edu2_id), UNRELATED)]
return [x.encode('utf-8') for x in row]
with open(filename, 'wb') as fout:
writer = csv.writer(fout, dialect=csv.excel_tab)
# by convention the zeroth edu is the root node
for edu1, edu2 in dpack.pairings:
writer.writerow(mk_row(edu1, edu2))
def load_predictions(edu_file):
"""
Read back predictions (see :doc:`../output`), returning a list
of triples: parent id, child id, relation label (or 'UNRELATED')
:rtype: [(string, string, string)]
"""
def mk_pair(row):
'interpret a single row'
expected_len = 3
if len(row) < expected_len:
oops = ('This row in the predictions file {efile} has {num} '
'elements instead of the expected {expected}: '
'{row}')
raise IoException(oops.format(efile=edu_file,
num=len(row),
expected=expected_len,
row=row))
return tuple(x.decode('utf-8') for x in row)
with open(edu_file, 'rb') as instream:
reader = csv.reader(instream, dialect=csv.excel_tab)
return [mk_pair(r) for r in reader if r]
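# Illustrative example (ids made up): a tab-separated row
#   d1_e1 <TAB> d1_e2 <TAB> elaboration
# comes back as (u'd1_e1', u'd1_e2', u'elaboration'), i.e. (parent, child, label),
# matching the rows written by write_predictions_output.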
def load_gold_predictions(pairings_file, feature_file, verbose=False):
"""
Load a pairings and feature file as though it were a set of
predictions
:rtype: [(string, string, string)]
"""
pairings = load_pairings(pairings_file)
with Torpor("Reading features", quiet=not verbose):
labels = load_labels(feature_file)
# pylint: disable=unbalanced-tuple-unpacking
_, targets = load_svmlight_file(feature_file)
# pylint: enable=unbalanced-tuple-unpacking
return [(x1, x2, get_label_string(labels, t))
for ((x1, x2), t) in zip(pairings, targets)]
# ---------------------------------------------------------------------
# folds
# ---------------------------------------------------------------------
def load_fold_dict(filename):
"""
Load fold dictionary into memory from file
"""
with open(filename, 'r') as stream:
return json.load(stream)
def save_fold_dict(fold_dict, filename):
"""
Dump fold dictionary to a file
"""
with open(filename, 'w') as stream:
json.dump(fold_dict, stream, indent=2)
| gpl-3.0 |
modsim/CADET | doc/examples/breakthrough.py | 1 | 5437 | # Import libraries
import numpy as np
import matplotlib.pyplot as plt
from cadet import Cadet
Cadet.cadet_path = '/path/to/cadet-cli'
# Create model object
model = Cadet()
# Number of unit operations
model.root.input.model.nunits = 3
# Inlet
model.root.input.model.unit_000.unit_type = 'INLET'
model.root.input.model.unit_000.ncomp = 1
model.root.input.model.unit_000.inlet_type = 'PIECEWISE_CUBIC_POLY'
# General Rate Model
model.root.input.model.unit_001.unit_type = 'GENERAL_RATE_MODEL'
model.root.input.model.unit_001.ncomp = 1
## Geometry
model.root.input.model.unit_001.col_length = 0.1 # m
model.root.input.model.unit_001.cross_section_area = 0.01 # m^2
model.root.input.model.unit_001.col_porosity = 0.37 # -
model.root.input.model.unit_001.par_porosity = 0.33 # -
model.root.input.model.unit_001.par_radius = 1e-6 # m
## Transport
model.root.input.model.unit_001.col_dispersion = 1e-8 # m^2 / s (interstitial volume)
model.root.input.model.unit_001.film_diffusion = [1e-5] # m / s
model.root.input.model.unit_001.par_diffusion = [1e-10,] # m^2 / s (mobile phase)
model.root.input.model.unit_001.par_surfdiffusion = [0.0,] # m^2 / s (solid phase)
## Adsorption
model.root.input.model.unit_001.adsorption_model = 'MULTI_COMPONENT_LANGMUIR'
model.root.input.model.unit_001.adsorption.is_kinetic = True # Kinetic binding
model.root.input.model.unit_001.adsorption.mcl_ka = [1.0,] # m^3 / (mol * s) (mobile phase)
model.root.input.model.unit_001.adsorption.mcl_kd = [1.0,] # 1 / s (desorption)
model.root.input.model.unit_001.adsorption.mcl_qmax = [100.0,] # mol / m^3 (solid phase)
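# For this single-component case the Langmuir binding specified above reads
# dq/dt = mcl_ka * c * (mcl_qmax - q) - mcl_kd * q (kinetic form, since
# is_kinetic = True).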
## Initial conditions
model.root.input.model.unit_001.init_c = [0.0,]
model.root.input.model.unit_001.init_q = [0.0,]
## Discretization
### Grid cells
model.root.input.model.unit_001.discretization.ncol = 20
model.root.input.model.unit_001.discretization.npar = 5
### Bound states
model.root.input.model.unit_001.discretization.nbound = [1]
### Other options
model.root.input.model.unit_001.discretization.par_disc_type = 'EQUIDISTANT_PAR'
model.root.input.model.unit_001.discretization.use_analytic_jacobian = 1
model.root.input.model.unit_001.discretization.reconstruction = 'WENO'
model.root.input.model.unit_001.discretization.gs_type = 1
model.root.input.model.unit_001.discretization.max_krylov = 0
model.root.input.model.unit_001.discretization.max_restarts = 10
model.root.input.model.unit_001.discretization.schur_safety = 1.0e-8
model.root.input.model.unit_001.discretization.weno.boundary_model = 0
model.root.input.model.unit_001.discretization.weno.weno_eps = 1e-10
model.root.input.model.unit_001.discretization.weno.weno_order = 3
## Outlet
model.root.input.model.unit_002.unit_type = 'OUTLET'
model.root.input.model.unit_002.ncomp = 1
# Sections
model.root.input.solver.sections.nsec = 1
model.root.input.solver.sections.section_times = [0.0, 1200,] # s
model.root.input.solver.sections.section_continuity = []
# Inlet sections
model.root.input.model.unit_000.sec_000.const_coeff = [1.0e-3,] # mol / m^3
model.root.input.model.unit_000.sec_000.lin_coeff = [0.0,]
model.root.input.model.unit_000.sec_000.quad_coeff = [0.0,]
model.root.input.model.unit_000.sec_000.cube_coeff = [0.0,]
# Switches
model.root.input.model.connections.nswitches = 1
model.root.input.model.connections.switch_000.section = 0
model.root.input.model.connections.switch_000.connections = [
    0, 1, -1, -1, 60/1e6, # [unit_000, unit_001, all components, all components, Q / m^3*s^-1]
    1, 2, -1, -1, 60/1e6] # [unit_001, unit_002, all components, all components, Q / m^3*s^-1]
# Solver settings
model.root.input.model.solver.gs_type = 1
model.root.input.model.solver.max_krylov = 0
model.root.input.model.solver.max_restarts = 10
model.root.input.model.solver.schur_safety = 1e-8
# Number of cores for parallel simulation
model.root.input.solver.nthreads = 1
# Tolerances for the time integrator
model.root.input.solver.time_integrator.abstol = 1e-6
model.root.input.solver.time_integrator.algtol = 1e-10
model.root.input.solver.time_integrator.reltol = 1e-6
model.root.input.solver.time_integrator.init_step_size = 1e-6
model.root.input.solver.time_integrator.max_steps = 1000000
# Return data
model.root.input['return'].split_components_data = 0
model.root.input['return'].split_ports_data = 0
model.root.input['return'].unit_000.write_solution_bulk = 1
model.root.input['return'].unit_000.write_solution_inlet = 1
model.root.input['return'].unit_000.write_solution_outlet = 1
# Copy settings to the other unit operations
model.root.input['return'].unit_001 = model.root.input['return'].unit_000
model.root.input['return'].unit_002 = model.root.input['return'].unit_000
# Solution times
model.root.input.solver.user_solution_times = np.linspace(0, 1200, 1001)
# Save and run simulation
model.filename = 'model.h5'
model.save()
data = model.run()
if data.returncode == 0:
print("Simulation completed successfully")
model.load()
else:
print(data)
raise Exception("Simulation failed")
# Plot restuls
plt.figure()
time = model.root.output.solution.solution_times
c = model.root.output.solution.unit_001.solution_outlet
plt.plot(time/60, c)
plt.xlabel('$time~/~min$')
plt.ylabel('$Outlet~concentration~/~mol \cdot m^{-3} $')
plt.show()
| gpl-3.0 |
thegooglecodearchive/mpmath | mpmath/visualization.py | 6 | 9486 | """
Plotting (requires matplotlib)
"""
from colorsys import hsv_to_rgb, hls_to_rgb
from .libmp import NoConvergence
from .libmp.backend import xrange
class VisualizationMethods(object):
plot_ignore = (ValueError, ArithmeticError, ZeroDivisionError, NoConvergence)
def plot(ctx, f, xlim=[-5,5], ylim=None, points=200, file=None, dpi=None,
singularities=[], axes=None):
r"""
Shows a simple 2D plot of a function `f(x)` or list of functions
`[f_0(x), f_1(x), \ldots, f_n(x)]` over a given interval
specified by *xlim*. Some examples::
plot(lambda x: exp(x)*li(x), [1, 4])
plot([cos, sin], [-4, 4])
plot([fresnels, fresnelc], [-4, 4])
plot([sqrt, cbrt], [-4, 4])
plot(lambda t: zeta(0.5+t*j), [-20, 20])
plot([floor, ceil, abs, sign], [-5, 5])
Points where the function raises a numerical exception or
returns an infinite value are removed from the graph.
Singularities can also be excluded explicitly
as follows (useful for removing erroneous vertical lines)::
plot(cot, ylim=[-5, 5]) # bad
plot(cot, ylim=[-5, 5], singularities=[-pi, 0, pi]) # good
For parts where the function assumes complex values, the
real part is plotted with dashes and the imaginary part
is plotted with dots.
.. note :: This function requires matplotlib (pylab).
"""
if file:
axes = None
fig = None
if not axes:
import pylab
fig = pylab.figure()
axes = fig.add_subplot(111)
if not isinstance(f, (tuple, list)):
f = [f]
a, b = xlim
colors = ['b', 'r', 'g', 'm', 'k']
for n, func in enumerate(f):
x = ctx.arange(a, b, (b-a)/float(points))
segments = []
segment = []
in_complex = False
for i in xrange(len(x)):
try:
if i != 0:
for sing in singularities:
if x[i-1] <= sing and x[i] >= sing:
raise ValueError
v = func(x[i])
if ctx.isnan(v) or abs(v) > 1e300:
raise ValueError
if hasattr(v, "imag") and v.imag:
re = float(v.real)
im = float(v.imag)
if not in_complex:
in_complex = True
segments.append(segment)
segment = []
segment.append((float(x[i]), re, im))
else:
if in_complex:
in_complex = False
segments.append(segment)
segment = []
if hasattr(v, "real"):
v = v.real
segment.append((float(x[i]), v))
except ctx.plot_ignore:
if segment:
segments.append(segment)
segment = []
if segment:
segments.append(segment)
for segment in segments:
x = [s[0] for s in segment]
y = [s[1] for s in segment]
if not x:
continue
c = colors[n % len(colors)]
if len(segment[0]) == 3:
z = [s[2] for s in segment]
axes.plot(x, y, '--'+c, linewidth=3)
axes.plot(x, z, ':'+c, linewidth=3)
else:
axes.plot(x, y, c, linewidth=3)
axes.set_xlim([float(_) for _ in xlim])
if ylim:
axes.set_ylim([float(_) for _ in ylim])
axes.set_xlabel('x')
axes.set_ylabel('f(x)')
axes.grid(True)
if fig:
if file:
pylab.savefig(file, dpi=dpi)
else:
pylab.show()
def default_color_function(ctx, z):
if ctx.isinf(z):
return (1.0, 1.0, 1.0)
if ctx.isnan(z):
return (0.5, 0.5, 0.5)
pi = 3.1415926535898
a = (float(ctx.arg(z)) + ctx.pi) / (2*ctx.pi)
a = (a + 0.5) % 1.0
b = 1.0 - float(1/(1.0+abs(z)**0.3))
return hls_to_rgb(a, b, 0.8)
def cplot(ctx, f, re=[-5,5], im=[-5,5], points=2000, color=None,
verbose=False, file=None, dpi=None, axes=None):
"""
Plots the given complex-valued function *f* over a rectangular part
of the complex plane specified by the pairs of intervals *re* and *im*.
For example::
cplot(lambda z: z, [-2, 2], [-10, 10])
cplot(exp)
cplot(zeta, [0, 1], [0, 50])
By default, the complex argument (phase) is shown as color (hue) and
    the magnitude is shown as brightness. You can also supply a
custom color function (*color*). This function should take a
complex number as input and return an RGB 3-tuple containing
floats in the range 0.0-1.0.
To obtain a sharp image, the number of points may need to be
increased to 100,000 or thereabout. Since evaluating the
function that many times is likely to be slow, the 'verbose'
option is useful to display progress.
.. note :: This function requires matplotlib (pylab).
"""
if color is None:
color = ctx.default_color_function
import pylab
if file:
axes = None
fig = None
if not axes:
fig = pylab.figure()
axes = fig.add_subplot(111)
rea, reb = re
ima, imb = im
dre = reb - rea
dim = imb - ima
M = int(ctx.sqrt(points*dre/dim)+1)
N = int(ctx.sqrt(points*dim/dre)+1)
x = pylab.linspace(rea, reb, M)
y = pylab.linspace(ima, imb, N)
# Note: we have to be careful to get the right rotation.
# Test with these plots:
# cplot(lambda z: z if z.real < 0 else 0)
# cplot(lambda z: z if z.imag < 0 else 0)
w = pylab.zeros((N, M, 3))
for n in xrange(N):
for m in xrange(M):
z = ctx.mpc(x[m], y[n])
try:
v = color(f(z))
except ctx.plot_ignore:
v = (0.5, 0.5, 0.5)
w[n,m] = v
if verbose:
print(n, "of", N)
rea, reb, ima, imb = [float(_) for _ in [rea, reb, ima, imb]]
axes.imshow(w, extent=(rea, reb, ima, imb), origin='lower')
axes.set_xlabel('Re(z)')
axes.set_ylabel('Im(z)')
if fig:
if file:
pylab.savefig(file, dpi=dpi)
else:
pylab.show()
def splot(ctx, f, u=[-5,5], v=[-5,5], points=100, keep_aspect=True, \
wireframe=False, file=None, dpi=None, axes=None):
"""
Plots the surface defined by `f`.
If `f` returns a single component, then this plots the surface
defined by `z = f(x,y)` over the rectangular domain with
`x = u` and `y = v`.
If `f` returns three components, then this plots the parametric
surface `x, y, z = f(u,v)` over the pairs of intervals `u` and `v`.
For example, to plot a simple function::
>>> from mpmath import *
>>> f = lambda x, y: sin(x+y)*cos(y)
>>> splot(f, [-pi,pi], [-pi,pi]) # doctest: +SKIP
Plotting a donut::
>>> r, R = 1, 2.5
>>> f = lambda u, v: [r*cos(u), (R+r*sin(u))*cos(v), (R+r*sin(u))*sin(v)]
>>> splot(f, [0, 2*pi], [0, 2*pi]) # doctest: +SKIP
.. note :: This function requires matplotlib (pylab) 0.98.5.3 or higher.
"""
import pylab
import mpl_toolkits.mplot3d as mplot3d
if file:
axes = None
fig = None
if not axes:
fig = pylab.figure()
axes = mplot3d.axes3d.Axes3D(fig)
ua, ub = u
va, vb = v
du = ub - ua
dv = vb - va
if not isinstance(points, (list, tuple)):
points = [points, points]
M, N = points
u = pylab.linspace(ua, ub, M)
v = pylab.linspace(va, vb, N)
x, y, z = [pylab.zeros((M, N)) for i in xrange(3)]
xab, yab, zab = [[0, 0] for i in xrange(3)]
for n in xrange(N):
for m in xrange(M):
fdata = f(ctx.convert(u[m]), ctx.convert(v[n]))
try:
x[m,n], y[m,n], z[m,n] = fdata
except TypeError:
x[m,n], y[m,n], z[m,n] = u[m], v[n], fdata
for c, cab in [(x[m,n], xab), (y[m,n], yab), (z[m,n], zab)]:
if c < cab[0]:
cab[0] = c
if c > cab[1]:
cab[1] = c
if wireframe:
axes.plot_wireframe(x, y, z, rstride=4, cstride=4)
else:
axes.plot_surface(x, y, z, rstride=4, cstride=4)
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_zlabel('z')
if keep_aspect:
dx, dy, dz = [cab[1] - cab[0] for cab in [xab, yab, zab]]
maxd = max(dx, dy, dz)
if dx < maxd:
delta = maxd - dx
axes.set_xlim3d(xab[0] - delta / 2.0, xab[1] + delta / 2.0)
if dy < maxd:
delta = maxd - dy
axes.set_ylim3d(yab[0] - delta / 2.0, yab[1] + delta / 2.0)
if dz < maxd:
delta = maxd - dz
axes.set_zlim3d(zab[0] - delta / 2.0, zab[1] + delta / 2.0)
if fig:
if file:
pylab.savefig(file, dpi=dpi)
else:
pylab.show()
VisualizationMethods.plot = plot
VisualizationMethods.default_color_function = default_color_function
VisualizationMethods.cplot = cplot
VisualizationMethods.splot = splot
| bsd-3-clause |
justincassidy/scikit-learn | examples/missing_values.py | 233 | 3056 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = np.floor(n_samples * missing_rate)
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
drgulevich/microLLG | anim.py | 1 | 2478 | #!/usr/bin/env python
###
### Animation example: http://matplotlib.org/examples/animation/dynamic_image.html
###
from pylab import *
import matplotlib.animation as animation
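# Note (added): this script assumes that `magdata`, `Nx`, `Ny`, `Nframes`,
# `countout` and `dt` are already defined in the calling namespace, e.g. by a
# simulation or data-loading script run beforehand.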
interpolation='nearest'
#interpolation='gaussian'
#interpolation=None
#cmap=plt.get_cmap('bwr')
#cmap=plt.get_cmap('seismic_r')
cmap=plt.get_cmap('coolwarm_r')
### --- Parameters ---
#fig, ax = subplots(figsize=(6,6))
fig, ax = subplots(figsize=(12,12))
#subplots_adjust(left=0, right=1, bottom=0, top=1)
mx=magdata[0,1:Nx+1,1:Ny+1,0]
my=magdata[0,1:Nx+1,1:Ny+1,1]
mz=magdata[0,1:Nx+1,1:Ny+1,2]
im=ax.imshow(mz.T,interpolation=interpolation, cmap = cmap, origin='lower',vmin=-1,vmax=1,zorder=1)
#im=ax.imshow(mz.T,interpolation=interpolation, cmap = cmap, origin='lower',extent=[1,Nx,1,Ny],vmin=-1,vmax=1,zorder=1)
#im=imshow(magdata[0,:,:,2].T,interpolation=interpolation, cmap = cmap, origin='lower',vmin=-1,vmax=1)
#width=0.0016
#scale=1
#width=0.0012
#scale=0.8
width=0.0015
scale=1.1
#X, Y = meshgrid(np.arange(1,Nx+1),np.arange(1,Ny+1))
#Q = ax.quiver(X, Y, mx.T,my.T,pivot='mid',zorder=2,width=width, scale=scale, scale_units='x')
Q = ax.quiver(mx.T,my.T,pivot='mid',zorder=2,width=width, scale=scale, scale_units='x')
#Q = ax.quiver(X, Y, U, V, pivot='mid', color='r', units='inches')
mt = text(.5, .5, 't=%.2f' % 0., fontsize=15)
#mt = text(1.5, 1.5, 't=%.2f' % 0., fontsize=15)
#time_text = text(.5, .5, '', fontsize=15)
def init():
return updatefig(0)
def updatefig(frame):
data=magdata[frame,1:Nx+1,1:Ny+1,2].T
im.set_array(data)
Q.set_UVC(magdata[frame,1:Nx+1,1:Ny+1,0].T, magdata[frame,1:Nx+1,1:Ny+1,1].T)
mt.set_text('t=%.2f' % (frame*countout*dt))
return im,Q,mt,
def animate_as_gif(frame):
return updatefig(frame)
#export = True
export = False
if(export==True):
anim = animation.FuncAnimation(fig, animate_as_gif, np.arange(0, Nframes), init_func=init, interval=100, blit=True, repeat=False)
anim.save('animation.gif', writer='imagemagick')
else:
#anim = animation.FuncAnimation(fig, updatefig, np.arange(1, Nframes), init_func=init, interval=500, blit=True, repeat=False)
anim = animation.FuncAnimation(fig, updatefig, np.arange(0, Nframes), init_func=init, interval=100, blit=True, repeat=False)
# anim = animation.FuncAnimation(fig, updatefig, np.arange(0, Nframes,100), init_func=init, interval=100, blit=True, repeat=False)
fig.tight_layout()
gca().set_aspect('equal', adjustable='box')
#axis('off')
show()
| gpl-3.0 |
vivekmishra1991/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
clemkoa/scikit-learn | sklearn/metrics/cluster/supervised.py | 13 | 31406 | """Utilities to evaluate the clustering performance of models.
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# Arnaud Fouchet <foucheta@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# Gregory Stupp <stuppie@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
from __future__ import division
from math import log
import numpy as np
from scipy import sparse as sp
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.validation import check_array
from ...utils.fixes import comb
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays."""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None, sparse=False):
"""Build a contingency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps : None or float, optional.
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
sparse : boolean, optional.
        If True, return a sparse CSR contingency matrix. If ``eps is not None``,
and ``sparse is True``, will throw ValueError.
.. versionadded:: 0.18
Returns
-------
contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
Will be a ``scipy.sparse.csr_matrix`` if ``sparse=True``.
"""
if eps is not None and sparse:
raise ValueError("Cannot set 'eps' when sparse=True")
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = sp.coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int)
if sparse:
contingency = contingency.tocsr()
contingency.sum_duplicates()
else:
contingency = contingency.toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
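# Example (added for illustration):
#   contingency_matrix([0, 0, 1, 1], [0, 0, 1, 2])
#   -> array([[2, 0, 0],
#             [0, 1, 1]])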
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance.
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://link.springer.com/article/10.1007%2FBF01908075
.. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
n_classes = np.unique(labels_true).shape[0]
n_clusters = np.unique(labels_pred).shape[0]
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (n_classes == n_clusters == 1 or
n_classes == n_clusters == 0 or
n_classes == n_clusters == n_samples):
return 1.0
# Compute the ARI using the contingency data
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
sum_comb_c = sum(comb2(n_c) for n_c in np.ravel(contingency.sum(axis=1)))
sum_comb_k = sum(comb2(n_k) for n_k in np.ravel(contingency.sum(axis=0)))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.data)
prod_comb = (sum_comb_c * sum_comb_k) / comb(n_samples, 2)
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return (sum_comb - prod_comb) / (mean_comb - prod_comb)
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once.
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity : float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure : float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
MI = mutual_info_score(None, None, contingency=contingency)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness /
(homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity : float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings.
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`|U_i|` is the number of the samples
in cluster :math:`U_i` and :math:`|V_j|` is the number of the
samples in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
        MI(U,V)=\sum_{i=1}^{|U|} \sum_{j=1}^{|V|} \\frac{|U_i\cap V_j|}{N}
        \log\\frac{N|U_i \cap V_j|}{|U_i||V_j|}
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency : {None, array, sparse matrix},
shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi : float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
else:
contingency = check_array(contingency,
accept_sparse=['csr', 'csc', 'coo'],
dtype=[int, np.int32, np.int64])
if isinstance(contingency, np.ndarray):
# For an array
nzx, nzy = np.nonzero(contingency)
nz_val = contingency[nzx, nzy]
elif sp.issparse(contingency):
# For a sparse matrix
nzx, nzy, nz_val = sp.find(contingency)
else:
raise ValueError("Unsupported type for 'contingency': %s" %
type(contingency))
contingency_sum = contingency.sum()
pi = np.ravel(contingency.sum(axis=1))
pj = np.ravel(contingency.sum(axis=0))
log_contingency_nm = np.log(nz_val)
contingency_nm = nz_val / contingency_sum
# Don't need to calculate the full outer product, just for non-zeroes
outer = pi.take(nzx) * pj.take(nzy)
log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
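# Example (added for illustration): mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
# equals log(2) ~= 0.693; values are expressed in nats (natural logarithm).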
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings.
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami : float (upper-bounded by 1.0)
        The AMI returns a value of 1 when the two partitions are identical
        (i.e. perfectly matched). Random partitions (independent labellings)
        have an expected AMI of around 0 on average and can therefore be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_information_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
contingency = contingency.astype(np.float64)
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings.
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
contingency = contingency.astype(np.float64)
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def fowlkes_mallows_score(labels_true, labels_pred, sparse=False):
"""Measure the similarity of two clusterings of a set of points.
    The Fowlkes-Mallows index (FMI) is defined as the geometric mean of
the precision and recall::
FMI = TP / sqrt((TP + FP) * (TP + FN))
    Where ``TP`` is the number of **True Positive** (i.e. the number of pairs
    of points that belong to the same clusters in both ``labels_true`` and
    ``labels_pred``), ``FP`` is the number of **False Positive** (i.e. the
    number of pairs of points that belong to the same clusters in
    ``labels_true`` but not in ``labels_pred``) and ``FN`` is the number of
    **False Negative** (i.e. the number of pairs of points that belong to the
    same clusters in ``labels_pred`` but not in ``labels_true``).
The score ranges from 0 to 1. A high value indicates a good similarity
between two clusters.
Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.
Parameters
----------
labels_true : int array, shape = (``n_samples``,)
A clustering of the data into disjoint subsets.
labels_pred : array, shape = (``n_samples``, )
A clustering of the data into disjoint subsets.
sparse : bool
Compute contingency matrix internally with sparse matrix.
Returns
-------
score : float
The resulting Fowlkes-Mallows score.
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import fowlkes_mallows_score
>>> fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally random, hence the FMI is null::
>>> fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
    .. [1] `E. B. Fowlkes and C. L. Mallows, 1983. "A method for comparing two
hierarchical clusterings". Journal of the American Statistical
Association
<http://wildfire.stat.ucla.edu/pdflibrary/fowlkes.pdf>`_
.. [2] `Wikipedia entry for the Fowlkes-Mallows Index
<https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples, = labels_true.shape
c = contingency_matrix(labels_true, labels_pred, sparse=True)
tk = np.dot(c.data, c.data) - n_samples
pk = np.sum(np.asarray(c.sum(axis=0)).ravel() ** 2) - n_samples
qk = np.sum(np.asarray(c.sum(axis=1)).ravel() ** 2) - n_samples
return tk / np.sqrt(pk * qk) if tk != 0. else 0.
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(np.float64)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
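# Example (added for illustration): entropy([0, 0, 1, 1]) equals log(2) ~= 0.693,
# whereas a single-cluster labeling such as [0, 0, 0, 0] has entropy 0.0.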
| bsd-3-clause |
leewujung/ooi_sonar | during_incubator/test_nmf_options.py | 1 | 5502 | #!/usr/local/bin/python
'''
Testing NMF initialization and various methods using
scikit learn and NIMFA
'''
import os, sys, glob, re
import datetime
from matplotlib.dates import date2num, num2date
import numpy as np
from calendar import monthrange
import h5py
from sklearn import decomposition
import nimfa
sys.path.insert(0,'/home/wu-jung/code_git/mi-instrument/')
from concat_raw import get_num_days_pings, get_data_from_h5
from echogram_decomp import find_nearest_time_idx,reshape_into_3freq,reshape_into_1freq,\
sep_into_freq,plot_decomp_v,plot_decomp_transform
import matplotlib.pyplot as plt
from modest_image import imshow
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Set default colormap
plt.rcParams['image.cmap'] = 'jet'
# Get info of all files and set path
data_path = '/home/wu-jung/internal_2tb/ooi_sonar/figs/20170310_monthly_smpl'
save_path = '/home/wu-jung/internal_2tb/ooi_sonar/figs/20170310_nmf_options'
fname_form = '*.h5'
fname_all = glob.glob(os.path.join(data_path,fname_form))
n_comp = 5 # number of components for NMF
# Constant params
fmt = '%Y%m%d'
all_hr = range(24) # list of all hour
all_min = range(1,11) # list of all minutes
pings_per_day = len(all_hr)*len(all_min) # number of pings per day
max_missing_ping = 5 # maximum number of missing pings in a day
# Load data
fname = 'CE04OSPS_20150901-20150930_smpl.h5'
f = h5py.File(os.path.join(data_path,fname),'r')
Sv_mtx0 = np.asarray(f['Sv_mtx'])
depth_tick = np.asarray(f['depth_tick'])
depth_label = np.asarray(f['depth_label'])
time_tick = np.asarray(f['time_tick'])
time_label = np.asarray(f['time_label'])
depth_bin_num = np.asarray(f['depth_bin_num'])
pings_per_day = np.asarray(f['pings_per_day'])
f.close()
fname = 'CE04OSPS_20151001-20151031_smpl.h5'
#fname = 'CE04OSPS_20160501-20160601_smpl.h5'
f = h5py.File(os.path.join(data_path,fname),'r')
Sv_mtx1 = np.asarray(f['Sv_mtx'])
f.close()
# Set plotting params
vec_len_each_day = pings_per_day*depth_bin_num # length of vector for 1 day
# Prep data
if Sv_mtx0.shape[2]>Sv_mtx1.shape[2]:
Sv_mtx0 = np.delete(Sv_mtx0,range(Sv_mtx1.shape[2],Sv_mtx0.shape[2]),axis=2)
else:
Sv_mtx1 = np.delete(Sv_mtx1,range(Sv_mtx0.shape[2],Sv_mtx1.shape[2]),axis=2)
Sv_vec0 = reshape_into_3freq(Sv_mtx0,vec_len_each_day)
Sv_vec1 = reshape_into_3freq(Sv_mtx1,vec_len_each_day)
# Run NMF on 1st set
nmf0 = decomposition.NMF(n_components=n_comp)
r_mtx0 = nmf0.fit_transform(Sv_vec0-Sv_vec0.min())
v_comps0 = sep_into_freq(nmf0.components_,pings_per_day,depth_bin_num)
# Run NMF on 2nd set WITHOUT initialization
nmf1 = decomposition.NMF(n_components=n_comp)
r_mtx1 = nmf1.fit_transform(Sv_vec1-Sv_vec1.min())
v_comps1 = sep_into_freq(nmf1.components_,pings_per_day,depth_bin_num)
# Run NMF on 2nd set WITH initialization H & W
nmf2 = decomposition.NMF(n_components=n_comp,init='custom')
r_mtx2 = nmf2.fit_transform(Sv_vec1-Sv_vec1.min(),\
H=np.copy(nmf0.components_),W=np.copy(r_mtx0))
v_comps2 = sep_into_freq(nmf2.components_,pings_per_day,depth_bin_num)
# Assemblge plot_param
plot_params = dict(zip(['depth_tick','depth_label','time_tick','time_label'],\
[depth_tick,depth_label,time_tick,time_label]))
fig_comp = plot_decomp_v(v_comps0,plot_params)
fig_comp.set_figheight(3)
fig_comp.set_figwidth(8)
fig_comp.suptitle('Set 0')
fig_comp = plot_decomp_v(v_comps1,plot_params)
fig_comp.set_figheight(3)
fig_comp.set_figwidth(8)
fig_comp.suptitle('Set 1 no init')
fig_comp = plot_decomp_v(v_comps2,plot_params)
fig_comp.set_figheight(3)
fig_comp.set_figwidth(8)
fig_comp.suptitle('Set 1 with init')
save_fname = '.png'
fig,ax=plt.subplots(5,3)
for comp_num in range(5):
ax[comp_num,0].imshow(v_comps0[comp_num,0,:,:],aspect='auto')
ax[comp_num,1].imshow(v_comps1[comp_num,0,:,:],aspect='auto')
ax[comp_num,2].imshow(v_comps2[comp_num,0,:,:],aspect='auto')
if comp_num==0:
ax[comp_num,0].set_title('Set 1')
ax[comp_num,1].set_title('Set 2 no init')
ax[comp_num,2].set_title('Set 2 with init')
fig.set_figwidth(12)
fig.set_figheight(10)
fig.savefig(os.path.join(save_path,save_fname),dpi=200)
save_fname = 'test1_new2.png'
comp_num = 1;
fig,ax=plt.subplots(1,2)
ax[0].imshow(v_comps0[comp_num,0,:,:],aspect='auto')
ax[0].set_title('Set 1')
ax[1].imshow(v_comps1[comp_num,0,:,:],aspect='auto')
ax[1].set_title('Set 2 no init')
fig.set_figwidth(8)
fig.set_figheight(2)
fig.savefig(os.path.join(save_path,save_fname),dpi=200)
# Try out NIMFA
nmf = nimfa.Nmf(Sv_vec0-Sv_vec0.min(),rank=5)
nmf_fit = nmf()
W = nmf_fit.basis()
H = nmf_fit.coef()
V = np.empty((5,1440,1046))
for Hcomp in range(H.shape[0]):
V[Hcomp,:,:] = H[Hcomp,:].reshape((1440,1046))
snmf = nimfa.Snmf(Sv_vec0-Sv_vec0.min(),rank=5)
snmf_fit = snmf()
W_snmf = snmf_fit.basis()
H_snmf = snmf_fit.coef()
V_snmf = np.empty((5,1440,1046))
for Hcomp in range(H_snmf.shape[0]):
V_snmf[Hcomp,:,:] = H_snmf[Hcomp,:].reshape((1440,1046))
snmf.estimate_rank(rank_range=[2,3,4,5,6,7,8], n_run=10, idx=0, what='all')
fig,ax=plt.subplots(5)
for Hcomp in range(H.shape[0]):
ax[Hcomp].imshow(V[Hcomp,0:480,:].T,aspect='auto')
fig.set_figwidth(6)
fig.set_figheight(6)
fig,ax=plt.subplots(5)
for Hcomp in range(H.shape[0]):
ax[Hcomp].imshow(V_snmf[Hcomp,0:480,:].T,aspect='auto')
fig.set_figwidth(6)
fig.set_figheight(6)
| apache-2.0 |
hrjn/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initializations strategies to make
the algorithm convergence robust as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (with randomly generated datasets) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
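# Optional addition (not part of the original example): the inertia of this
# single random-init MiniBatchKMeans run, for comparison against the curves
# shown in the first figure.
print("Inertia of the single random-init run: %.3f" % km.inertia_)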
| bsd-3-clause |
junglehaw/junglehaw.github.io | _includes/nqt_simple.py | 1 | 12071 | import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
'''
import pandas as pd
import datetime
def download_price(id):
df=get_price(id,start_date='2005-01-04',end_date=datetime.datetime.now())
print(df)
pd.DataFrame.to_csv(df, id + '.csv')
download_price('000001.XSHG')
download_price('399006.XSHE')
download_price('000905.XSHG')
download_price('000300.XSHG')
download_price('600809.XSHG')
download_price('600519.XSHG')
'''
class seq_batch(object):
def __init__(self, data, time_steps, batch_size, target_index, lag, backward=False):
self.data = data
self.time_steps = time_steps
self.batch_size = batch_size
if backward:
self.batch_start = len(self.data) % self.time_steps
else:
self.batch_start = 0
self.target_index = target_index
self.lag = lag
def get_batch(self):
# xs shape (50batch, 20steps)
xs = np.arange(self.batch_start, self.batch_start + self.time_steps * self.batch_size).reshape((self.batch_size, self.time_steps))
seq = self.data[xs]
res = self.data[xs + self.lag][:, :, self.target_index] # the high
self.batch_start += self.time_steps
# plt.plot(xs[0, :], res[0, :], 'r', xs[0, :], seq[0, :], 'b--')
# plt.show()
# returned seq, res and xs: shape (batch, step, input)
if self.batch_start > len(self.data) - self.batch_size*self.time_steps - self.lag:
self.batch_start = np.random.randint(0, self.time_steps, 1)
target_ifeats = np.logical_not(np.isin(np.arange(self.data.shape[1]),self.target_index))
return [seq[:, :, target_ifeats], res, xs]
def get_inference_batch(self):
# xs shape (50batch, 20steps)
xs = np.arange(self.batch_start, self.batch_start + self.time_steps * self.batch_size).reshape((self.batch_size, self.time_steps))
seq = self.data[xs]
res = self.data[xs + self.lag][:, :, self.target_index] # the high
self.batch_start += self.time_steps
return [seq[:, :, 1:], res[:, :, np.newaxis], xs]
def get_feats_batch(self):
xs = np.arange(self.batch_start, self.batch_start + self.time_steps * self.batch_size).reshape((self.batch_size, self.time_steps))
seq = self.data[xs]
self.batch_start += self.time_steps
return [seq[:, :, 1:], xs]
class LSTMRegress(object):
def __init__(self, n_steps, input_size, output_size, cell_size, batch_size):
self.n_steps = n_steps
self.input_size = input_size
self.output_size = output_size
self.cell_size = cell_size
self.batch_size = batch_size
with tf.name_scope('inputs'):
self.xs = tf.placeholder(tf.float32, [None, n_steps, input_size], name='xs')
self.ys = tf.placeholder(tf.float32, [None, n_steps, output_size], name='ys')
with tf.variable_scope('in_hidden'):
self.add_input_layer()
with tf.variable_scope('LSTM_cell'):
self.add_cell()
with tf.variable_scope('out_hidden'):
self.add_output_layer()
with tf.name_scope('cost'):
self.compute_cost()
with tf.name_scope('train'):
self.train_op = tf.train.AdamOptimizer(learn_rate).minimize(self.cost)
def add_input_layer(self,):
l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='2_2D') # (batch*n_step, in_size)
# Ws (in_size, cell_size)
Ws_in = self._weight_variable([self.input_size, self.cell_size])
# bs (cell_size, )
bs_in = self._bias_variable([self.cell_size,])
# l_in_y = (batch * n_steps, cell_size)
with tf.name_scope('Wx_plus_b'):
l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in
# reshape l_in_y ==> (batch, n_steps, cell_size)
self.l_in_y = tf.reshape(l_in_y, [-1, self.n_steps, self.cell_size], name='2_3D')
def add_cell(self):
lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size, forget_bias=1.0, state_is_tuple=True)
with tf.name_scope('initial_state'):
self.cell_init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)
self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(
lstm_cell, self.l_in_y, initial_state=self.cell_init_state, time_major=False)
def add_output_layer(self):
# shape = (batch * steps, cell_size)
l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name='2_2D')
Ws_out = self._weight_variable([self.cell_size, self.output_size])
bs_out = self._bias_variable([self.output_size, ])
# shape = (batch * steps, output_size)
with tf.name_scope('Wx_plus_b'):
self.pred = tf.matmul(l_out_x, Ws_out) + bs_out
def compute_cost(self):
losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[tf.reshape(self.pred, [-1], name='reshape_pred')],
[tf.reshape(self.ys, [-1], name='reshape_target')],
[tf.ones([self.batch_size * self.n_steps * self.output_size], dtype=tf.float32)],
average_across_timesteps=True,
softmax_loss_function=self.ms_error,
name='losses'
)
with tf.name_scope('average_cost'):
self.cost = tf.div(
tf.reduce_sum(losses, name='losses_sum'),
self.batch_size,
name='average_cost')
tf.summary.scalar('cost', self.cost)
def ms_error(self, labels, logits):
return tf.square(tf.subtract(labels, logits))
def _weight_variable(self, shape, name='weights'):
initializer = tf.random_normal_initializer(mean=0., stddev=1.,)
return tf.get_variable(shape=shape, initializer=initializer, name=name)
def _bias_variable(self, shape, name='biases'):
initializer = tf.constant_initializer(0.1)
return tf.get_variable(name=name, shape=shape, initializer=initializer)
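# Note (added): a row with open == close == high == low is treated as a day on
# which trading was suspended, since it carries no intraday variation.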
def not_suspended(row):
return not (row[0] == row[1] and row[0] == row[2] and row[0] == row[3])
def infer_show(n, save_path, session, saver, model, batcher, n_time_step=1, clf=True):
saver.restore(session, save_path)
for i in range(n):
try:
seq, res, xs = batcher.get_inference_batch()
except IndexError:
plt.pause(30)
if i == 0:
feed_dict = {model.xs: seq, model.ys: res}
else:
feed_dict = {model.xs: seq, model.ys: res, model.cell_final_state: state}
state, pred = session.run([model.cell_final_state, model.pred], feed_dict=feed_dict)
if clf:
plt.clf()
plt.plot(xs[0, :], res[0].flatten(), 'r', xs[0, :], pred.flatten()[0:time_steps], 'b')
plt.draw()
plt.pause(2)
def predict_show(n, save_path, session, saver, model, batcher, clf=True):
saver.restore(session, save_path)
for i in range(n):
try:
seq, xs = batcher.get_feats_batch()
except IndexError:
plt.pause(30)
if i == 0:
feed_dict = {model.xs: seq, }
else:
feed_dict = {model.xs: seq, model.cell_final_state: state}
state, pred = session.run([model.cell_final_state, model.pred], feed_dict=feed_dict)
if clf:
plt.clf()
plt.plot(xs[0, :], pred.flatten()[0:time_steps], 'b')
plt.draw()
plt.pause(200)
def train(n, save_path, session, saver, model, batcher, clf=True):
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('logs', session.graph)
init = tf.global_variables_initializer()
session.run(init)
for i in range(n):
seq, res, xs = batcher.get_batch()
if i == 0:
feed_dict = {model.xs: seq, model.ys: res}
else:
feed_dict = {model.xs: seq, model.ys: res, model.cell_final_state: state}
_, cost, state, pred = session.run([model.train_op, model.cost, model.cell_final_state, model.pred], feed_dict=feed_dict)
if clf:
plt.clf()
#plt.plot(xs[0, :], res[0].flatten(), 'r', xs[0, :], pred.flatten()[0:time_steps], 'b')
for j in range(res[0].shape[1]):
a = plt.subplot(2, 2, j+1)
a.plot(xs[0, :], res[0, :, j], 'r', xs[0, :], pred[:, j][0:time_steps], 'b')
plt.draw()
plt.pause(0.2 + i*0.01)
if i % 20 == 0:
print('cost', round(cost, 4))
result = session.run(merged, feed_dict)
writer.add_summary(result, i)
saver.save(session, save_path)
batch_start = 0
time_steps = 300
batch_size = 1
#input_size = 15
#output_size = 2
cell_size = 32
learn_rate = 0.006
model_path = 'saver/600809'
inference = False
feats_common = ['open','close','high','low', 'total_turnover', 'volume']
feats_limits = ['limit_up', 'limit_down']
feats_target = ['ts'] + feats_common + feats_limits
col_names000 = ['ts'] + feats_common
usecols_qt = ['ts', 'open', 'total_turnover', 'volume']
usecols_target = ['ts', 'high', 'low', 'total_turnover', 'volume']
if __name__ == '__main__':
ftarget = '600809.XSHG.csv'
f600519 = '600519.XSHG.csv'
f000001 = '000001.XSHG.csv'
f000300 = '000300.XSHG.csv'
f000905 = '000905.XSHG.csv'
f399006 = '399006.XSHE.csv' # from 2010
f600600 = '600600.XSHG.csv'
f600189 = '600189.XSHG.csv'
df_target = pd.read_csv(ftarget, names=feats_target, header=0, index_col='ts', usecols=usecols_target)
df600519 = pd.read_csv(f600519, names=feats_target, header=0, index_col='ts', usecols=usecols_target)
df600600 = pd.read_csv(f600600, names=feats_target, header=0, index_col='ts', usecols=usecols_target)
df600189 = pd.read_csv(f600189, names=feats_target, header=0, index_col='ts', usecols=usecols_target)
df000001 = pd.read_csv(f000001, names=col_names000, header=0, index_col='ts', usecols=usecols_qt)
df000300 = pd.read_csv(f000300, names=col_names000, header=0, index_col='ts', usecols=usecols_qt)
df000905 = pd.read_csv(f000905, names=col_names000, header=0, index_col='ts', usecols=usecols_qt)
merged = df_target.join(df600519, how='inner', rsuffix='600519')\
.join(df600600, how='inner', rsuffix='600600')\
.join(df600189, how='inner', rsuffix='600189')\
.join(df000001, how='inner', rsuffix='000001')\
.join(df000300, how='inner', rsuffix='000300')\
.join(df000905, how='inner', rsuffix='000905')
for i in range(len(merged.columns.values)):
print(i, merged.columns.values[i])
#raw = merged.loc[:, feats_target[1]:].values
#filtered = raw[np.array([not_suspended(row) for row in raw])][:, [0, 2, 4, 5, 8, 14]]
filtered = merged.values
scaler = MinMaxScaler(feature_range=(0, 1))
normv = scaler.fit_transform(filtered)
plt.plot(normv)
plt.show()
#print(normv)
target_ifeat = [0, 4, 8, 12]
if inference:
model = LSTMRegress(time_steps, normv.shape[1] - len(target_ifeat), len(target_ifeat), cell_size, 1)
session = tf.Session()
saver = tf.train.Saver()
inference_batcher = seq_batch(normv, time_steps, batch_size, target_ifeat, 10, backward=True)
#infer_show(200, model_path, session, saver, model, inference_batcher, clf=False)
predict_show(200, model_path, session, saver, model, inference_batcher, clf=False)
else:
model = LSTMRegress(time_steps, normv.shape[1] - len(target_ifeat), len(target_ifeat), cell_size, batch_size)
session = tf.Session()
saver = tf.train.Saver()
batcher = seq_batch(normv, time_steps, batch_size, target_ifeat, 10)
train(400, model_path, session, saver, model, batcher, clf=False) | mit |
nikitasingh981/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 161 | 1380 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
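# Optional addition (not part of the original example): inspect which features
# the cross-validated RFE kept and how the eliminated ones were ranked.
print("Selected feature mask : %s" % rfecv.support_)
print("Feature ranking       : %s" % rfecv.ranking_)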
| bsd-3-clause |
chenyyx/scikit-learn-doc-zh | examples/en/gaussian_process/plot_gpr_noisy_targets.py | 64 | 3706 | """
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression example computed in two different ways:
1. A noise-free case
2. A noisy case with known noise-level per datapoint
In both cases, the kernel's parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``alpha`` is applied as a Tikhonov
regularization of the assumed covariance between the training points.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
# ----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'r.', markersize=10, label=u'Observations')
plt.plot(x, y_pred, 'b-', label=u'Prediction')
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
# ----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Instantiate a Gaussian Process model
gp = GaussianProcessRegressor(kernel=kernel, alpha=(dy / y) ** 2,
n_restarts_optimizer=10)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
plt.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
plt.plot(x, y_pred, 'b-', label=u'Prediction')
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| gpl-3.0 |
Vimos/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 18 | 13730 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and test
sets. The compressed archive is around 14 MB; once uncompressed, the train
set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import load_files
from .base import _pkl_filepath
from ..utils import check_random_state, Bunch
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
    (for example, because they contain the string 'writes:').
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
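# Worked example of the footer heuristic above (editorial sketch, not part of
# the original module):
#
#     text = "body text\n\n--\nJane Doe\njane@example.org"
#     strip_newsgroup_footer(text)   # -> 'body text\n' (signature block removed)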
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset : 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home : optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories : None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle : bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove : tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = _pkl_filepath(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
logger.info("Downloading 20news dataset. "
"This may take a few minutes.")
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset : 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home : optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove : tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = _pkl_filepath(data_home, filebase + ".pkl")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
| bsd-3-clause |
abhisg/scikit-learn | examples/hetero_feature_union.py | 288 | 6236 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <matt.terry@gmail.com>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
    The data is expected to be stored in a 2D data structure, where the first
    index is over features and the second is over samples, i.e.
>> len(data[key]) == n_samples
    Please note that this is the opposite convention to sklearn feature
    matrices (where the first index corresponds to the samples).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# Limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
| bsd-3-clause |
xubenben/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transfomer):
gc.collect()
clf = clone(transfomer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
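# Illustrative usage of the helper above (editorial addition; the sizes are
# arbitrary): for n_samples=3, n_features=5 and n_nonzeros=4 the call returns
# a (3, 5) dense ndarray together with the same data as a CSR sparse matrix.
#
#     X_dense_demo, X_csr_demo = make_sparse_random_data(3, 5, 4, random_state=0)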
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name])  # use the dense/sparse input selected via --dense
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
rsignell-usgs/notebook | pyugrid/notebook_examples/pyugrid_water_levels.py | 2 | 1882 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# ##Test out UGRID-0.9 compliant unstructured grid model datasets with PYUGRID
# <codecell>
import matplotlib.tri as tri
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
# <codecell>
import cartopy.crs as ccrs
%matplotlib inline
# <codecell>
import iris
import pyugrid
# <codecell>
iris.FUTURE.netcdf_promote = True
# <codecell>
#ADCIRC
#url = 'http://comt.sura.org/thredds/dodsC/data/comt_1_archive/inundation_tropical/UND_ADCIRC/Hurricane_Ike_3D_final_run_with_waves'
#FVCOM
#url = 'http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/Forecasts/NECOFS_GOM3_FORECAST.nc'
#SELFE
url = 'http://comt.sura.org/thredds/dodsC/data/comt_1_archive/inundation_tropical/VIMS_SELFE/Hurricane_Ike_2D_final_run_with_waves'
# <codecell>
ug = pyugrid.UGrid.from_ncfile(url)
print "There are %i nodes"%ug.nodes.shape[0]
print "There are %i faces"%ug.faces.shape[0]
# <codecell>
cube = iris.load_cube(url,'sea_surface_height_above_geoid')
# <codecell>
print cube
# <codecell>
cube.mesh = ug
cube.mesh_dimension = 1 # (0:time,1:node)
# <codecell>
lon = cube.mesh.nodes[:,0]
lat = cube.mesh.nodes[:,1]
nv = cube.mesh.faces
# <codecell>
triang = tri.Triangulation(lon,lat,triangles=nv)
# <codecell>
ind = -1 # last time index
zcube = cube[ind]
# <codecell>
plt.figure(figsize=(12,12))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent([-90, -60, 5, 50])
ax.coastlines()
levs = np.arange(-1,5,.2)
plt.tricontourf(triang, zcube.data, levels=levs)
plt.colorbar()
plt.tricontour(triang, zcube.data, colors='k',levels=levs)
tvar = cube.coord('time')
tstr = tvar.units.num2date(tvar.points[ind])
gl = ax.gridlines(draw_labels=True)
gl.xlabels_top = False
gl.ylabels_right = False
plt.title('%s: Elevation (m): %s' % (zcube.attributes['title'],tstr));
# <codecell>
| mit |