repo_name (string, 7-92 chars) | path (string, 5-149 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 911-693k chars) | license (15 classes) |
---|---|---|---|---|---|
emon10005/scikit-image | doc/examples/plot_medial_transform.py | 14 | 2220 | """
===========================
Medial axis skeletonization
===========================
The medial axis of an object is the set of all points having more than one
closest point on the object's boundary. It is often called the **topological
skeleton**, because it is a 1-pixel wide skeleton of the object, with the same
connectivity as the original object.
Here, we use the medial axis transform to compute the width of the foreground
objects. As the function ``medial_axis`` (``skimage.morphology.medial_axis``)
returns the distance transform in addition to the medial axis (with the keyword
argument ``return_distance=True``), it is possible to compute the distance to
the background for all points of the medial axis with this function. This gives
an estimate of the local width of the objects.
For a skeleton with fewer branches, there exists another skeletonization
algorithm in ``skimage``: ``skimage.morphology.skeletonize``, that computes
a skeleton by iterative morphological thinnings.
"""
import numpy as np
from scipy import ndimage as ndi
from skimage.morphology import medial_axis
import matplotlib.pyplot as plt
def microstructure(l=256):
"""
Synthetic binary data: binary microstructure with blobs.
Parameters
----------
l: int, optional
linear size of the returned image
"""
n = 5
x, y = np.ogrid[0:l, 0:l]
mask = np.zeros((l, l))
generator = np.random.RandomState(1)
points = l * generator.rand(2, n**2)
mask[(points[0]).astype(int), (points[1]).astype(int)] = 1
mask = ndi.gaussian_filter(mask, sigma=l/(4.*n))
return mask > mask.mean()
data = microstructure(l=64)
# Compute the medial axis (skeleton) and the distance transform
skel, distance = medial_axis(data, return_distance=True)
# Distance to the background for pixels of the skeleton
dist_on_skel = distance * skel
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
ax1.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
ax1.axis('off')
ax2.imshow(dist_on_skel, cmap=plt.cm.nipy_spectral, interpolation='nearest')
ax2.contour(data, [0.5], colors='w')
ax2.axis('off')
fig.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
plt.show()
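# Optional extra (not part of the original example): the docstring above
# mentions ``skimage.morphology.skeletonize`` as an alternative that yields
# fewer branches. A minimal comparison, assuming the same ``data`` array:
from skimage.morphology import skeletonize
skel2 = skeletonize(data)
fig2, (bx1, bx2) = plt.subplots(1, 2, figsize=(8, 4))
bx1.imshow(skel, cmap=plt.cm.gray, interpolation='nearest')
bx1.set_title('medial_axis')
bx1.axis('off')
bx2.imshow(skel2, cmap=plt.cm.gray, interpolation='nearest')
bx2.set_title('skeletonize')
bx2.axis('off')
plt.show()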
| bsd-3-clause |
amanzi/ats-dev | tools/meshing_ats/meshing_ats/meshing_ats.py | 1 | 34933 | """Extrudes a 2D mesh to generate an ExodusII 3D mesh.
Works with and assumes all polyhedra cells (and polygon faces).
To see usage, run:
------------------------------------------------------------
python meshing_ats.py -h
Example distributed with this source, to run:
------------------------------------------------------------
$> cd four-polygon-test
$> python ../meshing_ats.py -n 10 -d 1 ./four_polygon.vtk
$> mkdir run0
$> cd run0
$> ats --xml_file=../test1-fv-four-polygon.xml
Requires building the latest version of Exodus
------------------------------------------------------------
Note that this is typically done in your standard ATS installation,
assuming you have built your Amanzi TPLs with shared libraries (the
default through bootstrap).
In that case, simply ensure that ${AMANZI_TPLS_DIR}/SEACAS/lib is in
your PYTHONPATH.
"""
from __future__ import print_function
import sys,os
import numpy as np
import collections
import argparse
try:
import exodus
except ImportError:
sys.path.append(os.path.join(os.environ["SEACAS_DIR"],"lib"))
import exodus
class SideSet(object):
def __init__(self, name, setid, elem_list, side_list):
assert(type(setid) == int)
assert(type(elem_list) == list or type(elem_list) == np.ndarray)
assert(type(side_list) == list or type(side_list) == np.ndarray)
self.name = name
self.setid = setid
self.elem_list = elem_list
self.side_list = side_list
class LabeledSet(object):
def __init__(self, name, setid, entity, ent_ids):
assert entity in ['CELL', 'FACE', 'NODE']
assert(type(setid) == int)
assert(type(ent_ids) == list or type(ent_ids) == np.ndarray)
self.name = name
self.setid = setid
self.entity = entity
self.ent_ids = np.array(ent_ids)
class Mesh2D(object):
def __init__(self, coords, connectivity, labeled_sets=None, check_handedness=True):
"""
Creates a 2D mesh from coordinates and a list cell-to-node connectivity lists.
coords : numpy array of shape (NCOORDS, NDIMS)
connectivity : list of lists of integer indices into coords specifying a
(clockwise OR counterclockwise) ordering of the nodes around
the 2D cell
labeled_sets : list of LabeledSet objects
"""
assert type(coords) == np.ndarray
assert len(coords.shape) == 2
self.dim = coords.shape[1]
self.coords = coords
self.conn = connectivity
if labeled_sets is not None:
self.labeled_sets = labeled_sets
else:
self.labeled_sets = []
self.validate()
self.edge_counts()
if check_handedness:
self.check_handedness()
def validate(self):
assert self.coords.shape[1] == 2 or self.coords.shape[1] == 3
assert type(self.conn) is list
for f in self.conn:
assert type(f) is list
assert len(set(f)) == len(f)
for i in f:
assert i < self.coords.shape[0]
for ls in self.labeled_sets:
if ls.entity == "NODE":
size = len(self.coords)
elif ls.entity == "CELL":
size = len(self.conn)
for i in ls.ent_ids:
assert i < size
return True
def num_cells(self):
return len(self.conn)
def num_nodes(self):
return self.coords.shape[0]
def num_edges(self):
return len(self.edges())
@staticmethod
def edge_hash(i,j):
return tuple(sorted((i,j)))
def edges(self):
return self.edge_counts().keys()
def edge_counts(self):
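# The Counter maps each edge (hashed as a sorted node pair) to the number of
# cells sharing it; boundary edges end up with a count of 1, which
# extruded_Mesh2D later uses to detect external side faces.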
try:
return self._edges
except AttributeError:
self._edges = collections.Counter(self.edge_hash(f[i], f[(i+1)%len(f)]) for f in self.conn for i in range(len(f)))
return self._edges
def check_handedness(self):
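# Accumulate the z-component of the cross product of consecutive edge vectors
# (proportional to the signed area); a negative sum means the nodes are
# ordered clockwise, so the connectivity is reversed to make it
# counter-clockwise.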
for conn in self.conn:
points = np.array([self.coords[c] for c in conn])
cross = 0
for i in range(len(points)):
im = i - 1
ip = i + 1
if ip == len(points):
ip = 0
p = points[ip] - points[i]
m = points[i] - points[im]
cross = cross + p[1] * m[0] - p[0] * m[1]
if cross < 0:
conn.reverse()
def plot(self, color=None, ax=None):
if color is None:
import colors
cm = colors.cm_mapper(0,self.num_cells()-1)
colors = [cm(i) for i in range(self.num_cells())]
else:
colors = color
verts = [[self.coords[i,0:2] for i in f] for f in self.conn]
from matplotlib import collections
gons = collections.PolyCollection(verts, facecolors=colors)
from matplotlib import pyplot as plt
if ax is None:
fig,ax = plt.subplots(1,1)
ax.add_collection(gons)
ax.autoscale_view()
@classmethod
def read_VTK(cls, filename):
try:
return cls.read_VTK_Simplices(filename)
except AssertionError:
return cls.read_VTK_Unstructured(filename)
@classmethod
def read_VTK_Unstructured(cls, filename):
with open(filename,'r') as fid:
points_found = False
polygons_found = False
while True:
line = fid.readline().decode('utf-8')
if not line:
# EOF
break
line = line.strip()
if len(line) == 0:
continue
split = line.split()
section = split[0]
if section == 'POINTS':
ncoords = int(split[1])
points = np.fromfile(fid, count=ncoords*3, sep=' ', dtype='d')
points = points.reshape(ncoords, 3)
points_found = True
elif section == 'POLYGONS':
ncells = int(split[1])
n_to_read = int(split[2])
gons = []
data = np.fromfile(fid, count=n_to_read, sep=' ', dtype='i')
idx = 0
for i in range(ncells):
n_in_gon = data[idx]
gon = list(data[idx+1:idx+1+n_in_gon])
# check handedness -- need normals to point up!
cross = []
for i in range(len(gon)):
if i == len(gon)-1:
ip = 0
ipp = 1
elif i == len(gon)-2:
ip = i+1
ipp = 0
else:
ip = i+1
ipp = i+2
d2 = points[gon[ipp]] - points[gon[ip]]
d1 = points[gon[i]] - points[gon[ip]]
cross.append(np.cross(d2, d1))
if (np.array([c[2] for c in cross]).mean() < 0):
gon.reverse()
gons.append(gon)
idx += n_in_gon + 1
assert(idx == n_to_read)
polygons_found = True
if not points_found:
raise RuntimeError("Unstructured VTK must contain sections 'POINTS'")
if not polygons_found:
raise RuntimeError("Unstructured VTK must contain sections 'POLYGONS'")
return cls(points, gons)
@classmethod
def read_VTK_Simplices(cls, filename):
"""Stolen from meshio, https://github.com/nschloe/meshio/blob/master/meshio/vtk_io.py"""
import vtk_io
with open(filename,'r') as fid:
data = vtk_io.read_buffer(fid)
points = data[0]
if len(data[1]) != 1:
raise RuntimeError("Simplex VTK file is readable by vtk_io but not by meshing_ats. Includes: %r"%data[1].keys())
gons = [v for v in data[1].values()][0]
gons = gons.tolist()
# check handedness
for gon in gons:
cross = []
for i in range(len(gon)):
if i == len(gon)-1:
ip = 0
ipp = 1
elif i == len(gon)-2:
ip = i+1
ipp = 0
else:
ip = i+1
ipp = i+2
d2 = points[gon[ipp]] - points[gon[ip]]
d1 = points[gon[i]] - points[gon[ip]]
cross.append(np.cross(d2, d1))
if (np.array([c[2] for c in cross]).mean() < 0):
gon.reverse()
return cls(points, gons)
@classmethod
def from_Transect(cls, x, z, width=1):
"""Creates a 2D surface strip mesh from transect data"""
# coordinates
if (type(width) is list or type(width) is np.ndarray):
variable_width = True
y = np.array([0,1])
else:
variable_width = False
y = np.array([0,width])
Xc, Yc = np.meshgrid(x, y)
if variable_width:
assert(Yc.shape[1] == 2)
assert(len(width) == Yc.shape[0])
assert(min(width) > 0.)
Yc[:,0] = -width/2.
Yc[:,1] = width/2.
Xc = Xc.flatten()
Yc = Yc.flatten()
Zc = np.concatenate([z,z])
# connectivity
nsurf_cells = len(x)-1
conn = []
for i in range(nsurf_cells):
conn.append([i, i+1, nsurf_cells + i + 2, nsurf_cells + i + 1])
coords = np.array([Xc, Yc, Zc])
return cls(coords.transpose(), conn)
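# Illustrative usage (the values below are hypothetical, not from this repo):
# a 100 m transect with a sloping surface could be meshed as
#   x = np.linspace(0., 100., 11)
#   z = np.linspace(5., 0., 11)
#   m2 = Mesh2D.from_Transect(x, z, width=2.0)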
@classmethod
def from_Transect_Guide(cls, x, z, guide):
"""Creates a 2D surface strip mesh from transect data"""
assert type(guide) == np.ndarray
assert guide.shape[1] == 3
# coordinates
Xc = x
Yc = np.zeros_like(x)
Zc = z
nsteps = guide.shape[0]
xnew = Xc
ynew = Yc
znew = Zc
for i in range(nsteps):
xnew = xnew + guide[i][0]
ynew = ynew + guide[i][1]
znew = znew + guide[i][2]
Xc = np.concatenate([Xc, xnew])
Yc = np.concatenate([Yc, ynew])
Zc = np.concatenate([Zc, znew])
# y = np.array([0,1,2])
# Xc, Yc = np.meshgrid(x, y)
# Xc = Xc.flatten()
# Yc = Yc.flatten()
# Zc = np.concatenate([z,z,z])
# connectivity
ns = len(x)
conn = []
for j in range(nsteps):
for i in range(ns-1):
conn.append([j*ns + i, j*ns + i + 1, (j+1)*ns + i + 1, (j+1)*ns + i ])
coords = np.array([Xc, Yc, Zc])
return cls(coords.transpose(), conn)
@classmethod
def from_Transect_GuideX(cls, x, z, guide, nsteps):
"""Creates a 2D surface strip mesh from transect data"""
assert type(guide) == np.ndarray
assert guide.shape[1] == 3
# coordinates
Xc = x
Yc = np.zeros_like(x)
Zc = z
nsteps = guide.shape[0]
xnew = np.zeros_like(x)
ynew = np.zeros(len(x))
znew = np.zeros_like(x)
xnew[:] = Xc[:]
ynew[:] = Yc[:]
znew[:] = Zc[:]
for i in range(nsteps):
print(Yc)
for j in range(len(x)):
xnew[j] = xnew[j] + guide[j][0]
ynew[j] = ynew[j] + guide[j][1]
znew[j] = znew[j] + guide[j][2]
Xc = np.concatenate([Xc, xnew])
Yc = np.concatenate([Yc, ynew])
Zc = np.concatenate([Zc, znew])
# y = np.array([0,1,2])
# Xc, Yc = np.meshgrid(x, y)
# Xc = Xc.flatten()
# Yc = Yc.flatten()
# Zc = np.concatenate([z,z,z])
# connectivity
ns = len(x)
conn = []
for j in range(nsteps):
for i in range(ns-1):
conn.append([j*ns + i, j*ns + i + 1, (j+1)*ns + i + 1, (j+1)*ns + i ])
coords = np.array([Xc, Yc, Zc])
return cls(coords.transpose(), conn)
class Mesh3D(object):
def __init__(self, coords, face_to_node_conn, elem_to_face_conn,
side_sets=None, labeled_sets=None, material_ids=None):
"""
Creates a 3D mesh from coordinates and connectivity lists.
coords : numpy array of shape (NCOORDS, 3)
face_to_node_conn : list of lists of integer indices into coords specifying an
(clockwise OR counterclockwise) ordering of the nodes around
the face
elem_to_face_conn : list of lists of integer indices into face_to_node_conn
specifying a list of faces that make up the elem
"""
assert type(coords) == np.ndarray
assert len(coords.shape) == 2
assert coords.shape[1] == 3
self.dim = coords.shape[1]
self.coords = coords
self.face_to_node_conn = face_to_node_conn
self.elem_to_face_conn = elem_to_face_conn
if labeled_sets is not None:
self.labeled_sets = labeled_sets
else:
self.labeled_sets = []
if side_sets is not None:
self.side_sets = side_sets
else:
self.side_sets = []
if material_ids is not None:
self.material_id_list = collections.Counter(material_ids).keys()
self.material_ids = material_ids
else:
self.material_id_list = [10000,]
self.material_ids = [10000,]*len(self.elem_to_face_conn)
self.validate()
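# Illustrative: a Mesh3D for a single unit cube would take 8 coordinates, a
# list of 6 quad faces (each a list of 4 node indices), and one element
# listing those 6 face indices, e.g. Mesh3D(coords, faces, [[0, 1, 2, 3, 4, 5]]).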
def validate(self):
assert self.coords.shape[1] == 3
assert type(self.face_to_node_conn) is list
for f in self.face_to_node_conn:
assert type(f) is list
assert len(set(f)) == len(f)
for i in f:
assert i < self.coords.shape[0]
assert type(self.elem_to_face_conn) is list
for e in self.elem_to_face_conn:
assert type(e) is list
assert len(set(e)) == len(e)
for i in e:
assert i < len(self.face_to_node_conn)
for ls in self.labeled_sets:
if ls.entity == "NODE":
size = self.num_nodes()
if ls.entity == "FACE":
size = self.num_faces()
elif ls.entity == "CELL":
size = self.num_cells()
for i in ls.ent_ids:
assert i < size
for ss in self.side_sets:
for j,i in zip(ss.elem_list, ss.side_list):
assert j < self.num_cells()
assert i < len(self.elem_to_face_conn[j])
def num_cells(self):
return len(self.elem_to_face_conn)
def num_faces(self):
return len(self.face_to_node_conn)
def num_nodes(self):
return self.coords.shape[0]
def write_exodus(self, filename, face_block_mode="one block"):
"""Write the 3D mesh to ExodusII using arbitrary polyhedra spec"""
# put cells in with blocks, which renumbers the cells, so we have to track sidesets.
# Therefore we keep a map of old cell to new cell ordering
#
# also, though not required by the spec, paraview and visit
# seem to crash if num_face_blocks != num_elem_blocks. So
# make face blocks here too, which requires renumbering the faces.
# -- first pass, form all elem blocks and make the map from old-to-new
new_to_old_elems = []
elem_blks = []
for i_m,m_id in enumerate(self.material_id_list):
# split out elems of this material, save new_to_old map
elems_tuple = [(i,c) for (i,c) in enumerate(self.elem_to_face_conn) if self.material_ids[i] == m_id]
new_to_old_elems.extend([i for (i,c) in elems_tuple])
elems = [c for (i,c) in elems_tuple]
elem_blks.append(elems)
old_to_new_elems = sorted([(old,i) for (i,old) in enumerate(new_to_old_elems)], key=lambda p: p[0])
# -- deal with faces, form all face blocks and make the map from old-to-new
face_blks = []
if face_block_mode == "one block":
# no reordering of faces needed
face_blks.append(self.face_to_node_conn)
elif face_block_mode == "n blocks, not duplicated":
used_faces = np.zeros((len(self.face_to_node_conn),),'bool')
new_to_old_faces = []
for i_m,m_id in enumerate(self.material_id_list):
# split out faces of this material, save new_to_old map
def used(f):
result = used_faces[f]
used_faces[f] = True
return result
elem_blk = elem_blks[i_m]
faces_tuple = [(f,self.face_to_node_conn[f]) for c in elem_blk for (j,f) in enumerate(c) if not used(f)]
new_to_old_faces.extend([j for (j,f) in faces_tuple])
faces = [f for (j,f) in faces_tuple]
face_blks.append(faces)
# get the renumbering in the elems
old_to_new_faces = sorted([(old,j) for (j,old) in enumerate(new_to_old_faces)], key=lambda p: p[0])
elem_blks = [[[old_to_new_faces[f][1] for f in c] for c in elem_blk] for elem_blk in elem_blks]
elif face_block_mode == "n blocks, duplicated":
elem_blks_new = []
offset = 0
for i_m, m_id in enumerate(self.material_id_list):
used_faces = np.zeros((len(self.face_to_node_conn),),'bool')
def used(f):
result = used_faces[f]
used_faces[f] = True
return result
elem_blk = elem_blks[i_m]
tuple_old_f = [(f,self.face_to_node_conn[f]) for c in elem_blk for f in c if not used(f)]
tuple_new_old_f = [(new,old,f) for (new,(old,f)) in enumerate(tuple_old_f)]
old_to_new_blk = np.zeros((len(self.face_to_node_conn),),'i')-1
for new,old,f in tuple_new_old_f:
old_to_new_blk[old] = new + offset
elem_blk_new = [[old_to_new_blk[f] for f in c] for c in elem_blk]
#offset = offset + len(ftuple_new)
elem_blks_new.append(elem_blk_new)
face_blks.append([f for i,j,f in tuple_new_old_f])
elem_blks = elem_blks_new
elif face_block_mode == "one block, repeated":
# no reordering of faces needed, just repeat
for eblock in elem_blks:
face_blks.append(self.face_to_node_conn)
else:
raise RuntimeError("Invalid face_block_mode: '%s', valid='one block', 'n blocks, duplicated', 'n blocks, not duplicated'"%face_block_mode)
# open the mesh file
num_elems = sum(len(elem_blk) for elem_blk in elem_blks)
num_faces = sum(len(face_blk) for face_blk in face_blks)
ep = exodus.ex_init_params(title=filename,
num_dim=3,
num_nodes=self.num_nodes(),
num_face=num_faces,
num_face_blk=len(face_blks),
num_elem=num_elems,
num_elem_blk=len(elem_blks),
num_side_sets=len(self.side_sets))
e = exodus.exodus(filename, mode='w', array_type='numpy', init_params=ep)
# put the coordinates
e.put_coord_names(['coordX', 'coordY', 'coordZ'])
e.put_coords(self.coords[:,0], self.coords[:,1], self.coords[:,2])
# put the face blocks
for i_blk, face_blk in enumerate(face_blks):
face_raveled = [n for f in face_blk for n in f]
e.put_polyhedra_face_blk(i_blk+1, len(face_blk), len(face_raveled), 0)
e.put_node_count_per_face(i_blk+1, np.array([len(f) for f in face_blk]))
e.put_face_node_conn(i_blk+1, np.array(face_raveled)+1)
# put the elem blocks
assert len(elem_blks) == len(self.material_id_list)
for i_blk, (m_id, elem_blk) in enumerate(zip(self.material_id_list, elem_blks)):
elems_raveled = [f for c in elem_blk for f in c]
e.put_polyhedra_elem_blk(m_id, len(elem_blk), len(elems_raveled), 0)
e.put_elem_blk_name(m_id, "MATERIAL_ID_%d"%m_id)
e.put_face_count_per_polyhedra(m_id, np.array([len(c) for c in elem_blk]))
e.put_elem_face_conn(m_id, np.array(elems_raveled)+1)
# add sidesets
e.put_side_set_names([ss.name for ss in self.side_sets])
for ss in self.side_sets:
for elem in ss.elem_list:
assert old_to_new_elems[elem][0] == elem
new_elem_list = [old_to_new_elems[elem][1] for elem in ss.elem_list]
e.put_side_set_params(ss.setid, len(ss.elem_list), 0)
e.put_side_set(ss.setid, np.array(new_elem_list)+1, np.array(ss.side_list)+1)
# finish and close
e.close()
@classmethod
def extruded_Mesh2D(cls, mesh2D, layer_types, layer_data, ncells_per_layer, mat_ids):
"""
Regularly extrude a 2D mesh to make a 3D mesh.
mesh2D : a Mesh2D object
layer_types : either a string (type) or list of strings (types)
layer_data : array of data needed (specific to the type)
ncells_per_layer : either a single integer (same number of cells in all
: layers) or a list of number of cells in the layer
mat_ids : either a single integer (one mat_id for all layers)
: or a list of integers (mat_id for each layer)
: or a 2D numpy array of integers (mat_id for each layer and
each column: [layer_id, surface_cell_id])
types:
- 'constant' : (data=float thickness) uniform thickness
- 'function' : (data=function or functor) thickness as a function
: of (x,y)
- 'snapped' : (data=float z coordinate) snap the layer to
: provided z coordinate, telescoping as needed
- 'node' : thickness provided on each node of the surface domain
- 'cell' : thickness provided on each cell of the surface domain,
: interpolate to nodes
NOTE: dz is uniform through the layer in all but the 'snapped' case
NOTE: 2D mesh is always labeled 'surface', extrusion is always downwards
"""
# make the data all lists
# ---------------------------------
def is_list(data):
if type(data) is str:
return False
try:
len(data)
except TypeError:
return False
else:
return True
if is_list(layer_types):
if not is_list(layer_data):
layer_data = [layer_data,]*len(layer_types)
else:
assert len(layer_data) == len(layer_types)
if not is_list(ncells_per_layer):
ncells_per_layer = [ncells_per_layer,]*len(layer_types)
else:
assert len(ncells_per_layer) == len(layer_types)
elif is_list(layer_data):
layer_types = [layer_types,]*len(layer_data)
if not is_list(ncells_per_layer):
ncells_per_layer = [ncells_per_layer,]*len(layer_data)
else:
assert len(ncells_per_layer) == len(layer_data)
elif is_list(ncells_per_layer):
layer_types = [layer_types,]*len(ncells_per_layer)
layer_data = [layer_data,]*len(ncells_per_layer)
else:
layer_types = [layer_types,]
layer_data = [layer_data,]
ncells_per_layer = [ncells_per_layer,]
# helper data and functions for mapping indices from 2D to 3D
# ------------------------------------------------------------------
if min(ncells_per_layer) < 0:
raise RuntimeError("Invalid number of cells, negative value provided.")
ncells_tall = sum(ncells_per_layer)
ncells_total = ncells_tall * mesh2D.num_cells()
nfaces_total = (ncells_tall+1) * mesh2D.num_cells() + ncells_tall * mesh2D.num_edges()
nnodes_total = (ncells_tall+1) * mesh2D.num_nodes()
np_mat_ids = np.array(mat_ids, dtype=int)
if np_mat_ids.size == np.size(np_mat_ids, 0):
if np_mat_ids.size == 1:
np_mat_ids = np.full((len(ncells_per_layer), mesh2D.num_cells()), mat_ids[0], dtype=int)
else:
np_mat_ids = np.empty((len(ncells_per_layer), mesh2D.num_cells()), dtype=int)
for ilay in range(len(ncells_per_layer)):
np_mat_ids[ilay, :] = np.full(mesh2D.num_cells(), mat_ids[ilay], dtype=int)
def col_to_id(column, z_cell):
"""Maps 2D cell ID and index in the vertical to a 3D cell ID"""
return z_cell + column * ncells_tall
def node_to_id(node, z_node):
"""Maps 2D node ID and index in the vertical to a 3D node ID"""
return z_node + node * (ncells_tall+1)
def edge_to_id(edge, z_cell):
"""Maps 2D edge hash and index in the vertical to a 3D face ID of a vertical face"""
return (ncells_tall + 1) * mesh2D.num_cells() + z_cell + edge * ncells_tall
# create coordinates
# ---------------------------------
coords = np.zeros((mesh2D.coords.shape[0],ncells_tall+1, 3),'d')
coords[:,:,0:2] = np.expand_dims(mesh2D.coords[:,0:2],1)
if mesh2D.dim == 3:
coords[:,0,2] = mesh2D.coords[:,2]
# else the surface is at 0 depth
cell_layer_start = 0
for layer_type, layer_datum, ncells in zip(layer_types, layer_data, ncells_per_layer):
if layer_type.lower() == 'constant':
dz = float(layer_datum) / ncells
for i in range(1,ncells+1):
coords[:,cell_layer_start+i,2] = coords[:,cell_layer_start,2] - i * dz
else:
# allocate an array of coordinates for the bottom of the layer
layer_bottom = np.zeros((mesh2D.coords.shape[0],),'d')
if layer_type.lower() == 'snapped':
# layer bottom is uniform
layer_bottom[:] = layer_datum
elif layer_type.lower() == 'function':
# layer thickness is given by a function evaluation of x,y
for node_col in range(mesh2D.coords.shape[0]):
layer_bottom[node_col] = coords[node_col,cell_layer_start,2] - layer_datum(coords[node_col,0,0], coords[node_col,0,1])
elif layer_type.lower() == 'node':
# layer bottom specifically provided through thickness
layer_bottom[:] = coords[:,cell_layer_start,2] - layer_datum
elif layer_type.lower() == 'cell':
# interpolate cell thicknesses to node thicknesses
import scipy.interpolate
centroids = mesh2D.cell_centroids()
interp = scipy.interpolate.interp2d(centroids[:,0], centroids[:,1], layer_datum, kind='linear')
layer_bottom[:] = coords[:,cell_layer_start,2] - interp(mesh2D.coords[:,0], mesh2D.coords[:,1])
else:
raise RuntimeError("Unrecognized layer_type '%s'"%layer_type)
# linspace from bottom of previous layer to bottom of this layer
for node_col in range(mesh2D.coords.shape[0]):
coords[node_col,cell_layer_start:cell_layer_start+ncells+1,2] = np.linspace(coords[node_col,cell_layer_start,2], layer_bottom[node_col], ncells+1)
cell_layer_start = cell_layer_start + ncells
# create faces, face sets, cells
bottom = []
surface = []
faces = []
cells = [list() for c in range(ncells_total)]
# -- loop over the columns, adding the horizontal faces
for col in range(mesh2D.num_cells()):
nodes_2 = mesh2D.conn[col]
surface.append(col_to_id(col,0))
for z_face in range(ncells_tall + 1):
i_f = len(faces)
f = [node_to_id(n, z_face) for n in nodes_2]
if z_face != ncells_tall:
cells[col_to_id(col, z_face)].append(i_f)
if z_face != 0:
cells[col_to_id(col, z_face-1)].append(i_f)
faces.append(f)
bottom.append(col_to_id(col,ncells_tall-1))
# -- loop over the columns, adding the vertical faces
added = dict()
vertical_side_cells = []
vertical_side_indices = []
for col in range(mesh2D.num_cells()):
nodes_2 = mesh2D.conn[col]
for i in range(len(nodes_2)):
edge = mesh2D.edge_hash(nodes_2[i], nodes_2[(i+1)%len(nodes_2)])
try:
i_e = added[edge]
except KeyError:
# faces not yet added to facelist
i_e = len(added.keys())
added[edge] = i_e
for z_face in range(ncells_tall):
i_f = len(faces)
assert i_f == edge_to_id(i_e, z_face)
f = [node_to_id(edge[0], z_face),
node_to_id(edge[1], z_face),
node_to_id(edge[1], z_face+1),
node_to_id(edge[0], z_face+1)]
faces.append(f)
face_cell = col_to_id(col, z_face)
cells[face_cell].append(i_f)
# check if this is an external
if mesh2D._edges[edge] == 1:
vertical_side_cells.append(face_cell)
vertical_side_indices.append(len(cells[face_cell])-1)
else:
# faces already added from previous column
for z_face in range(ncells_tall):
i_f = edge_to_id(i_e, z_face)
cells[col_to_id(col, z_face)].append(i_f)
# Do some idiot checking
# -- check we got the expected number of faces
assert len(faces) == nfaces_total
# -- check every cell is at least a tet
for c in cells:
assert len(c) > 4
# -- check surface sideset has the right number of entries
assert len(surface) == mesh2D.num_cells()
# -- check bottom sideset has the right number of entries
assert len(bottom) == mesh2D.num_cells()
# -- len of vertical sides sideset is number of external edges * number of cells, no pinchouts here
num_sides = ncells_tall * sum(1 for e,c in mesh2D.edge_counts().items() if c == 1)
assert num_sides == len(vertical_side_cells)
assert num_sides == len(vertical_side_indices)
# make the material ids
material_ids = np.zeros((len(cells),),'i')
for col in range(mesh2D.num_cells()):
z_cell = 0
for ilay in range(len(ncells_per_layer)):
ncells = ncells_per_layer[ilay]
for i in range(z_cell, z_cell+ncells):
material_ids[col_to_id(col, i)] = np_mat_ids[ilay, col]
z_cell = z_cell + ncells
# make the side sets
side_sets = []
side_sets.append(SideSet("bottom", 1, bottom, [1,]*len(bottom)))
side_sets.append(SideSet("surface", 2, surface, [0,]*len(surface)))
side_sets.append(SideSet("external_sides", 3, vertical_side_cells, vertical_side_indices))
# reshape coords
coords = coords.reshape(nnodes_total, 3)
for e,s in zip(side_sets[0].elem_list, side_sets[0].side_list):
face = cells[e][s]
fz_coords = np.array([coords[n] for n in faces[face]])
#print "bottom centroid = ", np.mean(fz_coords, axis=0)
for e,s in zip(side_sets[1].elem_list, side_sets[1].side_list):
face = cells[e][s]
fz_coords = np.array([coords[n] for n in faces[face]])
#print "surface centroid = ", np.mean(fz_coords, axis=0)
# instantiate the mesh
return cls(coords, faces, cells, side_sets=side_sets, material_ids=material_ids)
def commandline_options():
parser = argparse.ArgumentParser(description='Extrude a 2D mesh to make a 3D mesh')
parser.add_argument("-n", "--num-cells", default=10, type=int,
help="number of cells to extrude")
parser.add_argument("-d", "--depth", default=40.0, type=float,
help="depth to extrude")
parser.add_argument("-o", "--outfile", default=None, type=str,
help="output filename")
parser.add_argument("-p", "--plot", default=False, action="store_true",
help="plot the 2D mesh")
parser.add_argument("infile",metavar="INFILE", type=str,
help="input filename of surface mesh")
options = parser.parse_args()
if options.outfile is None:
options.outfile = ".".join(options.infile.split(".")[:-1])+".exo"
if os.path.isfile(options.outfile):
print('Output file "%s" exists, cowardly not overwriting.'%options.outfile)
sys.exit(1)
if not os.path.isfile(options.infile):
print('No input file provided')
parser.print_usage()
sys.exit(1)
return options
if __name__ == "__main__":
options = commandline_options()
m2 = Mesh2D.read_VTK(options.infile)
if options.plot:
m2.plot()
m3 = Mesh3D.extruded_Mesh2D(m2, ['constant',], [options.depth,], [options.num_cells,], [10000,])
m3.write_exodus(options.outfile)
| bsd-3-clause |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/dataframe/tensorflow_dataframe.py | 75 | 29377 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import batch
from tensorflow.contrib.learn.python.learn.dataframe.transforms import csv_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import example_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import reader_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import sparsify
from tensorflow.contrib.learn.python.learn.dataframe.transforms import split_mask
from tensorflow.python.client import session as sess
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner as qr
def _expand_file_names(filepatterns):
"""Takes a list of file patterns and returns a list of resolved file names."""
if not isinstance(filepatterns, (list, tuple, set)):
filepatterns = [filepatterns]
filenames = set()
for filepattern in filepatterns:
names = set(gfile.Glob(filepattern))
filenames |= names
return list(filenames)
def _dtype_to_nan(dtype):
if dtype is dtypes.string:
return b""
elif dtype.is_integer:
return np.nan
elif dtype.is_floating:
return np.nan
elif dtype is dtypes.bool:
return np.nan
else:
raise ValueError("Can't parse type without NaN into sparse tensor: %s" %
dtype)
def _get_default_value(feature_spec):
if isinstance(feature_spec, parsing_ops.FixedLenFeature):
return feature_spec.default_value
else:
return _dtype_to_nan(feature_spec.dtype)
class TensorFlowDataFrame(df.DataFrame):
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
def run(self,
num_batches=None,
graph=None,
session=None,
start_queues=True,
initialize_variables=True,
**kwargs):
"""Builds and runs the columns of the `DataFrame` and yields batches.
This is a generator that yields a dictionary mapping column names to
evaluated columns.
Args:
num_batches: the maximum number of batches to produce. If none specified,
the returned value will iterate through infinite batches.
graph: the `Graph` in which the `DataFrame` should be built.
session: the `Session` in which to run the columns of the `DataFrame`.
start_queues: if true, queues will be started before running and halted
after producing `num_batches` batches.
initialize_variables: if true, variables will be initialized.
**kwargs: Additional keyword arguments e.g. `num_epochs`.
Yields:
A dictionary, mapping column names to the values resulting from running
each column for a single batch.
"""
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
if session is None:
session = sess.Session()
self_built = self.build(**kwargs)
keys = list(self_built.keys())
cols = list(self_built.values())
if initialize_variables:
if variables.local_variables():
session.run(variables.local_variables_initializer())
if variables.global_variables():
session.run(variables.global_variables_initializer())
if start_queues:
coord = coordinator.Coordinator()
threads = qr.start_queue_runners(sess=session, coord=coord)
i = 0
while num_batches is None or i < num_batches:
i += 1
try:
values = session.run(cols)
yield collections.OrderedDict(zip(keys, values))
except errors.OutOfRangeError:
break
if start_queues:
coord.request_stop()
coord.join(threads)
def select_rows(self, boolean_series):
"""Returns a `DataFrame` with only the rows indicated by `boolean_series`.
Note that batches may no longer have consistent size after calling
`select_rows`, so the new `DataFrame` may need to be rebatched.
For example:
'''
filtered_df = df.select_rows(df["country"] == "jp").batch(64)
'''
Args:
boolean_series: a `Series` that evaluates to a boolean `Tensor`.
Returns:
A new `DataFrame` with the same columns as `self`, but selecting only the
rows where `boolean_series` evaluated to `True`.
"""
result = type(self)()
for key, col in self._columns.items():
try:
result[key] = col.select_rows(boolean_series)
except AttributeError as e:
raise NotImplementedError((
"The select_rows method is not implemented for Series type {}. "
"Original error: {}").format(type(col), e))
return result
def split(self, index_series, proportion, batch_size=None):
"""Deterministically split a `DataFrame` into two `DataFrame`s.
Note this split is only as deterministic as the underlying hash function;
see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
for a given binary, but may change occasionally. The only way to achieve
an absolute guarantee that the split `DataFrame`s do not change across runs
is to materialize them.
Note too that the allocation of a row to one partition or the
other is evaluated independently for each row, so the exact number of rows
in each partition is binomially distributed.
Args:
index_series: a `Series` of unique strings, whose hash will determine the
partitioning; or the name in this `DataFrame` of such a `Series`.
(This `Series` must contain strings because TensorFlow provides hash
ops only for strings, and there are no number-to-string converter ops.)
proportion: The proportion of the rows to select for the 'left'
partition; the remaining (1 - proportion) rows form the 'right'
partition.
batch_size: the batch size to use when rebatching the left and right
`DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
thus their batches will have variable sizes, according to which rows
are selected from each batch of the original `DataFrame`.
Returns:
Two `DataFrame`s containing the partitioned rows.
"""
if isinstance(index_series, str):
index_series = self[index_series]
left_mask, = split_mask.SplitMask(proportion)(index_series)
right_mask = ~left_mask
left_rows = self.select_rows(left_mask)
right_rows = self.select_rows(right_mask)
if batch_size:
left_rows = left_rows.batch(batch_size=batch_size, shuffle=False)
right_rows = right_rows.batch(batch_size=batch_size, shuffle=False)
return left_rows, right_rows
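# Illustrative usage (the column name is hypothetical): an 80/20 split keyed
# on a unique string id column, rebatched to 64 rows per batch
#   train_df, test_df = tf_df.split("example_id", 0.8, batch_size=64)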
def split_fast(self, index_series, proportion, batch_size,
base_batch_size=1000):
"""Deterministically split a `DataFrame` into two `DataFrame`s.
Note this split is only as deterministic as the underlying hash function;
see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
for a given binary, but may change occasionally. The only way to achieve
an absolute guarantee that the split `DataFrame`s do not change across runs
is to materialize them.
Note too that the allocation of a row to one partition or the
other is evaluated independently for each row, so the exact number of rows
in each partition is binomially distributed.
Args:
index_series: a `Series` of unique strings, whose hash will determine the
partitioning; or the name in this `DataFrame` of such a `Series`.
(This `Series` must contain strings because TensorFlow provides hash
ops only for strings, and there are no number-to-string converter ops.)
proportion: The proportion of the rows to select for the 'left'
partition; the remaining (1 - proportion) rows form the 'right'
partition.
batch_size: the batch size to use when rebatching the left and right
`DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
thus their batches will have variable sizes, according to which rows
are selected from each batch of the original `DataFrame`.
base_batch_size: the batch size to use for materialized data, prior to the
split.
Returns:
Two `DataFrame`s containing the partitioned rows.
"""
if isinstance(index_series, str):
index_series = self[index_series]
left_mask, = split_mask.SplitMask(proportion)(index_series)
right_mask = ~left_mask
self["left_mask__"] = left_mask
self["right_mask__"] = right_mask
# TODO(soergel): instead of base_batch_size can we just do one big batch?
# avoid computing the hashes twice
m = self.materialize_to_memory(batch_size=base_batch_size)
left_rows_df = m.select_rows(m["left_mask__"])
right_rows_df = m.select_rows(m["right_mask__"])
del left_rows_df[["left_mask__", "right_mask__"]]
del right_rows_df[["left_mask__", "right_mask__"]]
# avoid recomputing the split repeatedly
left_rows_df = left_rows_df.materialize_to_memory(batch_size=batch_size)
right_rows_df = right_rows_df.materialize_to_memory(batch_size=batch_size)
return left_rows_df, right_rows_df
def run_one_batch(self):
"""Creates a new 'Graph` and `Session` and runs a single batch.
Returns:
A dictionary mapping column names to numpy arrays that contain a single
batch of the `DataFrame`.
"""
return list(self.run(num_batches=1))[0]
def run_one_epoch(self):
"""Creates a new 'Graph` and `Session` and runs a single epoch.
Naturally this makes sense only for DataFrames that fit in memory.
Returns:
A dictionary mapping column names to numpy arrays that contain a single
epoch of the `DataFrame`.
"""
# batches is a list of dicts of numpy arrays
batches = [b for b in self.run(num_epochs=1)]
# first invert that to make a dict of lists of numpy arrays
pivoted_batches = {}
for k in batches[0].keys():
pivoted_batches[k] = []
for b in batches:
for k, v in b.items():
pivoted_batches[k].append(v)
# then concat the arrays in each column
result = {k: np.concatenate(column_batches)
for k, column_batches in pivoted_batches.items()}
return result
def materialize_to_memory(self, batch_size):
unordered_dict_of_arrays = self.run_one_epoch()
# there may already be an 'index' column, in which case from_ordereddict)
# below will complain because it wants to generate a new one.
# for now, just remove it.
# TODO(soergel): preserve index history, potentially many levels deep
del unordered_dict_of_arrays["index"]
# the order of the columns in this dict is arbitrary; we just need it to
# remain consistent.
ordered_dict_of_arrays = collections.OrderedDict(unordered_dict_of_arrays)
return TensorFlowDataFrame.from_ordereddict(ordered_dict_of_arrays,
batch_size=batch_size)
def batch(self,
batch_size,
shuffle=False,
num_threads=1,
queue_capacity=None,
min_after_dequeue=None,
seed=None):
"""Resize the batches in the `DataFrame` to the given `batch_size`.
Args:
batch_size: desired batch size.
shuffle: whether records should be shuffled. Defaults to false.
num_threads: the number of enqueueing threads.
queue_capacity: capacity of the queue that will hold new batches.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` with `batch_size` rows.
"""
column_names = list(self._columns.keys())
if shuffle:
batcher = batch.ShuffleBatch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity,
min_after_dequeue=min_after_dequeue,
seed=seed)
else:
batcher = batch.Batch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity)
batched_series = batcher(list(self._columns.values()))
dataframe = type(self)()
dataframe.assign(**(dict(zip(column_names, batched_series))))
return dataframe
@classmethod
def _from_csv_base(cls, filepatterns, get_default_values, has_header,
column_names, num_threads, enqueue_size,
batch_size, queue_capacity, min_after_dequeue, shuffle,
seed):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
get_default_values: a function that produces a list of default values for
each column, given the column names.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if column_names is None:
if not has_header:
raise ValueError("If column_names is None, has_header must be true.")
with gfile.GFile(filenames[0]) as f:
column_names = csv.DictReader(f).fieldnames
if "index" in column_names:
raise ValueError(
"'index' is reserved and can not be used for a column name.")
default_values = get_default_values(column_names)
reader_kwargs = {"skip_header_lines": (1 if has_header else 0)}
index, value = reader_source.TextFileSource(
filenames,
reader_kwargs=reader_kwargs,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = csv_parser.CSVParser(column_names, default_values)
parsed = parser(value)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_csv(cls,
filepatterns,
default_values,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
default_values: a list of default values for each column.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
def get_default_values(column_names):
# pylint: disable=unused-argument
return default_values
return cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
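# Illustrative call (the file pattern and default values are hypothetical):
#   tf_df = TensorFlowDataFrame.from_csv("data/train-*.csv",
#                                        default_values=[0.0, 0.0, ""],
#                                        batch_size=64)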
@classmethod
def from_csv_with_feature_spec(cls,
filepatterns,
feature_spec,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files, given a feature_spec.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
feature_spec: a dict mapping column names to `FixedLenFeature` or
`VarLenFeature`.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
def get_default_values(column_names):
return [_get_default_value(feature_spec[name]) for name in column_names]
dataframe = cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
# replace the dense columns with sparse ones in place in the dataframe
for name in dataframe.columns():
if name != "index" and isinstance(feature_spec[name],
parsing_ops.VarLenFeature):
strip_value = _get_default_value(feature_spec[name])
(dataframe[name],) = sparsify.Sparsify(strip_value)(dataframe[name])
return dataframe
@classmethod
def from_examples(cls,
filepatterns,
features,
reader_cls=io_ops.TFRecordReader,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from `tensorflow.Example`s.
Args:
filepatterns: a list of file patterns containing `tensorflow.Example`s.
features: a dict mapping feature names to `VarLenFeature` or
`FixedLenFeature`.
reader_cls: a subclass of `tensorflow.ReaderBase` that will be used to
read the `Example`s.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with `Example`s from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if "index" in features:
raise ValueError(
"'index' is reserved and can not be used for a feature name.")
index, record = reader_source.ReaderSource(
reader_cls,
filenames,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = example_parser.ExampleParser(features)
parsed = parser(record)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_pandas(cls,
pandas_dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="pandas_data"):
"""Create a `tf.learn.DataFrame` from a `pandas.DataFrame`.
Args:
pandas_dataframe: `pandas.DataFrame` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
`pandas_dataframe`.
"""
pandas_source = in_memory_source.PandasSource(
pandas_dataframe,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(pandas_source()._asdict()))
return dataframe
@classmethod
def from_numpy(cls,
numpy_array,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="numpy_data"):
"""Creates a `tf.learn.DataFrame` from a `numpy.ndarray`.
The returned `DataFrame` contains two columns: 'index' and 'value'. The
'value' column contains a row from the array. The 'index' column contains
the corresponding row number.
Args:
numpy_array: `numpy.ndarray` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
array.
"""
numpy_source = in_memory_source.NumpySource(
numpy_array,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(numpy_source()._asdict()))
return dataframe
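# Illustrative call (the array is hypothetical): wrap a numpy array, yielding
# 'index' and 'value' columns in batches of 32
#   tf_df = TensorFlowDataFrame.from_numpy(np.random.rand(1000, 3),
#                                          batch_size=32)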
@classmethod
def from_ordereddict(cls,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="numpy_data"):
"""Creates a `tf.learn.DataFrame` from an `OrderedDict` of `numpy.ndarray`.
The returned `DataFrame` contains a column for each key of the dict plus an
extra 'index' column. The 'index' column contains the row number. Each of
the other columns contains a row from the corresponding array.
Args:
ordered_dict_of_arrays: `OrderedDict` of `numpy.ndarray` that serves as a
data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given arrays.
Raises:
ValueError: `ordered_dict_of_arrays` contains the reserved name 'index'.
"""
numpy_source = in_memory_source.OrderedDictNumpySource(
ordered_dict_of_arrays,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(numpy_source()._asdict()))
return dataframe
| apache-2.0 |
qrqiuren/sms-tools | lectures/03-Fourier-properties/plots-code/fft-zero-phase.py | 24 | 1140 | import matplotlib.pyplot as plt
import numpy as np
from scipy.fftpack import fft, fftshift
import sys
sys.path.append('../../../software/models/')
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 512
M = 401
hN = N//2
hM = (M+1)//2
start = int(.8*fs)
xw = x[start-hM:start+hM-1] * np.hamming(M)
plt.figure(1, figsize=(9.5, 6.5))
plt.subplot(411)
plt.plot(np.arange(-hM, hM-1), xw, lw=1.5)
plt.axis([-hN, hN-1, min(xw), max(xw)])
plt.title('x (oboe-A4.wav), M = 401')
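# Zero-phase windowing: put the second half of the windowed frame at the start
# of the FFT buffer and wrap the first half to the end, so the window is
# centred at sample 0 and the phase spectrum is free of the window's linear
# phase offset.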
fftbuffer = np.zeros(N)
fftbuffer[:hM] = xw[hM-1:]
fftbuffer[N-hM+1:] = xw[:hM-1]
plt.subplot(412)
plt.plot(np.arange(0, N), fftbuffer, lw=1.5)
plt.axis([0, N, min(xw), max(xw)])
plt.title('fftbuffer: N = 512')
X = fftshift(fft(fftbuffer))
mX = 20 * np.log10(abs(X)/N)
pX = np.unwrap(np.angle(X))
plt.subplot(413)
plt.plot(np.arange(-hN, hN), mX, 'r', lw=1.5)
plt.axis([-hN,hN-1,-100,max(mX)])
plt.title('mX')
plt.subplot(414)
plt.plot(np.arange(-hN, hN), pX, 'c', lw=1.5)
plt.axis([-hN,hN-1,min(pX),max(pX)])
plt.title('pX')
plt.tight_layout()
plt.savefig('fft-zero-phase.png')
plt.show()
| agpl-3.0 |
leotrs/decu | test/notsosimple_project/src/script.py | 1 | 1196 | """
testscript.py
-------------
This is a test script for decu.
"""
from decu import Script, experiment, figure, run_parallel
import numpy as np
import matplotlib.pyplot as plt
class TestScript(Script):
@experiment(data_param='data')
def exp(self, data, param, param2):
"""Compute x**param for each data point."""
self.log.info('Working hard for {}..'.format(TestScript.exp.run))
return np.power(data, param) + param2
@figure()
def plot_result(self, data, result):
"""Plot results of experiment."""
plt.plot(data, result)
@figure()
def plot_many_results(self, data, results):
"""Plot results of experiment."""
plt.figure()
for res in results:
plt.plot(data, res)
def main(self):
"""Run some experiments and make some figures."""
data = np.arange(5)
result1 = self.exp(data, param=4, param2=10)
self.plot_result(data, result1)
param_list = [(data, x, y) for x, y in
zip(np.arange(5), np.arange(5, 10))]
result2 = run_parallel(self.exp, param_list)
self.plot_many_results(data, result2, suffix='parallel')
| mit |
MJuddBooth/pandas | pandas/tests/reshape/test_reshape.py | 1 | 25248 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
from collections import OrderedDict
import numpy as np
from numpy import nan
import pytest
from pandas.compat import u
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import Categorical, DataFrame, Index, Series, get_dummies
from pandas.core.sparse.api import SparseArray, SparseDtype
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
class TestGetDummies(object):
@pytest.fixture
def df(self):
return DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
@pytest.fixture(params=['uint8', 'i8', np.float64, bool, None])
def dtype(self, request):
return np.dtype(request.param)
@pytest.fixture(params=['dense', 'sparse'])
def sparse(self, request):
# params are strings to simplify reading test results,
# e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True]
return request.param == 'sparse'
def effective_dtype(self, dtype):
if dtype is None:
return np.uint8
return dtype
def test_raises_on_dtype_object(self, df):
with pytest.raises(ValueError):
get_dummies(df, dtype='object')
def test_basic(self, sparse, dtype):
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=self.effective_dtype(dtype))
if sparse:
expected = expected.apply(pd.SparseArray, fill_value=0.0)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
def test_basic_types(self, sparse, dtype):
# GH 10531
s_list = list('abc')
s_series = Series(s_list)
s_df = DataFrame({'a': [0, 1, 0, 1, 2],
'b': ['A', 'A', 'B', 'C', 'C'],
'c': [2, 3, 3, 3, 2]})
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=self.effective_dtype(dtype),
columns=list('abc'))
if sparse:
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
fill_value = False
else:
fill_value = 0.0
expected = expected.apply(SparseArray, fill_value=fill_value)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_df, columns=s_df.columns,
sparse=sparse, dtype=dtype)
if sparse:
dtype_name = 'Sparse[{}, {}]'.format(
self.effective_dtype(dtype).name,
fill_value
)
else:
dtype_name = self.effective_dtype(dtype).name
expected = Series({dtype_name: 8})
tm.assert_series_equal(result.get_dtype_counts(), expected)
result = get_dummies(s_df, columns=['a'], sparse=sparse, dtype=dtype)
expected_counts = {'int64': 1, 'object': 1}
expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0)
expected = Series(expected_counts).sort_index()
tm.assert_series_equal(result.get_dtype_counts().sort_index(),
expected)
def test_just_na(self, sparse):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=['A'])
res_list = get_dummies(just_na_list, sparse=sparse)
res_series = get_dummies(just_na_series, sparse=sparse)
res_series_index = get_dummies(just_na_series_index, sparse=sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ['A']
def test_include_na(self, sparse, dtype):
s = ['a', 'b', np.nan]
res = get_dummies(s, sparse=sparse, dtype=dtype)
exp = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0]},
dtype=self.effective_dtype(dtype))
if sparse:
exp = exp.apply(pd.SparseArray, fill_value=0.0)
assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype)
exp_na = DataFrame({nan: [0, 0, 1],
'a': [1, 0, 0],
'b': [0, 1, 0]},
dtype=self.effective_dtype(dtype))
exp_na = exp_na.reindex(['a', 'b', nan], axis=1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
if sparse:
exp_na = exp_na.apply(pd.SparseArray, fill_value=0.0)
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True,
sparse=sparse, dtype=dtype)
exp_just_na = DataFrame(Series(1, index=[0]), columns=[nan],
dtype=self.effective_dtype(dtype))
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
def test_unicode(self, sparse):
# See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = 'e'
eacute = unicodedata.lookup('LATIN SMALL LETTER E WITH ACUTE')
s = [e, eacute, eacute]
res = get_dummies(s, prefix='letter', sparse=sparse)
exp = DataFrame({'letter_e': [1, 0, 0],
u('letter_%s') % eacute: [0, 1, 1]},
dtype=np.uint8)
if sparse:
exp = exp.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self, df, sparse):
df = df[['A', 'B']]
result = get_dummies(df, sparse=sparse)
expected = DataFrame({'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1]},
dtype=np.uint8)
if sparse:
expected = pd.DataFrame({
"A_a": pd.SparseArray([1, 0, 1], dtype='uint8'),
"A_b": pd.SparseArray([0, 1, 0], dtype='uint8'),
"B_b": pd.SparseArray([1, 1, 0], dtype='uint8'),
"B_c": pd.SparseArray([0, 0, 1], dtype='uint8'),
})
assert_frame_equal(result, expected)
def test_dataframe_dummies_mix_default(self, df, sparse, dtype):
result = get_dummies(df, sparse=sparse, dtype=dtype)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3],
'A_a': arr([1, 0, 1], dtype=typ),
'A_b': arr([0, 1, 0], dtype=typ),
'B_b': arr([1, 1, 0], dtype=typ),
'B_c': arr([0, 0, 1], dtype=typ)})
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_list(self, df, sparse):
prefixes = ['from_A', 'from_B']
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1]},
dtype=np.uint8)
expected[['C']] = df[['C']]
cols = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
expected = expected[['C'] + cols]
typ = pd.SparseArray if sparse else pd.Series
expected[cols] = expected[cols].apply(lambda x: typ(x))
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_str(self, df, sparse):
# not that you should do this...
result = get_dummies(df, prefix='bad', sparse=sparse)
bad_columns = ['bad_a', 'bad_b', 'bad_b', 'bad_c']
expected = DataFrame([[1, 1, 0, 1, 0],
[2, 0, 1, 1, 0],
[3, 1, 0, 0, 1]],
columns=['C'] + bad_columns,
dtype=np.uint8)
expected = expected.astype({"C": np.int64})
if sparse:
# work around astyping & assigning with duplicate columns
# https://github.com/pandas-dev/pandas/issues/14427
expected = pd.concat([
pd.Series([1, 2, 3], name='C'),
pd.Series([1, 0, 1], name='bad_a', dtype='Sparse[uint8]'),
pd.Series([0, 1, 0], name='bad_b', dtype='Sparse[uint8]'),
pd.Series([1, 1, 0], name='bad_b', dtype='Sparse[uint8]'),
pd.Series([0, 0, 1], name='bad_c', dtype='Sparse[uint8]'),
], axis=1)
assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self, df, sparse):
result = get_dummies(df, prefix=['from_A'], columns=['A'],
sparse=sparse)
expected = DataFrame({'B': ['b', 'b', 'c'],
'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0]}, dtype=np.uint8)
expected[['C']] = df[['C']]
if sparse:
cols = ['from_A_a', 'from_A_b']
expected[cols] = expected[cols].apply(lambda x: pd.SparseSeries(x))
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self, df, sparse):
result = get_dummies(df, prefix_sep='..', sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'A..a': [1, 0, 1],
'A..b': [0, 1, 0],
'B..b': [1, 1, 0],
'B..c': [0, 0, 1]},
dtype=np.uint8)
expected[['C']] = df[['C']]
expected = expected[['C', 'A..a', 'A..b', 'B..b', 'B..c']]
if sparse:
cols = ['A..a', 'A..b', 'B..b', 'B..c']
expected[cols] = expected[cols].apply(lambda x: pd.SparseSeries(x))
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep=['..', '__'], sparse=sparse)
expected = expected.rename(columns={'B..b': 'B__b', 'B..c': 'B__c'})
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep={'A': '..', 'B': '__'},
sparse=sparse)
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix=['too few'], sparse=sparse)
def test_dataframe_dummies_prefix_sep_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix_sep=['bad'], sparse=sparse)
def test_dataframe_dummies_prefix_dict(self, sparse):
prefixes = {'A': 'from_A', 'B': 'from_B'}
df = DataFrame({'C': [1, 2, 3],
'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c']})
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1]})
columns = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
expected[columns] = expected[columns].astype(np.uint8)
if sparse:
expected[columns] = expected[columns].apply(
lambda x: pd.SparseSeries(x)
)
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self, df, sparse, dtype):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True,
sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_a': arr([1, 0, 1, 0], dtype=typ),
'A_b': arr([0, 1, 0, 0], dtype=typ),
'A_nan': arr([0, 0, 0, 1], dtype=typ),
'B_b': arr([1, 1, 0, 0], dtype=typ),
'B_c': arr([0, 0, 1, 0], dtype=typ),
'B_nan': arr([0, 0, 0, 1], dtype=typ)
}).sort_index(axis=1)
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=sparse, dtype=dtype)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, sparse=sparse, dtype=dtype).sort_index(axis=1)
if sparse:
arr = SparseArray
typ = SparseDtype(dtype, 0)
else:
arr = np.array
typ = dtype
expected = DataFrame({'C': [1, 2, 3],
'A_a': arr([1, 0, 1], dtype=typ),
'A_b': arr([0, 1, 0], dtype=typ),
'B_b': arr([1, 1, 0], dtype=typ),
'B_c': arr([0, 0, 1], dtype=typ),
'cat_x': arr([1, 0, 0], dtype=typ),
'cat_y': arr([0, 1, 1], dtype=typ)
}).sort_index(axis=1)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('get_dummies_kwargs,expected', [
({'data': pd.DataFrame(({u'ä': ['a']}))},
pd.DataFrame({u'ä_a': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'ä']})},
pd.DataFrame({u'x_ä': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'a']}), 'prefix':u'ä'},
pd.DataFrame({u'ä_a': [1]}, dtype=np.uint8)),
({'data': pd.DataFrame({'x': [u'a']}), 'prefix_sep':u'ä'},
pd.DataFrame({u'xäa': [1]}, dtype=np.uint8))])
def test_dataframe_dummies_unicode(self, get_dummies_kwargs, expected):
# GH22084 pd.get_dummies incorrectly encodes unicode characters
# in dataframe column names
result = get_dummies(**get_dummies_kwargs)
assert_frame_equal(result, expected)
def test_basic_drop_first(self, sparse):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=np.uint8)
result = get_dummies(s_list, drop_first=True, sparse=sparse)
if sparse:
expected = expected.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
def test_basic_drop_first_one_level(self, sparse):
# Test the case that categorical variable only has one level.
s_list = list('aaa')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame(index=np.arange(3))
result = get_dummies(s_list, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
expected = DataFrame(index=list('ABC'))
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
def test_basic_drop_first_NA(self, sparse):
# Test NA handling together with drop_first
s_NA = ['a', 'b', np.nan]
res = get_dummies(s_NA, drop_first=True, sparse=sparse)
exp = DataFrame({'b': [0, 1, 0]}, dtype=np.uint8)
if sparse:
exp = exp.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(res, exp)
res_na = get_dummies(s_NA, dummy_na=True, drop_first=True,
sparse=sparse)
exp_na = DataFrame(
{'b': [0, 1, 0],
nan: [0, 0, 1]},
dtype=np.uint8).reindex(['b', nan], axis=1)
if sparse:
exp_na = exp_na.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True, drop_first=True,
sparse=sparse)
exp_just_na = DataFrame(index=np.arange(1))
assert_frame_equal(res_just_na, exp_just_na)
def test_dataframe_dummies_drop_first(self, df, sparse):
df = df[['A', 'B']]
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({'A_b': [0, 1, 0],
'B_c': [0, 0, 1]},
dtype=np.uint8)
if sparse:
expected = expected.apply(pd.SparseArray, fill_value=0)
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_categorical(
self, df, sparse, dtype):
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'A_b': [0, 1, 0],
'B_c': [0, 0, 1],
'cat_y': [0, 1, 1]})
cols = ['A_b', 'B_c', 'cat_y']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_b', 'B_c', 'cat_y']]
if sparse:
for col in cols:
expected[col] = pd.SparseSeries(expected[col])
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_na(self, df, sparse):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, drop_first=True,
sparse=sparse).sort_index(axis=1)
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_b': [0, 1, 0, 0],
'A_nan': [0, 0, 0, 1],
'B_c': [0, 0, 1, 0],
'B_nan': [0, 0, 0, 1]})
cols = ['A_b', 'A_nan', 'B_c', 'B_nan']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected.sort_index(axis=1)
if sparse:
for col in cols:
expected[col] = pd.SparseSeries(expected[col])
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, drop_first=True,
sparse=sparse)
expected = expected[['C', 'A_b', 'B_c']]
assert_frame_equal(result, expected)
def test_int_int(self):
data = Series([1, 2, 1])
result = pd.get_dummies(data)
expected = DataFrame([[1, 0],
[0, 1],
[1, 0]],
columns=[1, 2],
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
data = Series(pd.Categorical(['a', 'b', 'a']))
result = pd.get_dummies(data)
expected = DataFrame([[1, 0],
[0, 1],
[1, 0]],
columns=pd.Categorical(['a', 'b']),
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
def test_int_df(self, dtype):
data = DataFrame(
{'A': [1, 2, 1],
'B': pd.Categorical(['a', 'b', 'a']),
'C': [1, 2, 1],
'D': [1., 2., 1.]
}
)
columns = ['C', 'D', 'A_1', 'A_2', 'B_a', 'B_b']
expected = DataFrame([
[1, 1., 1, 0, 1, 0],
[2, 2., 0, 1, 0, 1],
[1, 1., 1, 0, 1, 0]
], columns=columns)
expected[columns[2:]] = expected[columns[2:]].astype(dtype)
result = pd.get_dummies(data, columns=['A', 'B'], dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_preserve_categorical_dtype(self, dtype):
# GH13854
for ordered in [False, True]:
cat = pd.Categorical(list("xy"), categories=list("xyz"),
ordered=ordered)
result = get_dummies(cat, dtype=dtype)
data = np.array([[1, 0, 0], [0, 1, 0]],
dtype=self.effective_dtype(dtype))
cols = pd.CategoricalIndex(cat.categories,
categories=cat.categories,
ordered=ordered)
expected = DataFrame(data, columns=cols,
dtype=self.effective_dtype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('sparse', [True, False])
def test_get_dummies_dont_sparsify_all_columns(self, sparse):
# GH18914
df = DataFrame.from_dict(OrderedDict([('GDP', [1, 2]),
('Nation', ['AB', 'CD'])]))
df = get_dummies(df, columns=['Nation'], sparse=sparse)
df2 = df.reindex(columns=['GDP'])
tm.assert_frame_equal(df[['GDP']], df2)
def test_get_dummies_duplicate_columns(self, df):
# GH20839
df.columns = ["A", "A", "A"]
result = get_dummies(df).sort_index(axis=1)
expected = DataFrame([[1, 1, 0, 1, 0],
[2, 0, 1, 1, 0],
[3, 1, 0, 0, 1]],
columns=['A', 'A_a', 'A_b', 'A_b', 'A_c'],
dtype=np.uint8).sort_index(axis=1)
expected = expected.astype({"A": np.int64})
tm.assert_frame_equal(result, expected)
class TestCategoricalReshape(object):
def test_reshaping_multi_index_categorical(self):
# construct a MultiIndexed DataFrame formerly created
# via `tm.makePanel().to_frame()`
cols = ['ItemA', 'ItemB', 'ItemC']
data = {c: tm.makeTimeDataFrame() for c in cols}
df = pd.concat({c: data[c].stack() for c in data}, axis='columns')
df.index.names = ['major', 'minor']
df['str'] = 'foo'
dti = df.index.levels[0]
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(dti))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=dti)
tm.assert_frame_equal(result, expected)
class TestMakeAxisDummies(object):
def test_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
cidx = pd.CategoricalIndex(list("xyz"), ordered=ordered)
midx = pd.MultiIndex(levels=[['a'], cidx],
codes=[[0, 0], [0, 1]])
df = DataFrame([[10, 11]], index=midx)
expected = DataFrame([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
index=midx, columns=cidx)
from pandas.core.reshape.reshape import make_axis_dummies
result = make_axis_dummies(df)
tm.assert_frame_equal(result, expected)
result = make_axis_dummies(df, transform=lambda x: x)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
dhaitz/CalibFW | plotting/modules/plot_sandbox.py | 1 | 75936 | # -*- coding: utf-8 -*-
"""
plotting sandbox module for merlin.
This module is to be used for testing or development work.
"""
import plotbase
import copy
import plot1d
import getroot
import math
import plotresponse
import plotfractions
import plot2d
import plot_tagging
import fit
import os
import ROOT  # needed for ROOT.TGraphErrors in npuplot() below
def recogen_alpha_ptbins(files, opt):
""" recogen vs alpha as well as Z pT vs alpha in pT bins. """
zptbins = [
"1",
"zpt>30 && zpt<50",
"zpt>50 && zpt<70",
"zpt>70 && zpt<120",
"zpt>120"
]
texts = [
"$\mathrm{inclusive}$",
"$30 < \mathrm{Z} p_\mathrm{T} < 50\ \mathrm{GeV}$",
"$50 < \mathrm{Z} p_\mathrm{T} < 70\ \mathrm{GeV}$",
"$70 < \mathrm{Z} p_\mathrm{T} < 120\ \mathrm{GeV}$",
"$\mathrm{Z}\ p_\mathrm{T} > 120\ \mathrm{GeV}$",
]
    fig, axes = plotbase.newPlot(subplots = len(zptbins) * 2, subplots_X = len(zptbins))
settings = plotbase.getSettings(opt, quantity='recogen_alpha')
for ax1, ax2, selection, text in zip(axes[:(len(axes)/2)], axes[(len(axes)/2):], zptbins, texts):
plot1d.datamcplot("recogen_alpha", files, opt, fig_axes = [fig, ax1],
changes={
'allalpha': True,
'y': [0.99, 1.1],
'subplot': True,
'nbins': 6,
'fit': 'slope',
'x': [0, 0.3],
'text': text,
'selection': [selection],
}
)
plot1d.datamcplot("zpt_alpha", files, opt, fig_axes = [fig, ax2],
changes={
'allalpha': True,
'y': [0, 300],
'subplot': True,
'nbins': 6,
'x': [0, 0.3],
'text': text,
'selection': [selection],
}
)
plotbase.Save(fig, settings)
def corrs(files, opt):
fig, ax = plotbase.newPlot()
settings = plotbase.getSettings(opt, quantity='recogen_genpt')
for quantity, marker, color, label in zip(
['raw/recogen_genpt', 'l1/recogen_genpt', 'l1l2l3/recogen_genpt'],
['o', 'D', '-'],
['black', '#7293cb', '#e1974c'],
['raw', 'L1', 'L1L2L3']
):
plot1d.datamcplot(quantity, files, opt, fig_axes = [fig, ax], changes={
'algorithm': "",
'markers':[marker],
'colors':[color],
'labels':[label, ""],
'correction':"",
'subplot':True,
'grid': True,
'y': [0.9, 1.5],
'legloc': 'upper right',
'x': [20, 100],
'yname': 'recogen',
'xname':'genpt'
})
settings['filename'] = plotbase.getDefaultFilename('recogen', opt, settings)
plotbase.Save(fig, settings)
def corrbins(files, opt):
fig, ax = plotbase.newPlot()
settings = plotbase.getSettings(opt, quantity='recogen')
for quantity, marker, color, label, n in zip(
['l1l2l3/recogen3040', 'l1l2l3/recogen5080', 'l1l2l3/recogen100'],
['o', 'f', '-'],
['black', '#7293cb', '#e1974c'],
            ['pT 30-40', 'pT 50-80', 'pT >100'],
range(10)
):
plot1d.datamcplot(quantity, files, opt, fig_axes = [fig, ax], changes={
'algorithm': "",
'markers':[marker],
'colors':[color],
'labels':[label, ""],
'correction':"",
'subplot':True,
'grid': True,
'fitlabel_offset':-0.07*n,
'legloc': 'upper right',
'x': [0, 2],
'xname':'recogen'
})
settings['filename'] = plotbase.getDefaultFilename('recogen-bins', opt, settings)
plotbase.Save(fig, settings)
def zmassFitted(files, opt, changes=None, settings=None):
""" Plots the FITTED Z mass peak position depending on pT, NPV, y."""
quantity = "zmass"
# iterate over raw vs corr electrons
for mode in ['raw', 'corr']:
filenames = ['work/data_ee_%s.root' % mode, 'work/mc_ee_powheg_%s.root' % mode]
files, opt = plotbase.openRootFiles(filenames, opt)
# iterate over quantities
for xq, xbins in zip(
['npv', 'zpt', 'zy'],
[
[a - 0.5 for a, b in opt.npv] + [opt.npv[-1][1] - 0.5],
opt.zbins,
[(i/2.)-2. for i in range(0, 9)],
]
):
# iterate over Z pt (inclusive/low,medium,high)
for ptregion, ptselection, ptstring in zip(["_inclusivept", "_lowpt", "_mediumpt", "_highpt"],
[
"1",
"zpt<60",
"zpt>60 && zpt < 120",
"zpt>120",
],
[
"",
"Z $p_\mathrm{T}$ < 60 GeV",
"60 < Z $p_\mathrm{T}$ < 120 GeV",
"Z $p_\mathrm{T}$ > 120 GeV",
]):
# iterate over electron eta regions
for etaregion, etaselection, etastring in zip(
["_all", "_EBEB", "_EBEE", "_EEEE"],
[
"1",
"abs(eminuseta) < 1.5 && abs(epluseta) < 1.5",
"((abs(eminuseta) < 1.5 && abs(epluseta) > 1.6) || (abs(epluseta) < 1.5 && abs(eminuseta) > 1.6))",
"abs(eminuseta) > 1.6 && abs(epluseta) > 1.6",
],
[
"",
"EB-EB",
"EB-EE & EE-EB",
"EE-EE",
]):
# we dont need pt-binned Z pT plots:
                    if xq == 'zpt' and ptselection != "1":
continue
rootobjects, rootobjects2 = [], []
fig = plotbase.plt.figure(figsize=[7, 10])
ax = plotbase.plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax.number = 1
ax2 = plotbase.plt.subplot2grid((3, 1), (2, 0))
ax2.number = 2
fig.add_axes(ax)
fig.add_axes(ax2)
# print the Z pt and electron eta region on the plot
ax.text(0.98, 0.98, ptstring, va='top', ha='right', transform=ax.transAxes)
ax.text(0.98, 0.9, etastring, va='top', ha='right', transform=ax.transAxes)
changes = {
'y': [90.8, 94.8],
'yname': r'$m^{\mathrm{Z}}$ (peak position from Breit-Wigner fit) / GeV',
'legloc': 'upper left',
'title': mode + " electrons",
'labels': ['Data', 'Powheg'],
}
settings = plotbase.getSettings(opt, changes=changes, quantity=quantity + "_" + xq)
# iterate over files
markers = ['o', 'D']
ys, yerrs, xs = [], [], []
for i, f in enumerate(files):
bins = xbins
y, yerr, x = [], [], []
# iterate over bins
for lower, upper in zip(bins[:-1], bins[1:]):
changes = {
'selection': ['(%s > %s && %s < %s) && (%s) && (%s)' % (xq,
lower, xq, upper, ptselection, etaselection)],
'nbins': 40,
'folder': 'zcuts',
'x': [71, 101],
}
local_settings = plotbase.getSettings(opt, changes, None, quantity)
# get the zmass, fit, get the xq distribution; append to lists
rootobjects += [getroot.histofromfile(quantity, f, local_settings, index=i)]
p0, p0err, p1, p1err, p2, p2err, chi2, ndf, conf_intervals = fit.fitline2(rootobjects[-1], breitwigner=True, limits=local_settings['x'])
y += [p1]
yerr += [p1err]
changes['x'] = [lower, upper]
local_settings = plotbase.getSettings(opt, changes, None, quantity)
rootobjects2 += [getroot.histofromfile(xq, f, local_settings, index=i)]
x += [rootobjects2[-1].GetMean()]
# fine line to indicate bin borders
ax.add_line(plotbase.matplotlib.lines.Line2D((lower, upper), (y[-1],y[-1]), color='black', alpha=0.05))
ys.append(y)
yerrs.append(yerr)
xs.append(x)
#plot
ax.errorbar(x, y, yerr, drawstyle='steps-mid', color=settings['colors'][i],
fmt=markers[i], capsize=0, label=settings['labels'][i])
# format and save
if xq == 'zpt':
settings['xlog'] = True
settings['x'] = [30, 1000]
settings['xticks'] = [30, 50, 70, 100, 200, 400, 1000]
plot1d.formatting(ax, settings, opt, [], [])
# calculate ratio values
ratio_y = [d/m for d, m in zip(ys[0], ys[1])]
ratio_yerrs = [math.sqrt((derr/d)**2 + (merr/m)**2)for d, derr, m, merr in zip(ys[0], yerrs[0], ys[1], yerrs[1])]
ratio_x = [0.5 * (d + m) for d, m in zip(xs[0], xs[1])]
#format ratio plot
ax2.errorbar(ratio_x, ratio_y, ratio_yerrs, drawstyle='steps-mid', color='black',
fmt='o', capsize=0, label='ratio')
                    ax2.axhline(1.0)
fig.subplots_adjust(hspace=0.1)
ax.set_xticklabels([])
ax.set_xlabel("")
settings['ratio'] = True
settings['legloc'] = None
settings['xynames'][1] = 'ratio'
plot1d.formatting(ax2, settings, opt, [], [])
ax2.set_ylim(0.99, 1.01)
settings['filename'] = plotbase.getDefaultFilename(quantity + "_" + xq + "_" + mode + ptregion + etaregion, opt, settings)
plotbase.Save(fig, settings)
def zmassEBEE(files, opt):
""" Plot the Z mass depending on where the electrons are reconstructed.
3 bins: EB-EB, EB-EE, EE-EE
"""
selections = [
'abs(eminuseta)<1.5 && abs(epluseta)<1.5',
'(abs(eminuseta)>1.5 && abs(epluseta)<1.5) || abs(eminuseta)<1.5 && abs(epluseta)>1.5',
'abs(eminuseta)>1.5 && abs(epluseta)>1.5',
]
filenames = ['zmass_ebeb', 'zmass_ebee', 'zmass_eeee']
titles = ['Barrel electrons only', 'One electron barrel, one endcap', 'Endcap electrons only']
for selection, filename, title in zip(selections, filenames, titles):
plot1d.plot1dratiosubplot("zmass", files, opt, changes = {
'x': [81, 101],
'selection': [selection, "hlt * (%s)" % selection],
'fit': 'bw',
'nbins': 40,
'filename': filename,
'title': title,
'folder': 'zcuts',
})
def eid(files, opt):
quantity = 'mvaid'
"""changes = {
'x': [0, 1.0001],
#'log': True,
'folder': 'electron_all',
'nbins':50,
'subplot':True,
'markers': ['f'],
}
settings = plotbase.getSettings(opt, quantity=quantity)
fig, ax = plotbase.newPlot()
for c, l, s in zip(['#236BB2', '#E5AD3D'],
['fake', 'true'],
['1', 'deltar < 0.3 && deltar>0']):
changes.update({
'labels': [l],
'colors': [c],
'selection': s,
})
plot1d.datamcplot(quantity, files, opt, fig_axes = [fig, ax], changes=changes)
settings['filename'] = plotbase.getDefaultFilename(quantity, opt, settings)
plotbase.Save(fig, settings)"""
## id vs deltar
for quantity in ["mvaid", "mvatrigid", "looseid", "mediumid", "tightid"]:
plot1d.datamcplot("%s_deltar" % quantity, files, opt, changes = {
'folder': 'electron_all',
'nbins': 50,
'xynames': ['$\Delta$R(reco, gen)', quantity],
'x': [0, 0.5],
'legloc': None,
})
def plots_2014_07_03(files, opt):
""" Plots for JEC presentation 03.07. """
#### 2D histograms
for obj, x, nbins in zip(['muon', 'jet', 'electron'],
[[-2.5, 2.5], [-5.3, 5.3]]*2,
[400, 1000, 300]):
changes = {
'out': 'out/2014_07_03',
'y': [-3.2, 3.2],
}
changes.update({
'folder': obj + "_all",
'nbins': nbins,
'x':x,
'filename': obj + '_phi_eta',
'xynames': ['%s eta' % obj,
'%s phi' % obj, obj + 's'],
})
        if obj == 'electron':
filenames = ["data_ee_noc", "mc_ee_corr_test"]
else:
filenames = ["data_noc", "mc_rundep_noc"]
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
plot2d.twoD("phi_eta", files, opt, changes = changes)
        if obj != 'electron':
changes.update({
'year': 2011,
'filename': obj + '_phi_eta_2011',
'lumi': 5.1,
'energy': 7,
})
filenames = ["data11_noc"]
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
plot2d.twoD("phi_eta", files, opt, changes = changes)
##### PU Jet ID
filenames = ["dataPUJETID", "data"]
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
changes = {
'normalize': False,
        'ratiosubplot': True,
'ratiosubploty': [0.8, 1.2],
'out': 'out/2014_07_03',
'x': [30, 250],
'title': 'Data',
'labels': ['PUJetID applied', 'default'],
}
plot1d.datamcplot('zpt', files, opt, changes=changes)
for typ in ['mpf', 'ptbalance']:
plotresponse.responseratio(files, opt, over='zpt', types=[typ], changes={
'labels': ['PUJetID applied', 'default'],
'out': 'out/2014_07_03',
'x': [30, 1000],
'xlog': True,
})
##### timedep
filenames = ["data", "mc_rundep"]
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
changes = {
'out': 'out/2014_07_03',
'filename': "timedep",
}
timedep(files, opt, changes=changes)
###### MPF fix
filenames = [
"/storage/a/dhaitz/excalibur/artus/mc_rundep_2014-06-18_10-41/out.root",
"/storage/a/dhaitz/excalibur/artus/mc_rundep_2014-06-06_14-26/out.root"
]
files = [getroot.openfile(f) for f in filenames]
plotresponse.responseratio(files, opt, over='zpt', types=['mpf'], changes={
'labels': ['MCRD-fixed', 'MCRD'],
'xlog': True,
'filename': "mpf_zpt-fixed",
'out': 'out/2014_07_03',
'x': [30, 1000],
'xticks': [30, 50, 70, 100, 200, 400, 1000],
})
# mpf slopes
filenames = ["data", "mc_rundep"]
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
changes = {
'filename': "mpfslopes-fixed",
'labels': ['data', 'MCRD'],
'out': 'out/2014_07_03',
'allalpha': True,
'selection': 'alpha<0.3',
}
mpfslopes(files, opt, changes)
changes.update({
'filename': "mpfslopes",
'labels': ['data', 'MCRD'],
})
filenames = [
'/storage/a/dhaitz/excalibur/artus/data_2014-04-10_21-21/out.root',
'/storage/a/dhaitz/excalibur/artus/mc_rundep_2014-06-06_14-26/out.root'
]
files = [getroot.openfile(f) for f in filenames]
mpfslopes(files, opt, changes)
# SYNC
os.system("rsync ${EXCALIBUR_BASE}/out/2014_07_03 ekplx26:plots -r")
def timedep(files, opt, changes = None):
""" Plots for the time dependence, requested by Mikko 2014-06-25."""
settings = plotbase.getSettings(opt, quantity="response_run", changes=changes)
fig, ax = plotbase.newPlot()
factor = 2e4
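    # The fitted response-vs-run slopes are tiny, so they are scaled by 2e4 here;
    # the y-axis label below quotes the same factor of 20 000.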
methods = ['mpf', 'ptbalance']
labels = ['MPF', '$p_T$ balance']
for q, c, l, m, in zip(methods,
settings['colors'], labels, settings['markers']):
slopes, serrs, x = [], [], []
for eta1, eta2 in zip(opt.eta[:-1], opt.eta[1:]):
changes = {
'alleta': True,
'allalpha': True,
'selection': 'alpha<0.3 && abs(jet1eta) > %s && abs(jet1eta) < %s' % (eta1, eta2),
'fit': 'slope',
}
rootobject = getroot.histofromfile("%s_run" % q, files[0], settings, changes=changes)
# get fit parameters
slope, serr = fit.fitline2(rootobject)[2:4]
slopes += [slope*factor]
serrs += [serr*factor]
changes['x'] = [0, 6]
x += [getroot.histofromfile("abs(jet1eta)", files[0], settings, changes=changes).GetMean()]
ax.errorbar(x, slopes, serrs, drawstyle='steps-mid', color=c,
fmt='o', capsize=0, label=l)
#formatting stuff
settings['x'] = [0, 5]
plotbase.setAxisLimits(ax, settings)
plotbase.labels(ax, opt, settings)
    plotbase.axislabels(ax, 'Leading jet $\eta$', 'Response vs run: linear fit slope (multiplied by 20 000)', settings=settings)
ax.set_ylim(-0.1, 0.05)
ax.set_xlim(0, 5.25)
ax.grid(True)
ax.set_xticks([float("%1.2f" % eta) for eta in opt.eta])
for label in ax.get_xticklabels():
label.set_rotation(45)
ax.axhline(0.0, color='black', linestyle='--')
    settings['filename'] = "response_run"
plotbase.Save(fig, settings)
def npuplot(files, opt):
""" Plots for the JEC paper that Mikko requested 24.4.: npv and rho in bins of npu."""
settings = plotbase.getSettings(opt, quantity='npv')
settings['x'] = [-0.5, 99.5]
settings['nbins'] = 100
tgraphs = []
for f in files:
if files.index(f) == 0: # flag bad runs in data
runs = "run!=191411 && run!=198049 && run!=198050 && run!=198063 && run!=201727 && run!=203830 && run!=203832 && run!=203833 && run!=203834 && run!=203835 && run!=203987 && run!=203992 && run!=203994 && run!=204100 && run!=204101 && run!=208509"
else:
runs = 1
npuhisto = getroot.histofromfile('nputruth', f, settings)
for i in range(100):
if npuhisto.GetBinContent(i) > 0:
npu = i
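        # npu now holds the highest filled bin of the true-pileup distribution;
        # below, one graph point per pileup value stores the mean npv and mean rho
        # together with their statistical errors.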
tgraph = ROOT.TGraphErrors()
for n in range(npu):
changes = {'selection': 'nputruth>%s && nputruth<%s && %s' % (n-0.5, n+0.5, runs)}
npv = getroot.histofromfile('npv', f, settings, changes=changes).GetMean()
npverr = getroot.histofromfile('npv', f, settings, changes=changes).GetMeanError()
rho = getroot.histofromfile('rho', f, settings, changes=changes).GetMean()
rhoerr = getroot.histofromfile('rho', f, settings, changes=changes).GetMeanError()
tgraph.SetPoint(n, npv, rho)
tgraph.SetPointError(n, npverr, rhoerr)
tgraphs.append(tgraph)
settings['root'] = settings['root'] or settings['filename']
getroot.saveasroot(tgraphs, opt, settings)
def electronupdate(files, opt):
"""Plots for the Zee update 26.06.2014."""
# Reco/gen electron pt vs eta
filenames = ['mc_ee_raw', 'mc_ee_corr']
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
changes={
'x': [0, 2.5],
'nbins': 25,
'labels': ['raw', 'corrected'],
'markers': ['o', '-'],
'colors': ['maroon', 'blue'],
'folder':'zcuts',
'y': [0.94, 1.06],
'title': 'Madgraph',
'xynames': [
r"$|\eta_{e^{-}} \| $",
r'$\mathrm{e}^{-} p_\mathrm{T}$ Reco/Gen'
]
}
plot1d.datamcplot('eminuspt/geneminuspt_abs(eminuseta)', files, opt, changes=changes)
changes={
'ratiosubplot': True,
'title': 'Madgraph',
'x': [0, 1000],
'log': True,
'labels': ['raw', 'corrected'],
'folder': 'all',
'ratiosubplotfit': 'chi2',
}
plot1d.datamcplot('zpt', files, opt, changes=changes)
#LHE information
fig, ax = plotbase.newPlot()
fig2, ax2 = plotbase.newPlot()
changes ={
'folder':'all',
'x': [-4, 4],
'y': [0, 200000],
'subplot': True,
'nbins':50,
'normalize': False,
'xynames': ['Z rapidity', 'Events'],
'log':True,
}
for q, c, m, l in zip(
['zy', 'genzy', 'lhezy'],
['black', 'lightskyblue', 'FireBrick'],
['o', 'f', '-'],
['RecoZ', 'GenZ', 'LHE-Z'],
):
changes['labels'] = [l]
changes['markers'] = [m]
changes['colors'] = [c]
plot1d.datamcplot(q, files[1:], opt, changes=changes, fig_axes=[fig, ax])
settings = plotbase.getSettings(opt, None, None, 'rapidity')
settings['filename'] = 'rapidity'
plotbase.Save(fig, settings)
# Data-MC comparisons ######################################################
# basic quantities
filenames = ['data_ee_corr', 'mc_ee_corr']
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
changes = {
'x': [-3, 3],
'y': [-3.2, 3.2],
'folder': 'all',
'nbins': 200,
}
plot2d.twoD('eminusphi_eminuseta', files, opt, changes=changes)
for q, c in zip(['eminuspt', 'eminuseta', 'zy', 'zpt', 'zmass'],
[
{},
{'x': [-2.5, 2.5]},
{},
{'x': [0, 500], 'log':True},
{'x': [80, 102], 'ratiosubploty':[0.9, 1.1]},
]):
changes = {
'labels': ['Data', 'Madgraph'],
'ratiosubplot': True,
'folder':'zcuts',
'nbins': 50,
}
changes.update(c)
plot1d.datamcplot(q, files, opt, changes=changes)
# scale factors
changes = {
'x': [0, 100],
'y': [0, 3],
'z': [0.8, 1.2],
'folder': 'all',
'nbins': 100,
'selection': 'sfminus>0',
'colormap': 'bwr',
}
plot2d.twoD('sfminus_abs(eminuseta)_eminuspt', files[1:], opt, changes=changes)
# zpt in rapidities
for ybin in [[i/2., (i+1)/2.] for i in range(5)]:
changes = {
'x': [0, 600],
'nbins': 30,
'folder':'zcuts',
'title': "%s < $y_Z$ < %s" % tuple(ybin),
            'log': True,
'ratiosubplot': True,
'selection': 'abs(zy)>%s && abs(zy)<%s' % (ybin[0], ybin[1]),
'filename': ('zpt_rap-%s-%s' % (ybin[0], ybin[1])).replace('.', '_'),
}
plot1d.datamcplot('zpt', files, opt, changes=changes)
# scale factor
changes = {
'labels': ['Madgraph'],
'ratiosubplot': True,
        'xynames':['eminuspt', r"$|\eta_{e^{-}}|$"],
'folder':'all',
'x': [0, 60],
'y': [0, 3],
'colormap': 'bwr',
'z': [0.5, 1],
}
q = 'sfminus_abs(eminuseta)_eminuspt'
plot2d.twoD(q, files[1:], opt, changes=changes)
##############
# Plot for ID acceptance
fig, ax = plotbase.newPlot()
changes ={
'folder':'all',
'x': [0, 150],
'y': [0, 1],
'subplot': True,
'normalize': False,
'legloc': 'lower right',
'xynames': ['eminuspt', 'Acceptance']
}
filenames = ['mc_ee_corr_noid']
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
for q, c, m, l in zip(
['eminusidtight', 'eminusidmedium', 'eminusidloose', 'eminusidveto',
'eminusid'],
['lightskyblue', 'FireBrick', 'green', 'black', 'blue'],
['f', '_', '-', "o", "*"],
['Tight ID', 'Medium ID', 'Loose ID', "Veto ID", "MVA ID"],
):
changes['labels'] = [l]
changes['markers'] = [m]
changes['colors'] = [c]
plot1d.datamcplot("%s_eminuspt" % q, files, opt, changes=changes, fig_axes=[fig, ax])
settings = plotbase.getSettings(opt, None, None, 'id')
settings['filename'] = 'id'
settings['title'] = 'MC'
plotbase.Save(fig, settings)
def mpfslopes(files, opt, changes=None):
""" Plot the slope of a linear fit on MPF vs NPV, in Z pT bins."""
quantity="mpf_npv"
settings = plotbase.getSettings(opt, quantity=quantity, changes=changes)
settings['special_binning'] = True
print opt.zbins
fig, ax = plotbase.newPlot()
for f, c, l, m, in zip(files, settings['colors'], settings['labels'],
settings['markers']):
slopes, serrs, x = [], [], []
# iterate over Z pT bins
for ptlow, pthigh in zip(opt.zbins[:-1], opt.zbins[1:]):
changes = {'selection':'zpt>%s && zpt<%s' % (ptlow, pthigh)}
rootobject = getroot.histofromfile(quantity, f, settings, changes=changes)
# get fit parameters and mean Z pT; append to lists
slope, serr = fit.fitline2(rootobject)[2:4]
slopes += [slope]
serrs += [serr]
x += [getroot.histofromfile("zpt", f, settings, changes=changes).GetMean()]
ax.errorbar(x, slopes, serrs, drawstyle='steps-mid', color=c,
fmt='o', capsize=0, label=l)
#formatting stuff
settings['x'] = [30, 100]
plotbase.setAxisLimits(ax, settings)
plotbase.labels(ax, opt, settings)
ax.set_xscale('log')
settings['xticks'] = opt.zbins
plotbase.axislabels(ax, 'zpt', 'slope from fit on MPF vs NPV', settings=settings)
ax.set_ylim(-0.002, 0.002)
ax.grid(True)
ax.axhline(0.0, color='black', linestyle='--')
plotbase.Save(fig, settings)
def pileup(files, opt):
for ptlow, pthigh in zip(opt.zbins[:-1], opt.zbins[1:]):
plotresponse.responseratio(files, opt, over='npv', types=['mpf'], changes={
'allalpha':True,
'selection':'alpha<0.3 && zpt>%s && zpt<%s' % (ptlow, pthigh),
'filename': "mpf_npv_%s-%s" % (ptlow, pthigh)
}
)
def emucomparison(files, opt):
values = []
valueerrs = []
for filenames in [['data', 'mc'], ['data_ee', 'mc_ee']]:
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
for quantity in ['mpf', 'ptbalance']:
settings = plotbase.getSettings(opt, None, None, quantity)
settings['nbins'] = 40
settings['correction'] = 'L1L2L3'
if 'ee' in filenames[0]:
if settings['selection']:
                    settings['selection'] = 'abs(epluseta)<1.0 && abs(eminuseta)<1.0 && %s' % settings['selection']
else:
                    settings['selection'] = 'abs(epluseta)<1.0 && abs(eminuseta)<1.0'
datamc = []
rootobjects = []
fitvalues = []
for f in files:
rootobjects += [getroot.histofromfile(quantity, f, settings)]
p0, p0err, p1, p1err, p2, p2err, chi2, ndf, conf_intervals = fit.fitline2(rootobjects[-1],
gauss=True, limits=[0, 2])
fitvalues += [p1, p1err]
ratio = fitvalues[0] / fitvalues[2]
ratioerr = math.sqrt(fitvalues[1] ** 2 + fitvalues[3] ** 2)
values.append(ratio)
valueerrs.append(ratioerr)
fig, ax = plotbase.newPlot()
ax.errorbar(range(4), values, valueerrs, drawstyle='steps-mid', color='black',
fmt='o', capsize=0,)
ax.set_xticks([0, 1, 2, 3])
ax.set_xticklabels(['Zmm\nMPF', 'Zmm\npT balance', 'Zee\nMPF', 'Zee\npT balance'])
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(0.96, 1.001)
ax.axhline(1.0, color='black', linestyle=':')
ax.set_ylabel('Jet response Data/MC ratio', ha="right", x=1)
plotbase.Save(fig, settings)
def electrons(files, opt):
""" Standard set of plots for the dielectron analysis. """
filenames = ['data_ee', 'mc_ee']
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
base_changes = {
'out': 'out/ee2014',
'folder': 'zcuts', # no additional restrictions on jets
'normalize': False, # no normalizing to check if the lumi reweighting works
'factor': 1., # on the fly lumi reweighting
'efficiency': 1., # no trigger reweighting for electrons
'ratiosubplot': True,
}
# zmass with fit
changes = {
'legloc': 'center right',
'nbins': 50,
'fit': 'gauss'
}
changes.update(base_changes)
plot1d.datamcplot('zmass', files, opt, changes=changes)
#electron quantities
for charge in ['plus', 'minus']:
changes = {
'x': [0, 150],
'nbins': 40,
}
changes.update(base_changes)
plot1d.datamcplot('e%spt' % charge, files, opt, changes=changes)
changes['x'] = [-2.5, 2.5]
plot1d.datamcplot('e%seta' % charge, files, opt, changes=changes)
changes['x'] = None
plot1d.datamcplot('e%sphi' % charge, files, opt, changes=changes)
changes['legloc'] = 'center right'
changes['filename'] = 'zmass_barrel'
changes['selection'] = 'abs(epluseta)<1.0 && abs(eminuseta)<1.0'
changes['title'] = '|eta(e)| < 1.0'
changes['fit'] = 'gauss'
plot1d.datamcplot('zmass', files, opt, changes=changes)
changes['filename'] = 'zmass_endcap'
changes['selection'] = 'abs(epluseta)>1.0 && abs(eminuseta)>1.0'
changes['title'] = '|eta(e)| > 1.0'
changes['fit'] = 'gauss'
plot1d.datamcplot('zmass', files, opt, changes=changes)
# Z pT in rapidity bins
rapbins = ['abs(zy)<1', 'abs(zy)>1 && abs(zy)<2', 'abs(zy)>2 && abs(zy)<3']
raplabels = ['|Y(Z)|<1', '1<|Y(Z)|<2', '2<|Y(Z)|<3']
rapname = ['0zy1', '1zy2', '2zy3']
for rbin, rlabel, rname in zip(rapbins, raplabels, rapname):
changes = {
'selection': rbin,
'filename': 'zpt-%s' % rname,
'x': [30, 750],
'log': True,
'title': rlabel,
'nbins': 40,
}
changes.update(base_changes)
plot1d.datamcplot('zpt', files, opt, changes=changes)
# npv
changes = {
'folder': 'all',
}
changes.update(base_changes)
changes['folder'] = 'all'
plot1d.datamcplot('npv', files, opt, changes=changes)
changes['noweighting'] = True
changes['factor'] = 3503.71 / 30459503 * 1000
changes['filename'] = 'npv_noweights'
plot1d.datamcplot('npv', files, opt, changes=changes)
# z pt and rapidity
changes = {
'nbins': 40,
}
changes.update(base_changes)
plot1d.datamcplot('zy', files, opt, changes=changes)
plot1d.datamcplot('zeta', files, opt, changes=changes)
changes['x'] = [30, 750]
changes['log'] = True
plot1d.datamcplot('zpt', files, opt, changes=changes)
#powheg comparison
filenames = ['data_ee', 'mc_ee', 'mc_ee_powheg']
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
changes = {
'log': True,
'x': [30, 750],
'nbins': 40,
'filename': 'zpt_mad-pow',
'labels': ['Data', 'Madgraph', 'Powheg'],
}
changes.update(base_changes)
plot1d.datamcplot('zpt', files, opt, changes=changes)
changes = {
'nbins': 40,
'filename': 'zmass_mad-pow',
'labels': ['Data', 'Madgraph', 'Powheg'],
}
changes.update(base_changes)
plot1d.datamcplot('zmass', files, opt, changes=changes)
files = files[::2]
filenames = filenames[::2]
changes = {
'log':True,
'x': [30, 750],
'nbins': 40,
'filename': 'zpt_pow',
'labels':['Data', 'Powheg'],
}
changes.update(base_changes)
    plot1d.datamcplot('zpt', files, opt, changes=changes)
#backgrounds
    filenames = ['data_ee', 'mc_ee', 'background_ee']
files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames]
changes = {
'log': True,
'x': [30, 750],
'filename': 'zpt_backgrounds',
'labels': ['Data', 'MC', 'Backgrounds'],
'markers': ['o', 'f', 'f'],
'stacked': True,
'ratiosubplot': False,
}
changes.update(base_changes)
changes['ratiosubplot'] = False
plot1d.datamcplot('zpt', files, opt, changes=changes)
changes.pop('x', None)
changes['filename'] = 'zmass_backgrounds'
changes['log'] = False
changes['ratiosubplot'] = False
plot1d.datamcplot('zmass', files, opt, changes=changes)
# sync the plots
import subprocess
subprocess.call(['rsync out/ee2014 dhaitz@ekplx26:plots/ -u -r --progress'], shell=True)
"""
merlin 2D_zmass_zpt --files $DATAEE $ARGS -x 0 50 --nbins 100 -y 80 100 -o $OUT
merlin eemass -o $OUT --files $DATAEE $ARGS --nbins 100 -x 0 120 -C lightskyblue -m f --folder all
merlin eemass -o $OUT --files $DATAEE $ARGS --nbins 100 -x 0 15 --filename eemass_low -C lightskyblue -m f --folder all
merlin 2D_zpt_zy -o $OUT --files $DATAEE $ARGS -y 0 100 --nbins 100
"""
def an(files, opt):
""" Plots for the 2014 Z->mumu JEC AN."""
"""
#MET
for quantity in ['METpt', 'METphi']:
plot1d.datamcplot(quantity, files, opt, changes = {'title': 'CMS preliminary'})
plot1d.datamcplot("npv", files, opt, changes = {'folder': 'all', 'title': 'CMS preliminary'})
for n in ['1', '2']:
for quantity in ['pt', 'eta', 'phi']:
plot1d.datamcplot('mu%s%s' % (n, quantity), files, opt, changes = {'title': 'CMS preliminary'})
if n is '2' and quantity is 'eta':
plot1d.datamcplot('jet%s%s' % (n, quantity), files, opt, changes = {'nbins': 10, 'correction': 'L1L2L3', 'title': 'CMS preliminary'})
else:
plot1d.datamcplot('jet%s%s' % (n, quantity), files, opt, changes = {'correction': 'L1L2L3', 'title': 'CMS preliminary'})
for quantity in ['zpt', 'zeta', 'zy', 'zphi', 'zmass']:
plot1d.datamcplot(quantity, files, opt, changes = {'title': 'CMS preliminary'})
#response stuff
plotresponse.responseratio(files, opt, over='zpt', types=['mpf'],
changes={'y': [0.98, 1.03, 0.96, 1.03], 'x': [0, 400, 0, 400]})
plotresponse.responseratio(files, opt, over='jet1abseta', types=['mpf'],
changes={'y': [0.95, 1.1, 0.93, 1.1]})
plotresponse.responseratio(files, opt, over='npv', types=['mpf'],
changes={'y': [0.95, 1.05, 0.92, 1.03], 'x': [0, 35, 0, 35]})
plotresponse.responseratio(files, opt, over='zpt', types=['ptbalance'],
changes={'y': [0.93, 1.01, 0.96, 1.03], 'x': [0, 400, 0, 400]})
plotresponse.responseratio(files, opt, over='jet1abseta', types=['ptbalance'],
changes={'y': [0.91, 1.01, 0.93, 1.1]})
plotresponse.responseratio(files, opt, over='npv', types=['ptbalance'],
changes={'y': [0.91, 1.01, 0.92, 1.03], 'x': [0, 35, 0, 35]})
"""
for q in ['mpf', 'ptbalance']:
plot1d.datamcplot(q, files, opt, changes={'correction': 'L1L2L3',
'legloc': 'center right',
'nbins': 100,
'fit': 'gauss'})
plotresponse.extrapol(files, opt, changes={'save_individually': True,
'correction': 'L1L2L3'})
"""
plotfractions.fractions(files, opt, over='zpt', changes={'x': [0, 400], 'title': 'CMS preliminary'})
plotfractions.fractions(files, opt, over='jet1abseta', changes = {'title': 'CMS preliminary'})
plotfractions.fractions(files, opt, over='npv', changes = {'title': 'CMS preliminary'})
for changes in [{'rebin':10, 'title':'|$\eta^{\mathrm{jet}}$|<1.3'},
{'alleta':True, 'rebin':10,
'selection':'jet1abseta>2.5 && jet1abseta<2.964',
'title':'2.5<|$\eta^{\mathrm{jet}}$|<2.964'}]:
if 'alleta' in changes:
opt.out += '/ECOT'
opt.user_options['out'] += '/ECOT'
plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes, nbr=6)
plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes, nbr=6)
plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes, nbr=6)
plotresponse.response_run(files, opt, changes=changes)
opt.out = opt.out[:-5]
opt.user_options['out'] = opt.user_options['out'][:-5]
else:
plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes)
plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes)
plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes)
plotresponse.response_run(files, opt, changes=changes)
changes['y'] = [0.84, 1.2]
plot2d.twoD("qgtag_btag", files, opt,
changes = {'title': 'CMS Preliminary', 'nbins':50}
)
plot_tagging.tagging_response(files, opt)
plot_tagging.tagging_response_corrected(files, opt)
"""
## MCONLY
if len(files) > 1:
files = files[1:]
"""
# PF composition as function of mc flavour
flavour_comp(files, opt, changes={'title': 'CMS Simulation','mconly':True})
# response vs flavour
for var in [True, False]:
plotresponse.response_physflavour(files, opt,
changes={'title': 'CMS Simulation','mconly':True},
add_neutrinopt=var,
restrict_neutrals=var,
extrapolation=var)
plotfractions.flavour_composition(files, opt, changes={'title': 'CMS Simulation','mconly':True})
plotfractions.flavour_composition_eta(files, opt, changes={'title': 'CMS Simulation','mconly':True, 'selection': 'zpt>95 && zpt<110'})
changes = {'cutlabel' : 'ptetaalpha',
'labels' : ['Pythia 6 Tune Z2*', 'Herwig++ Tune EE3C'],
'y' : [0.98, 1.05],
'markers' : ['o', 'd'],
'colors' : ['red', 'blue'],
'title' : 'CMS Simulation',
'mconly' : True,
'legloc' : 'lower left',
'filename': 'recogen_physflavour_pythia-herwig'}
files += [getroot.openfile("/storage/a/dhaitz/excalibur/work/mc_herwig/out/closure.root")]
plot1d.datamcplot("recogen_physflavour", files, opt, changes=changes)
"""
def eleven(files, opt):
""" Summary of the plots for the response studies with 2011 rereco. """
runrange = [160000, 183000]
plot1d.datamcplot('npv', files, opt, changes={'rebin': 1})
plot1d.datamcplot('zmass', files, opt, changes={'fit': 'vertical', 'legloc': 'center right'})
plotresponse.extrapol(files, opt)
plotresponse.responseratio(files, opt, over='zpt', types=['mpf'],
changes={'y': [0.98, 1.03, 0.96, 1.03], 'uncertaintyband': True, 'x': [0, 400, 0, 400]})
plotresponse.responseratio(files, opt, over='jet1abseta', types=['mpf'],
changes={'y': [0.95, 1.1, 0.93, 1.1], 'uncertaintyband': True})
plotresponse.responseratio(files, opt, over='npv', types=['mpf'],
changes={'y': [0.95, 1.05, 0.92, 1.03], 'uncertaintyband': True, 'x': [0, 18, 0, 18]})
plotresponse.responseratio(files, opt, over='zpt', types=['ptbalance'],
changes={'y': [0.93, 1.01, 0.96, 1.03], 'x': [0, 400, 0, 400], 'uncertaintyband': True})
plotresponse.responseratio(files, opt, over='jet1abseta', types=['ptbalance'],
changes={'y': [0.91, 1.01, 0.93, 1.1], 'uncertaintyband': True})
plotresponse.responseratio(files, opt, over='npv', types=['ptbalance'],
changes={'y': [0.91, 1.01, 0.92, 1.03], 'x': [0, 18, 0, 18], 'uncertaintyband': True})
plot1d.datamcplot('npv_run', files, opt, changes={'x': runrange,
'y': [0, 15], 'run': True, 'fit': True})
plotfractions.fractions(files, opt, over='zpt', changes={'x': [0, 400]})
plotfractions.fractions(files, opt, over='jet1abseta')
plotfractions.fractions(files, opt, over='npv', changes={'x': [-0.5, 24.5]})
for changes in [{'x': runrange, 'rebin':10, 'title':'|$\eta^{\mathrm{jet}}$|<1.3'},
{'x': runrange, 'alleta':True, 'rebin':10,
'selection':'jet1abseta>2.5 && jet1abseta<2.964',
'title':'2.5<|$\eta^{\mathrm{jet}}$|<2.964'}]:
if 'alleta' in changes:
opt.out += '/ECOT'
opt.user_options['out'] += '/ECOT'
plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes, nbr=6)
plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes, nbr=6)
plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes, nbr=6)
else:
plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes)
plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes)
plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes)
changes['y'] = [0.84, 1.2]
plotresponse.response_run(files, opt, changes=changes)
def rootfile(files, opt):
"""Function for the rootfile sent to the JEC group in early August 2013."""
list_of_quantities = ['ptbalance_alpha', 'mpf_alpha',
'ptbalance', 'mpf', 'zpt', 'npv', 'zmass', 'zpt_alpha', 'npv_alpha',
'ptbalance_zpt', 'mpf_zpt',
'ptbalance_npv', 'mpf_npv',
]
for muon in [["zmumu", "1"], ["zmumu_muoncuts",
"(mupluspt>25 && muminuspt>25 && abs(mupluseta)<1.0 && abs(muminuseta)<1.0)"]]:
for alpha in [[0, "alpha<0.2", "alpha0_2"], [1, "alpha<0.3", "alpha0_3"],
[1, "alpha<0.4", "alpha0_4"]]:
for quantity in list_of_quantities:
changes = {'rebin': 1,
'out': 'out/root/',
'allalpha': True,
'root': "__".join([quantity, alpha[2]]),
'filename': muon[0],
'selection': "&&".join([alpha[1], muon[1]]),
}
if ("_zpt" in quantity) or ("_npv" in quantity):
changes['special_binning'] = True
if "alpha" in quantity:
changes['rebin'] = 10
plot1d.datamcplot(quantity, files, opt, changes=changes)
changes['ratio'] = True
changes['labels'] = ['ratio']
plot1d.datamcplot(quantity, files, opt, changes=changes)
def ineff(files, opt):
settings = plotbase.getSettings(opt, changes=None, settings=None, quantity="flavour_zpt")
fig, ax = plotbase.newPlot()
labels = ["no matching partons", "two matching partons"]
colors = ['red', 'blue']
markers = ['o', 'd']
changes = {'subplot': True,
'lumi': 0,
'xynames': ['zpt', 'physflavourfrac'],
'legloc': 'upper left',
}
for n, l, c, m in zip([0, 2], labels, colors, markers):
quantity = "(nmatchingpartons3==%s)_zpt" % n
changes['labels'] = [l]
changes['colors'] = c
changes['markers'] = m
plot1d.datamcplot(quantity, files, opt, fig_axes=(fig, ax), changes=changes, settings=settings)
settings['filename'] = plotbase.getDefaultFilename("physflavourfrac_zpt", opt, settings)
plotbase.Save(fig, settings['filename'], opt)
def flav(files, opt):
etabins = [0, 1.3, 2.5, 3, 3.2, 5.2]
etastrings = ['0-1_3', '1_3-2_5', '2_5-3', '3-3_2', '3_2-5_2']
flavourdefs = ["algoflavour", "physflavour"]
flavourdefinitions = ["algorithmic", "physics"]
flist = ["(flavour>0&&flavour<4)", "(flavour==1)", "(flavour==2)", "(flavour==3)",
"(flavour==4)", "(flavour==5)", "(flavour==21)", "(flavour==0)"]
q_names = ['uds', 'u', 'd', 's', 'c', 'b', 'gluon', 'unmatched']
changes = {}
############### FLAVOUR NOT 0!!!!!
# barrel:
"""changes['rebin'] = 1
changes['filename']="flavour"
changes['filename']="flavour"
for f_id, quantity in zip(['uds','c','b','gluon'], flist):
changes['root']=f_id
plot1d.datamcplot("%s_zpt" % quantity, files, opt, changes=changes)
"""
for flavourdef, flavourdefinition in zip(flavourdefs, flavourdefinitions):
# iterate over eta bins:
for filename, selection in zip(etastrings, getroot.etacuts(etabins)):
changes['filename'] = "_".join([filename, flavourdefinition])
changes['alleta'] = True
changes['selection'] = "%s && %s" % (selection,
"alpha<0.2")
changes['rebin'] = 1
for f_id, quantity in zip(q_names, flist):
changes['root'] = f_id
plot1d.datamcplot("%s_zpt" % quantity.replace("flavour",
flavourdef), files, opt, changes=changes)
def gif(files, opt):
local_opt = copy.deepcopy(opt)
runlist = listofruns.runlist[::10]
for run, number in zip(runlist, range(len(runlist))):
local_opt.lumi = (run - 190456) * 19500 / (209465 - 190456)
print
plotbase.plot1d.datamcplot('balresp', files, local_opt,
changes={'var': 'var_RunRange_0to%s' % run}, filename="%03d" % number)
def closure(files, opt):
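    # Helpers below operate on (value, error) tuples and use standard Gaussian
    # error propagation: for R = a/b as well as R = a*b the relative errors add
    # in quadrature, err_R = R * sqrt((err_a/a)**2 + (err_b/b)**2).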
    def divide((a, a_err), (b, b_err)):
        if (b != 0.0):
            R = a / b
            Rerr = R * math.sqrt((a_err / a) ** 2 + (b_err / b) ** 2)
        else:
            # avoid a ZeroDivisionError in the error term when b is zero
            R, Rerr = 0, 0
        return R, Rerr
def multiply((a, a_err), (b, b_err)):
R = a * b
Rerr = R * math.sqrt((a_err / a) ** 2 + (b_err / b) ** 2)
return R, Rerr
changes = {}
changes = plotbase.getchanges(opt, changes)
#get extrapol factors with alpha 035
#changes['var']='var_CutSecondLeadingToZPt_0_4'
#changes['correction']='L1L2L3'
balresp = (getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMeanError())
mpfresp = (getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMeanError())
genbal = (getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMeanError())
intercept, ierr, slope, serr, chi2, ndf, conf_intervals = getroot.fitline2(getroot.getobjectfromnick('ptbalance_alpha', files[0], changes, rebin=1))
balresp_extrapol = (intercept, conf_intervals[0])
extrapol_reco_factor = divide(balresp_extrapol, balresp)
intercept2, ierr2, slope2, serr2, chi22, ndf2, conf_intervals2 = getroot.fitline2(getroot.getobjectfromnick('genbalance_genalpha', files[0], changes, rebin=1))
genbal_extrapol = (intercept2, conf_intervals2[0])
extrapol_gen_factor = divide(genbal_extrapol, genbal)
intercept3, ierr3, slope3, serr3, chi23, ndf3, conf_intervals3 = getroot.fitline2(getroot.getobjectfromnick('mpf_alpha', files[0], changes, rebin=1))
mpf_extrapol = (intercept3, conf_intervals3[0])
extrapol_mpf_factor = divide(mpf_extrapol, mpfresp)
#del changes['var']
#del changes['correction']
#other quantities with alpha 02
recogen = (getroot.getobjectfromnick('recogen', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('recogen', files[0], changes, rebin=1).GetMeanError())
zresp = (getroot.getobjectfromnick('zresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('zresp', files[0], changes, rebin=1).GetMeanError())
balresp = (getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMeanError())
mpfresp = (getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMeanError())
mpfresp_raw = (getroot.getobjectfromnick('mpfresp-raw', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('mpfresp-raw', files[0], changes, rebin=1).GetMeanError())
genbal = (getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMeanError())
balparton = (getroot.getobjectfromnick('balparton', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('balparton', files[0], changes, rebin=1).GetMeanError())
partoncorr = divide(balparton, genbal)
format = "%1.4f"
print changes
print ""
print (r"balresp reco %s +- %s" % (format, format)) % balresp
print (r"mpf %s +- %s" % (format, format)) % mpfresp
print (r"balparton %s +- %s" % (format, format)) % balparton
print (r"zresp %s +- %s" % (format, format)) % zresp
print (r"recogen %s +- %s" % (format, format)) % recogen
print (r"extrapolReco_factor %s +- %s" % (format, format)) % extrapol_reco_factor
print (r"extrapolGen_factor %s +- %s" % (format, format)) % extrapol_gen_factor
print (r"extrapolMPF_factor %s +- %s" % (format, format)) % extrapol_mpf_factor
print (r"parton/genjet %s +- %s" % (format, format)) % divide(balparton, genbal)
print ""
print (r"pTgenjet / pTgenZ %s +- %s" % (format, format)) % genbal
genbal = multiply(genbal, extrapol_gen_factor)
print (r"* gen Level extrapolation %s +- %s" % (format, format)) % genbal
#genbal = multiply(genbal, partoncorr)
#print (r"* pTparton/pTgenjet correction %s +- %s" % (format, format) ) % genbal
#genbal = divide(genbal, balparton)
#print (r"* pTparton/pTZ correction %s +- %s" % (format, format) ) % genbal
reco_bal = divide(multiply(genbal, recogen), zresp)
print (r"* GenToReco for Jet and Z %s +- %s" % (format, format)) % reco_bal
print ""
print (r"pTrecojet / pTrecoZ %s +- %s" % (format, format)) % balresp
balresp = multiply(balresp, extrapol_reco_factor)
print (r"* reco Level extrapolation %s +- %s" % (format, format)) % balresp
print ""
print (r"MPF (typeI) %s +- %s" % (format, format)) % mpfresp
#mpfresp = divide(mpfresp, zresp)
#print (r"MPF (GenZ) %s +- %s" % (format, format) ) % mpfresp
mpfresp = multiply(mpfresp, extrapol_mpf_factor)
print (r"MPF (extrapol) %s +- %s" % (format, format)) % mpfresp
print (r"MPF (Raw) %s +- %s" % (format, format)) % mpfresp_raw
def extrapola(files, opt):
fig, ax = plotbase.newPlot()
changes = {}
changes['var'] = "_var_CutSecondLeadingToZPt_0_3"
local_opt = copy.deepcopy(opt)
rebin = 5
if opt.rebin is not None:
rebin = opt.rebin
plot1d.datamcplot('ptbalance_alpha', files, local_opt, legloc='upper center',
changes=changes, rebin=rebin, subplot=True,
subtext="", fig_axes=(fig, ax), fit='intercept', ratio=False)
local_opt.colors = ['red', 'maroon']
plot1d.datamcplot('mpf_alpha', files, local_opt, legloc='upper center',
changes=changes, rebin=rebin, subplot=True, xy_names=['alpha', 'response'],
subtext="", fig_axes=(fig, ax), fit='intercept', ratio=False, fit_offset=-0.1)
file_name = plotbase.getDefaultFilename("extrapolation_", opt, changes)
plotbase.Save(fig, file_name, opt)
# function for comparing old and new corrections
def comparison(datamc, opt):
"""file_names = [
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root'
]"""
colors = ['red', 'blue', 'blue', 'red']
markers = ['*', 'o', 'o', '*']
#labels = [['MC_52xFast', 'data_52xFast'], ['MC_52xOff', 'data_52xOff'], ['MC_53xFast', 'data_53xFast'], ['MC_53xOff', 'data_53xOff']]
rebin = 1
import copy
file_names = [
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root',
]
labels = [['MC_52xFast', 'data_52xFast'], ['MC_53xFast', 'data_53xFast'], ['MC_52xOff', 'data_52xOff'], ['MC_53xOff', 'data_53xOff']]
files = []
for f in file_names:
files += [getroot.openfile(f, opt.verbose)]
local_opt = copy.deepcopy(opt)
local_opt.style = markers
local_opt.colors = colors
quantity = 'L1abs_npv'
# ALL
fig, axes = plotbase.newPlot(subplots=4)
for a, f1, f2, l in zip(axes, files[::2], files[1::2], labels):
local_opt.labels = l
datamcplot(quantity, (f1, f2), local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, a),
rebin=rebin, subplot=True, subtext="")
filename = "L1_all__" + opt.algorithm
plotbase.Save(fig, filename, opt)
"""
#Fastjet vs Offset
fig = plotbase.plt.figure(figsize=(14,7))
axes = [fig.add_subplot(1,2,n) for n in [1,2]]
local_opt.labels = labels[0]
local_opt.colors = ['blue', 'blue']
datamcplot(quantity, (files[0], files[1]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]),
rebin=rebin, subplot=True, subtext="")
local_opt.labels = labels[1]
local_opt.colors = ['red', 'red']
datamcplot(quantity, (files[2], files[3]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]),
rebin=rebin, subplot=True, subtext="")
#53
local_opt.labels = labels[2]
local_opt.colors = ['blue', 'blue']
datamcplot(quantity, (files[4], files[5]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]),
rebin=rebin, subplot=True, subtext="")
local_opt.labels = labels[3]
local_opt.colors = ['red', 'red']
datamcplot(quantity, (files[6], files[7]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]),
rebin=rebin, subplot=True, subtext="")
filename = "L1_Fastjet_vs_Offset__"+opt.algorithm
plotbase.Save(fig, filename, opt)
#52X vs 53X
fig = plotbase.plt.figure(figsize=(14,7))
axes = [fig.add_subplot(1,2,n) for n in [1,2]]
local_opt.labels = labels[0]
local_opt.colors = ['blue', 'blue']
datamcplot(quantity, (files[0], files[1]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]),
rebin=rebin, subplot=True, subtext="")
local_opt.labels = labels[2]
local_opt.colors = ['red', 'red']
datamcplot(quantity, (files[4], files[5]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]),
rebin=rebin, subplot=True, subtext="")
local_opt.labels = labels[1]
local_opt.colors = ['blue', 'blue']
datamcplot(quantity, (files[2], files[3]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]),
rebin=rebin, subplot=True, subtext="")
#
local_opt.labels = labels[3]
local_opt.colors = ['red', 'red']
datamcplot(quantity, (files[6], files[7]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]),
rebin=rebin, subplot=True, subtext="")
filename = "L1_52X_vs_53X__"+opt.algorithm
plotbase.Save(fig, filename, opt)
import plotresponse
file_names = [
'/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root',
]
labels = [['data_52xFast', 'MC_52xFast'], [ 'data_53xFast', 'MC_53xFast'], [ 'data_52xOff', 'MC_52xOff'], ['data_53xOff', 'MC_53xOff']]
files=[]
for f in file_names:
files += [getroot.openfile(f, opt.verbose)]
for over, fit in zip(['zpt', 'jet1eta', 'npv'], [True, False, True]):
fig, axes= plotbase.newPlot(subplots=4)
fig2, axes2= plotbase.newPlot(subplots=4)
for a1, a2, f1, f2, l in zip(axes, axes2, files[::2], files[1::2], labels):
local_opt.labels = l
changes ={}# {'correction':'L1L2L3'}
plotresponse.responseplot((f1, f2), local_opt, ['bal', 'mpf'], over=over, changes=changes, figaxes=(fig,a1),
subplot=True, subtext="")
plotresponse.ratioplot((f1, f2), local_opt, ['bal', 'mpf'], over=over, changes=changes, figaxes=(fig2 ,a2), fit=fit,
subplot=True, subtext="")
filename = "Response_"+over+"_all__"+opt.algorithm
plotbase.Save(fig, filename, opt)
filename = "Ratio_"+over+"_all__"+opt.algorithm
plotbase.Save(fig2, filename, opt)"""
# function for 2d grid plots
"""def twoD_all_grid(quantity, datamc, opt):
pt_thresholds = [12, 16, 20, 24, 28, 32, 36]
var_list = ['var_JetPt_%1.fto%1.f' % (s1, s2) for (s1, s2) in zip(pt_thresholds, [1000, 1000, 1000, 1000, 1000, 1000, 1000])]
var_list_2 = getroot.npvstrings(opt.npv)
fig = plt.figure(figsize=(10.*len(var_list), 7.*len(var_list_2)))
grid = AxesGrid(fig, 111,
nrows_ncols = (len(var_list), len(var_list_2)),
axes_pad = 0.4,
share_all=True,
label_mode = "L",
#aspect = True,
#cbar_pad = 0,
#cbar_location = "right",
#cbar_mode='single',
)
for n1, var1 in enumerate(var_list):
for n2, var2 in enumerate(var_list_2):
change = {'var':var1+"_"+var2}
index = len(var_list_2)*n1 + n2
change['incut']='allevents'
twoD(quantity, datamc, opt, changes=change, fig_axes = [fig, grid[index]], subplot = True, axtitle = change['var'].replace('var_', ''))
for grid_element, var_strings in zip(grid, opt.npv):
text = r"$%s\leq\mathrm{NPV}\leq%s$" % var_strings
grid_element.text(0.5, 5.5, text, ha='center', va='center', size ='40')
for grid_element, pt_threshold in zip(grid[::len(var_list_2)], pt_thresholds):
text = r"$p_\mathrm{T}^\mathrm{Jet1}$"+"\n"+r"$\geq%s\mathrm{GeV}$" % pt_threshold
grid_element.text(-8.7, 0, text, ha='left', va='center', size ='30')
#fig.suptitle("%s leading jet $\eta-\phi$ distribution ($before$ cuts) for %s %s" % (opt.labels[0], opt.algorithm, opt.correction), size='50')
fig.suptitle("%s %s $\eta-\phi$ distribution ($before$ cuts) for %s %s" % (opt.labels[0], quantity[7:-16], opt.algorithm, opt.correction), size='30')
file_name = "grid_"+opt.labels[0]+"_"+quantity +"_"+opt.algorithm + opt.correction
fig.set_figwidth(fig.get_figwidth() * 1.2)
plotbase.Save(fig, file_name, opt, crop=False, pad=1.5)"""
def Fall12(files, opt):
local_opt = copy.deepcopy(opt)
filelist = [
['/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_V4/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root']
]
labellist = [['data_Summer12', 'MC_Summer12'], ['data_Fall12V1', 'MC_Fall12V1'], ['data_Fall12V4', 'MC_Fall12V4']]
over = 'zpt'
for over in ['zpt', 'npv', 'jet1eta']:
fig = plotbase.plt.figure(figsize=[21, 14])
fig.suptitle(opt.title, size='xx-large')
for typ, row in zip(['bal', 'mpf'], [0, 4]):
for filenames, labels, col in zip(filelist, labellist, [0, 1, 2]):
ax1 = plotbase.plt.subplot2grid((7, 3), (row, col), rowspan=2)
ax2 = plotbase.plt.subplot2grid((7, 3), (row + 2, col))
fig.add_axes(ax1)
fig.add_axes(ax2)
if over == 'jet1eta' and typ == 'bal':
legloc = 'upper right'
else:
legloc = 'lower left'
local_opt.labels = labels
files = []
for f in filenames:
files += [getroot.openfile(f, opt.verbose)]
plotresponse.responseplot(files, local_opt, [typ], over=over, figaxes=(fig, ax1), legloc=legloc, subplot=True)
plotresponse.ratioplot(files, local_opt, [typ], binborders=True, fit=True, over=over, subplot=True, figaxes=(fig, ax2), ratiosubplot=True)
fig.subplots_adjust(hspace=0.05)
ax1.set_xticks([])
ax1.set_xlabel("")
ax2.set_yticks([1.00, 0.95, 0.90])
if col > 0:
ax1.set_ylabel("")
ax2.set_ylabel("")
title = "" # " Jet Response ($p_T$ balance / MPF) vs. Z $p_T$, $N_{vtx}$ , Jet $\eta$ (" +opt.algorithm+" "+opt.correction+")"
fig.suptitle(title, size='x-large')
file_name = "comparison_ALL_" + over + opt.algorithm + opt.correction
plotbase.Save(fig, file_name, opt)
def factors(files, opt):
local_opt = copy.deepcopy(opt)
filelist = [
['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4_L1Offset/out/closure.root']
]
labellist = [
['Data FastJet V1', 'MC FastJet V1', 'Data Offset V1', 'MC Offset V1'],
['Data FastJet V4', 'MC FastJet V4', 'Data Offset V4', 'MC Offset V4']]
"""filelistt = [
['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4_L1Offset/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4_L1Offset/out/closure.root']
]
labellistt = ['Data FastJet V1', 'Data FastJet V4'], ['MC FastJet V1', 'MC FastJet V4'], ['Data Offset V1', 'Data Offset V4'], ['MC Offset V1','MC Offset V4'
]]
names = ['DataV1', 'MCV1', 'DataV4', 'MCV4' ]"""
files = []
#for sublist in filelist:
# rootfiles = [getroot.openfile(f, opt.verbose) for f in sublist]
# files.append( rootfiles)
for sublist in filelist:
files.append([getroot.openfile(f, opt.verbose) for f in sublist])
fit = None
rebin = 1
# for files, labellist, name in zip(files, labellist, names)
fig, axes = plotbase.newPlot(subplots=2)
quantity = 'L1abs_npv'
local_opt.style = ['o', '*', 'o', '*']
local_opt.labels = labellist[0]
local_opt.colors = ['blue', 'blue', 'red', 'red']
plot1d.datamcplot(quantity, files[0], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[0]), fit=fit,
rebin=rebin, subplot=True, subtext="")
local_opt.labels = labellist[1]
plot1d.datamcplot(quantity, files[1], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[1]), fit=fit,
rebin=rebin, subplot=True, subtext="")
file_name = "L1_comparison_" # +name
plotbase.Save(fig, file_name, opt)
def factors2(files, opt):
local_opt = copy.deepcopy(opt)
filelist = [
['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_V4/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_V4_L1Offset/out/closure.root'],
['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root',
'/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4_L1Offset/out/closure.root']
]
labellistt = [['data FastJet V1', 'data FastJet V4'], ['MC FastJet V1', 'MC FastJet V4'], ['data Offset V1', 'data Offset V4'], ['MC Offset V1', 'MC Offset V4']
]
names = ['dataV1', 'MCV1', 'dataV4', 'MCV4']
files = []
for sublist in filelist:
rootfiles = [getroot.openfile(f, opt.verbose) for f in sublist]
files.append(rootfiles)
#print files
fit = 'chi2_linear'
rebin = 1
fit_offset = -0.1
for files, labellist, name in zip(files, labellistt, names):
print labellist
fig, axes = plotbase.newPlot(subplots=2)
quantity = 'L1abs_npv'
local_opt.style = ['o', '*', 'o', '*']
local_opt.labels = [labellist[0]]
local_opt.colors = ['blue', 'blue', 'red', 'red']
plot1d.datamcplot(quantity, [files[0]], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[0]), fit=fit,
rebin=rebin, fit_offset=fit_offset, subplot=True, subtext="")
local_opt.labels = [labellist[1]]
plot1d.datamcplot(quantity, [files[1]], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[1]), fit=fit,
rebin=rebin, fit_offset=fit_offset, subplot=True, subtext="")
file_name = "L1_comparison_" + name
plotbase.Save(fig, file_name, opt)
import ROOT
def allpu(files, opt, truth=True):
print files
settings = plotbase.getSettings(opt, quantity='npu')
#print settings
print settings['folder']
name = "_".join([settings['folder'], settings['algorithm'] + settings['correction']])
print name, files[1]
name = name.replace("Res", "")
t = files[1].Get(name)
if not t:
print "no tree", name, t.GetName()
exit(1)
    # histograms: data pile-up distribution, raw MC, and reweighted MC
if truth:
histos = [getroot.getobject("pileup", files[2])]
else:
histos = [getroot.getobject("pileup;2", files[2])]
histos[-1].Rebin(10)
print histos[-1].GetNbinsX(), "pu2"
histos[0].SetTitle("Data")
histos += [ROOT.TH1D("mcraw", "MC", 1600, 0, 80)]
if truth:
histos += [ROOT.TH1D("mcraw", "MC", 1600, 0, 80)]
t.Project("mcraw", "nputruth")
else:
histos += [ROOT.TH1D("mcraw", "MC", 80, 0, 80)]
t.Project("mcraw", "npu")
if truth:
histos += [ROOT.TH1D("mcwei", "MC'", 1600, 0, 80)]
t.Project("mcwei", "nputruth", "weight")
else:
histos += [ROOT.TH1D("mcwei", "MC'", 80, 0, 80)]
t.Project("mcwei", "npu")
binning = [[0, 1, 2, 3.5, 5], range(45, 80)]
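    # smooth the histograms: within each range listed in 'binning', replace
    # the bin contents by their average over that range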
for h in histos:
if h.GetNbinsX() > 1000:
h.Rebin()
if h.GetNbinsX() > 82:
print h.GetNbinsX(), ">82! in", h.GetTitle()
if not truth:
break
print "rebin:", binning
b = binning
if histos.index(h) == 1:
b = binning + [range(5, 46)]
print b
for l in b:
for a, b in zip(l[:-1], l[1:]):
x1 = h.FindBin(a)
x2 = h.FindBin(b)
sumh = sum([h.GetBinContent(i) for i in range(x1, x2)]) / (x2 - x1)
for i in range(x1, x2):
h.SetBinContent(i, sumh)
if truth:
f = histos[1].Integral() / histos[1].Integral(histos[1].FindBin(8), histos[1].FindBin(40))
for i in range(3 + 0 * len(histos)):
#histos[i].Rebin(4)
print i
ff = f / histos[i].Integral(histos[i].FindBin(8), histos[i].FindBin(40))
ff = 1.0 / histos[i].Integral()
histos[i].Scale(ff)
histos += [histos[0].Clone("dataraw")]
histos[-1].SetTitle("Data/MC")
histos[-1].Divide(histos[1])
if len(files) > 3:
histos += [getroot.getobject("pileup", files[3])]
histos[-1].SetTitle("weight")
histos += [histos[2].Clone("rawmc")]
histos[-1].Divide(histos[1])
histos[-1].SetTitle("MC'/MC")
histos += [histos[0].Clone("datamc")]
histos[-1].Divide(histos[2])
histos[-1].SetTitle("Data/MC'")
plots = [getroot.root2histo(h) for h in histos]
fig, ax, ratio = plotbase.newPlot(ratio=True)
fig = plotbase.plt.figure(figsize=[7, 10])
ax = plotbase.plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax.number = 1
ratio = plotbase.plt.subplot2grid((3, 1), (2, 0))
ratio.number = 2
fig.add_axes(ax)
fig.add_axes(ratio)
fig.subplots_adjust(hspace=0.05)
colors = ['black', 'navy', 'red', 'green']
for p, c in zip(plots[:3], colors):
ax.errorbar(p.x, p.y, label=p.title, drawstyle='steps-post', color=c, lw=1.6)
colors[1] = 'gray'
for p, c in zip(plots[3:], colors):
r = ratio.errorbar(p.x, p.y, label=p.title, drawstyle='steps-post', color=c, lw=1.6)
plotbase.labels(ax, opt, settings, settings['subplot'])
plotbase.axislabels(ax, r"$n_\mathrm{PU}", settings['xynames'][1], settings=settings)
xaxistext = r"observed number of pile-up interactions $n_\mathrm{PU}$"
if truth:
xaxistext = xaxistext.replace("observed", "true")
plotbase.axislabels(ratio, xaxistext, "ratio", settings=settings)
print ratio.number, r
plotbase.setAxisLimits(ax, settings)
plotbase.labels(ratio, opt, settings, settings['subplot'])
plotbase.setAxisLimits(ratio, settings)
#handles, labels = ratio.get_legend_handles_labels()
ratio.legend(bbox_to_anchor=[0.8, 1], loc='upper center')
ax.set_xticklabels([])
ax.set_xlabel("")
settings['filename'] = plotbase.getDefaultFilename("npus", opt, settings)
plotbase.Save(fig, settings)
def pu(files, opt):
allpu(files, opt)
def puobserved(files, opt):
allpu(files, opt, False)
| gpl-2.0 |
thypad/brew | skensemble/generation/bagging.py | 3 | 2140 | import numpy as np
from sklearn.ensemble import BaggingClassifier
from brew.base import Ensemble
from brew.combination.combiner import Combiner
import sklearn
from .base import PoolGenerator
class Bagging(PoolGenerator):
def __init__(self,
base_classifier=None,
n_classifiers=100,
combination_rule='majority_vote'):
self.base_classifier = base_classifier
self.n_classifiers = n_classifiers
self.ensemble = None
self.combiner = Combiner(rule=combination_rule)
def fit(self, X, y):
self.ensemble = Ensemble()
for _ in range(self.n_classifiers):
# bootstrap
idx = np.random.choice(X.shape[0], X.shape[0], replace=True)
data, target = X[idx, :], y[idx]
classifier = sklearn.base.clone(self.base_classifier)
classifier.fit(data, target)
self.ensemble.add(classifier)
return
def predict(self, X):
out = self.ensemble.output(X)
return self.combiner.combine(out)
class BaggingSK(PoolGenerator):
""""
This class should not be used, use brew.generation.bagging.Bagging instead.
"""
def __init__(self,
base_classifier=None,
n_classifiers=100,
combination_rule='majority_vote'):
self.base_classifier = base_classifier
self.n_classifiers = n_classifiers
# using the sklearn implementation of bagging for now
self.sk_bagging = BaggingClassifier(base_estimator=base_classifier,
n_estimators=n_classifiers,
max_samples=1.0,
max_features=1.0)
self.ensemble = Ensemble()
self.combiner = Combiner(rule=combination_rule)
def fit(self, X, y):
self.sk_bagging.fit(X, y)
self.ensemble.add_classifiers(self.sk_bagging.estimators_)
# self.classes_ = set(y)
def predict(self, X):
out = self.ensemble.output(X)
return self.combiner.combine(out)
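
# Minimal usage sketch (not part of the original module): it assumes
# scikit-learn and its iris dataset are available, and simply exercises the
# bootstrap-based Bagging pool generator defined above.
if __name__ == '__main__':
    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier

    iris = load_iris()
    pool = Bagging(base_classifier=DecisionTreeClassifier(),
                   n_classifiers=25,
                   combination_rule='majority_vote')
    pool.fit(iris.data, iris.target)
    print(pool.predict(iris.data[:5]))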
| mit |
steven-murray/pydftools | setup.py | 1 | 2408 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
import io
import os
import re
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
requirements = [
"scipy",
"numpy>=1.6.2",
"Click>=6.0",
"attrs>=17.0",
"cached_property",
"chainconsumer",
"matplotlib"
# TODO: put package requirements here
]
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8"),
) as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup_requirements = [
"pytest-runner",
# TODO(steven-murray): put setup requirements (distutils extensions, etc.) here
]
test_requirements = [
"pytest",
# TODO: put package test requirements here
]
setup(
name="pydftools",
version=find_version("pydftools", "__init__.py"),
description="A pure-python port of the dftools R package.",
long_description=readme + "\n\n" + history,
author="Steven Murray",
author_email="steven.murray@curtin.edu.au",
url="https://github.com/steven-murray/pydftools",
packages=find_packages(include=["pydftools"]),
entry_points={"console_scripts": ["pydftools=pydftools.cli:main"]},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords="pydftools",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
test_suite="tests",
tests_require=test_requirements,
setup_requires=setup_requirements,
)
| mit |
mxjl620/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have the following installed:

* scikit-learn

Runs two benchmarks.

In the first benchmark, we fix the number of features, increase the number
of training samples and plot the time needed to fit and predict as a
function of the number of samples.

In the second benchmark, we increase the number of dimensions of the
training set and plot the time taken as a function of the number of
dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
wenhuchen/ETHZ-Bootstrapped-Captioning | visual-concepts/coco/PythonAPI/pycocotools/coco.py | 1 | 16953 | __author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import urllib
import copy
import itertools
import mask
import os
from collections import defaultdict
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if not annotation_file == None:
print 'loading annotations into memory...'
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, "annotation file format %s not supported"%(type(dataset))
print 'Done (t=%0.2fs)'%(time.time()- tic)
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print 'creating index...'
anns,cats,imgs = dict(),dict(),dict()
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print 'index created!'
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print '%s: %s'%(key, value)
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
        Get cat ids that satisfy given filter conditions. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
        Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception("datasetType not supported")
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((len(seg)/2, 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = mask.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print ann['caption']
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
print 'Loading and preparing results... '
tic = time.time()
if type(resFile) == str or type(resFile) == unicode:
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = mask.area([ann['segmentation']])[0]
if not 'bbox' in ann:
ann['bbox'] = mask.toBbox([ann['segmentation']])[0]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1-x0)*(y1-y0)
ann['id'] = id + 1
ann['bbox'] = [x0,y0,x1-x0,y1-y0]
print 'DONE (t=%0.2fs)'%(time.time()- tic)
res.dataset['annotations'] = anns
res.createIndex()
return res
def download( self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
        :param tarDir (str): target directory to save the downloaded images to
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print 'Please specify target directory'
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urllib.urlretrieve(img['coco_url'], fname)
print 'downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic)
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print("Converting ndarray to lists...")
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print("%d/%d" % (i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
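
# Minimal usage sketch (not part of the upstream file). The annotation path
# below is a placeholder; any instances-style COCO annotation file will do.
if __name__ == '__main__':
    coco_api = COCO('annotations/instances_val2014.json')
    catIds = coco_api.getCatIds(catNms=['person'])
    imgIds = coco_api.getImgIds(catIds=catIds)
    annIds = coco_api.getAnnIds(imgIds=imgIds[:1], catIds=catIds, iscrowd=None)
    print('loaded %d annotations for the first image with a person'
          % len(coco_api.loadAnns(annIds)))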
| bsd-3-clause |
wanggang3333/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running
an SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
sammcveety/incubator-beam | sdks/python/apache_beam/examples/complete/juliaset/juliaset/juliaset.py | 8 | 4457 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Julia set computing workflow: https://en.wikipedia.org/wiki/Julia_set.
We use the quadratic polynomial f(z) = z*z + c, with c = -.62772 + .42193i
"""
from __future__ import absolute_import
import argparse
import apache_beam as beam
from apache_beam.io import WriteToText
def from_pixel(x, y, n):
"""Converts a NxN pixel position to a (-1..1, -1..1) complex number."""
return complex(2.0 * x / n - 1.0, 2.0 * y / n - 1.0)
def get_julia_set_point_color(element, c, n, max_iterations):
"""Given an pixel, convert it into a point in our julia set."""
x, y = element
z = from_pixel(x, y, n)
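  # Escape-time iteration: count how many applications of f(z) = z*z + c
  # keep z inside the escape radius before the orbit diverges.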
for i in xrange(max_iterations):
if z.real * z.real + z.imag * z.imag > 2.0:
break
z = z * z + c
return x, y, i # pylint: disable=undefined-loop-variable
def generate_julia_set_colors(pipeline, c, n, max_iterations):
"""Compute julia set coordinates for each point in our set."""
def point_set(n):
for x in range(n):
for y in range(n):
yield (x, y)
julia_set_colors = (pipeline
| 'add points' >> beam.Create(point_set(n))
| beam.Map(
get_julia_set_point_color, c, n, max_iterations))
return julia_set_colors
def generate_julia_set_visualization(data, n, max_iterations):
"""Generate the pixel matrix for rendering the julia set as an image."""
import numpy as np # pylint: disable=wrong-import-order, wrong-import-position
colors = []
for r in range(0, 256, 16):
for g in range(0, 256, 16):
for b in range(0, 256, 16):
colors.append((r, g, b))
xy = np.zeros((n, n, 3), dtype=np.uint8)
for x, y, iteration in data:
xy[x, y] = colors[iteration * len(colors) / max_iterations]
return xy
def save_julia_set_visualization(out_file, image_array):
"""Save the fractal image of our julia set as a png."""
from matplotlib import pyplot as plt # pylint: disable=wrong-import-order, wrong-import-position
plt.imsave(out_file, image_array, format='png')
def run(argv=None): # pylint: disable=missing-docstring
parser = argparse.ArgumentParser()
parser.add_argument('--grid_size',
dest='grid_size',
default=1000,
help='Size of the NxN matrix')
parser.add_argument(
'--coordinate_output',
dest='coordinate_output',
required=True,
help='Output file to write the color coordinates of the image to.')
parser.add_argument('--image_output',
dest='image_output',
default=None,
help='Output file to write the resulting image to.')
known_args, pipeline_args = parser.parse_known_args(argv)
with beam.Pipeline(argv=pipeline_args) as p:
n = int(known_args.grid_size)
coordinates = generate_julia_set_colors(p, complex(-.62772, .42193), n, 100)
# Group each coordinate triplet by its x value, then write the coordinates
# to the output file with an x-coordinate grouping per line.
# pylint: disable=expression-not-assigned
(coordinates
| 'x coord key' >> beam.Map(lambda (x, y, i): (x, (x, y, i)))
| 'x coord' >> beam.GroupByKey()
| 'format' >> beam.Map(
lambda (k, coords): ' '.join('(%s, %s, %s)' % c for c in coords))
| WriteToText(known_args.coordinate_output))
# Optionally render the image and save it to a file.
# TODO(silviuc): Add this functionality.
# if p.options.image_output is not None:
# julia_set_image = generate_julia_set_visualization(
# file_with_coordinates, n, 100)
# save_julia_set_visualization(p.options.image_output, julia_set_image)
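
# Minimal invocation sketch (not part of the upstream module; the packaged
# example normally drives run() from a separate main script). The grid size
# and output path below are placeholder values.
if __name__ == '__main__':
  run(['--grid_size', '100',
       '--coordinate_output', '/tmp/juliaset_coordinates.txt'])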
| apache-2.0 |
equialgo/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 73 | 2074 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, color='lightgreen', linewidth=2,
label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
massmutual/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) is not necessarily classified
into the region it lies in, since it will not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second y axis for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
fspaolo/scikit-learn | sklearn/utils/extmath.py | 3 | 18665 | """
Extended math utilities.
"""
# Authors: G. Varoquaux, A. Gramfort, A. Passos, O. Grisel
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from distutils.version import LooseVersion
from . import check_random_state
from .fixes import qr_economic
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .validation import array2d, NonBLASDotWarning
def norm(v):
v = np.asarray(v)
__nrm2, = linalg.get_blas_funcs(['nrm2'], [v])
return __nrm2(v)
def _fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(np.linalg.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
# XXX: Should be implemented as in numpy, using ATLAS
# http://projects.scipy.org/numpy/browser/ \
# trunk/numpy/linalg/linalg.py#L1559
ld = np.sum(np.log(np.diag(A)))
a = np.exp(ld / A.shape[0])
d = np.linalg.det(A / a)
ld += np.log(d)
if not np.isfinite(ld):
return -np.inf
return ld
def _fast_logdet_numpy(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
# Numpy >= 1.5 provides a fast logdet
if hasattr(np.linalg, 'slogdet'):
fast_logdet = _fast_logdet_numpy
else:
fast_logdet = _fast_logdet
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return array2d(X.T, copy=False, order='F'), True
else:
return array2d(X, copy=False, order='F'), False
def _fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
input matrices. Matrices are supposed to be of the same types
and to have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
msg = ('Invalid array shapes: A.shape[%d] should be the same as '
'B.shape[0]. Got A.shape=%r B.shape=%r' % (A.ndim - 1,
A.shape,
B.shape))
raise ValueError(msg)
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
return np.dot(A, B)
if ((min(A.shape) == 1) or (min(B.shape) == 1) or
(A.ndim != 2) or (B.ndim != 2)):
        warnings.warn('Data must be 2D with more than one column / row. '
                      'Falling back to np.dot.', NonBLASDotWarning)
return np.dot(A, B)
dot = linalg.get_blas_funcs('gemm', (A, B))
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
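# Illustrative usage sketch (editor's note, not part of the original module):
# `fast_dot` is intended as a drop-in replacement for np.dot on 2D float
# arrays. The arrays below are hypothetical; with mismatched dtypes or 1-D
# inputs the function silently falls back to np.dot, as documented above.
#
#     import numpy as np
#     from sklearn.utils.extmath import fast_dot
#     A = np.random.rand(500, 50)
#     B = np.random.rand(50, 200)
#     C = fast_dot(A, B)   # BLAS gemm on older numpy builds, plain np.dot otherwise
#     assert np.allclose(C, np.dot(A, B))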
# only try to use fast_dot for older numpy versions.
# the related issue has since been addressed. Also, depending on the build,
# the current numpy master's dot can be about 3 times faster.
if LooseVersion(np.__version__) < '1.7.2': # backported
try:
linalg.get_blas_funcs('gemm')
fast_dot = _fast_dot
except (ImportError, AttributeError):
fast_dot = np.dot
warnings.warn('Could not import BLAS, falling back to np.dot')
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
from scipy import sparse
if sparse.issparse(a) or sparse.issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
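# Illustrative usage sketch (editor's note, not part of the original module):
# when either argument is a scipy.sparse matrix the product is computed with
# '*' on the sparse operands; dense_output=True converts any sparse result to
# an ndarray. The matrices below are hypothetical.
#
#     import numpy as np
#     from scipy import sparse
#     from sklearn.utils.extmath import safe_sparse_dot
#     X = sparse.rand(100, 20, density=0.1, format='csr')
#     W = np.random.rand(20, 5)
#     S = safe_sparse_dot(X, W)                     # product as returned by scipy
#     D = safe_sparse_dot(X, W, dense_output=True)  # guaranteed dense ndarray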
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
        An (A.shape[0] x size) matrix with orthonormal columns, the range of
        which approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
    # sampling the range of A by linear projection of R
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = qr_economic(Y)
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0,
n_iterations=None):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
        trigger the transposition if M.shape[1] > M.shape[0], since this
        implementation of randomized SVD tends to be a little faster in that
        case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
if n_iterations is not None:
warnings.warn("n_iterations was renamed to n_iter for consistency "
"and will be removed in 0.16.", DeprecationWarning)
n_iter = n_iterations
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
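# Illustrative usage sketch (editor's note, not part of the original module).
# The matrix, seed and sizes below are hypothetical; the leading singular
# values are compared against scipy's exact SVD.
#
#     import numpy as np
#     from scipy import linalg
#     from sklearn.utils.extmath import randomized_svd
#     rng = np.random.RandomState(0)
#     M = rng.rand(200, 80)
#     U, s, V = randomized_svd(M, n_components=5, n_iter=3, random_state=0)
#     # U: (200, 5), s: (5,), V: (5, 80)
#     s_exact = linalg.svd(M, full_matrices=False)[1][:5]
#     print(np.abs(s - s_exact))   # small approximation error on leading values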
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
    # the least error
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex hermitian matrix to be pseudo-inverted
cond, rcond : float or None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> from numpy import *
>>> a = random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> allclose(a, dot(a, dot(B, a)))
True
>>> allclose(B, dot(B, dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
References
----------
http://stackoverflow.com/q/1208118
"""
arrays = [np.asarray(x).ravel() for x in arrays]
dtype = arrays[0].dtype
n = np.prod([x.size for x in arrays])
if out is None:
out = np.empty([n, len(arrays)], dtype=dtype)
m = n / arrays[0].size
out[:, 0] = np.repeat(arrays[0], m)
if arrays[1:]:
cartesian(arrays[1:], out=out[0:m, 1:])
for j in xrange(1, arrays[0].size):
out[j * m:(j + 1) * m, 1:] = out[0:m, 1:]
return out
def svd_flip(u, v):
"""Sign correction to ensure deterministic output from SVD
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v: arrays
The output of `linalg.svd` or `sklearn.utils.extmath.randomized_svd`,
with matching inner dimensions so one can compute `np.dot(u * s, v)`.
Returns
-------
    u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
return u, v
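# Illustrative usage sketch (editor's note, not part of the original module):
# applying svd_flip after linalg.svd fixes the sign convention so repeated
# runs (or different LAPACK drivers) yield identical components. Inputs below
# are hypothetical.
#
#     import numpy as np
#     from scipy import linalg
#     from sklearn.utils.extmath import svd_flip
#     X = np.random.rand(30, 4)
#     U, s, V = linalg.svd(X, full_matrices=False)
#     U, V = svd_flip(U, V)
#     # np.dot(U * s, V) still reconstructs X; only the signs of paired
#     # columns of U / rows of V may have changed.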
def logistic_sigmoid(X, log=False, out=None):
"""
Implements the logistic function, ``1 / (1 + e ** -x)`` and its log.
This implementation is more stable by splitting on positive and negative
values and computing::
1 / (1 + exp(-x_i)) if x_i > 0
exp(x_i) / (1 + exp(x_i)) if x_i <= 0
The log is computed using::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
log: boolean, default: False
Whether to compute the logarithm of the logistic function.
out: array-like, shape: (M, N), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N)
Value of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = array2d(X, dtype=np.float)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
if log:
_log_logistic_sigmoid(n_samples, n_features, X, out)
else:
# logistic(x) = (1 + tanh(x / 2)) / 2
out[:] = X
out *= .5
np.tanh(out, out)
out += 1
out *= .5
if is_1d:
return np.squeeze(out)
return out
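# Illustrative usage sketch (editor's note, not part of the original module):
# the stable formulation matters for large-magnitude inputs, where a naive
# 1 / (1 + np.exp(-x)) overflows or loses precision. Values are hypothetical.
#
#     import numpy as np
#     from sklearn.utils.extmath import logistic_sigmoid
#     x = np.array([[-1000., -1., 0., 1., 1000.]])
#     p = logistic_sigmoid(x)                # [[0., 0.2689..., 0.5, 0.7310..., 1.]]
#     log_p = logistic_sigmoid(x, log=True)  # finite even at x = -1000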
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
| bsd-3-clause |
ishanic/scikit-learn | sklearn/metrics/metrics.py | 233 | 1262 | import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| bsd-3-clause |
barak/autograd | examples/fluidsim/wing.py | 1 | 6136 | from __future__ import absolute_import
from __future__ import print_function
import autograd.numpy as np
from autograd import value_and_grad
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import os
from builtins import range
rows, cols = 40, 60
# Fluid simulation code based on
# "Real-Time Fluid Dynamics for Games" by Jos Stam
# http://www.intpowertechcorp.com/GDC03.pdf
def occlude(f, occlusion):
return f * (1 - occlusion)
def project(vx, vy, occlusion):
"""Project the velocity field to be approximately mass-conserving,
using a few iterations of Gauss-Seidel."""
p = np.zeros(vx.shape)
div = -0.5 * (np.roll(vx, -1, axis=1) - np.roll(vx, 1, axis=1)
+ np.roll(vy, -1, axis=0) - np.roll(vy, 1, axis=0))
div = make_continuous(div, occlusion)
for k in range(50):
p = (div + np.roll(p, 1, axis=1) + np.roll(p, -1, axis=1)
+ np.roll(p, 1, axis=0) + np.roll(p, -1, axis=0))/4.0
p = make_continuous(p, occlusion)
vx = vx - 0.5*(np.roll(p, -1, axis=1) - np.roll(p, 1, axis=1))
vy = vy - 0.5*(np.roll(p, -1, axis=0) - np.roll(p, 1, axis=0))
vx = occlude(vx, occlusion)
vy = occlude(vy, occlusion)
return vx, vy
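# Editor's note (illustrative sketch, not part of the original example): after
# `project` the discrete divergence of (vx, vy) should be much smaller away
# from occluded cells. The random field below is a hypothetical quick check.
#
#     vx0 = np.random.randn(rows, cols)
#     vy0 = np.random.randn(rows, cols)
#     vx1, vy1 = project(vx0, vy0, np.zeros((rows, cols)))
#     div = 0.5 * (np.roll(vx1, -1, 1) - np.roll(vx1, 1, 1)
#                  + np.roll(vy1, -1, 0) - np.roll(vy1, 1, 0))
#     print(np.abs(div).max())   # much smaller than for the unprojected field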
def advect(f, vx, vy):
"""Move field f according to x and y velocities (u and v)
using an implicit Euler integrator."""
rows, cols = f.shape
cell_xs, cell_ys = np.meshgrid(np.arange(cols), np.arange(rows))
center_xs = (cell_xs - vx).ravel()
center_ys = (cell_ys - vy).ravel()
# Compute indices of source cells.
left_ix = np.floor(center_ys).astype(np.int)
top_ix = np.floor(center_xs).astype(np.int)
rw = center_ys - left_ix # Relative weight of right-hand cells.
bw = center_xs - top_ix # Relative weight of bottom cells.
left_ix = np.mod(left_ix, rows) # Wrap around edges of simulation.
right_ix = np.mod(left_ix + 1, rows)
top_ix = np.mod(top_ix, cols)
bot_ix = np.mod(top_ix + 1, cols)
# A linearly-weighted sum of the 4 surrounding cells.
flat_f = (1 - rw) * ((1 - bw)*f[left_ix, top_ix] + bw*f[left_ix, bot_ix]) \
+ rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix])
return np.reshape(flat_f, (rows, cols))
def make_continuous(f, occlusion):
non_occluded = 1 - occlusion
num = np.roll(f, 1, axis=0) * np.roll(non_occluded, 1, axis=0)\
+ np.roll(f, -1, axis=0) * np.roll(non_occluded, -1, axis=0)\
+ np.roll(f, 1, axis=1) * np.roll(non_occluded, 1, axis=1)\
+ np.roll(f, -1, axis=1) * np.roll(non_occluded, -1, axis=1)
den = np.roll(non_occluded, 1, axis=0)\
+ np.roll(non_occluded, -1, axis=0)\
+ np.roll(non_occluded, 1, axis=1)\
+ np.roll(non_occluded, -1, axis=1)
return f * non_occluded + (1 - non_occluded) * num / ( den + 0.001)
def sigmoid(x):
return 0.5*(np.tanh(x) + 1.0) # Output ranges from 0 to 1.
def simulate(vx, vy, num_time_steps, occlusion, ax=None, render=False):
occlusion = sigmoid(occlusion)
# Disallow occlusion outside a certain area.
mask = np.zeros((rows, cols))
mask[10:30, 10:30] = 1.0
occlusion = occlusion * mask
# Initialize smoke bands.
red_smoke = np.zeros((rows, cols))
red_smoke[rows/4:rows/2] = 1
blue_smoke = np.zeros((rows, cols))
blue_smoke[rows/2:3*rows/4] = 1
print("Running simulation...")
vx, vy = project(vx, vy, occlusion)
for t in range(num_time_steps):
plot_matrix(ax, red_smoke, occlusion, blue_smoke, t, render)
vx_updated = advect(vx, vx, vy)
vy_updated = advect(vy, vx, vy)
vx, vy = project(vx_updated, vy_updated, occlusion)
red_smoke = advect(red_smoke, vx, vy)
red_smoke = occlude(red_smoke, occlusion)
blue_smoke = advect(blue_smoke, vx, vy)
blue_smoke = occlude(blue_smoke, occlusion)
plot_matrix(ax, red_smoke, occlusion, blue_smoke, num_time_steps, render)
return vx, vy
def plot_matrix(ax, r, g, b, t, render=False):
if ax:
plt.cla()
ax.imshow(np.concatenate((r[...,np.newaxis], g[...,np.newaxis], b[...,np.newaxis]), axis=2))
ax.set_xticks([])
ax.set_yticks([])
plt.draw()
if render:
plt.savefig('step{0:03d}.png'.format(t), bbox_inches='tight')
plt.pause(0.001)
if __name__ == '__main__':
simulation_timesteps = 20
print("Loading initial and target states...")
init_vx = np.ones((rows, cols))
init_vy = np.zeros((rows, cols))
# Initialize the occlusion to be a block.
init_occlusion = -np.ones((rows, cols))
init_occlusion[15:25, 15:25] = 0.0
init_occlusion = init_occlusion.ravel()
def drag(vx): return np.mean(init_vx - vx)
def lift(vy): return np.mean(vy - init_vy)
def objective(params):
cur_occlusion = np.reshape(params, (rows, cols))
final_vx, final_vy = simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion)
return -lift(final_vy) / drag(final_vx)
# Specify gradient of objective function using autograd.
objective_with_grad = value_and_grad(objective)
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, frameon=False)
def callback(weights):
cur_occlusion = np.reshape(weights, (rows, cols))
simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion, ax)
print("Rendering initial flow...")
callback(init_occlusion)
print("Optimizing initial conditions...")
result = minimize(objective_with_grad, init_occlusion, jac=True, method='CG',
options={'maxiter':50, 'disp':True}, callback=callback)
print("Rendering optimized flow...")
final_occlusion = np.reshape(result.x, (rows, cols))
simulate(init_vx, init_vy, simulation_timesteps, final_occlusion, ax, render=True)
print("Converting frames to an animated GIF...") # Using imagemagick.
os.system("convert -delay 5 -loop 0 step*.png "
"-delay 250 step{0:03d}.png wing.gif".format(simulation_timesteps))
os.system("rm step*.png")
| mit |
alekz112/statsmodels | statsmodels/stats/anova.py | 25 | 13433 | from statsmodels.compat.python import lrange, lmap
import numpy as np
from scipy import stats
from pandas import DataFrame, Index
from statsmodels.formula.formulatools import (_remove_intercept_patsy,
_has_intercept, _intercept_idx)
def _get_covariance(model, robust):
if robust is None:
return model.cov_params()
elif robust == "hc0":
se = model.HC0_se
return model.cov_HC0
elif robust == "hc1":
se = model.HC1_se
return model.cov_HC1
elif robust == "hc2":
se = model.HC2_se
return model.cov_HC2
elif robust == "hc3":
se = model.HC3_se
return model.cov_HC3
else: # pragma: no cover
raise ValueError("robust options %s not understood" % robust)
#NOTE: these need to take into account weights !
def anova_single(model, **kwargs):
"""
ANOVA table for one fitted linear model.
Parameters
----------
model : fitted linear model results instance
A fitted linear model
typ : int or str {1,2,3} or {"I","II","III"}
Type of sum of squares to use.
**kwargs**
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
Notes
-----
Use of this function is discouraged. Use anova_lm instead.
"""
test = kwargs.get("test", "F")
scale = kwargs.get("scale", None)
typ = kwargs.get("typ", 1)
robust = kwargs.get("robust", None)
if robust:
robust = robust.lower()
endog = model.model.endog
exog = model.model.exog
nobs = exog.shape[0]
response_name = model.model.endog_names
design_info = model.model.data.design_info
exog_names = model.model.exog_names
# +1 for resids
n_rows = (len(design_info.terms) - _has_intercept(design_info) + 1)
pr_test = "PR(>%s)" % test
names = ['df', 'sum_sq', 'mean_sq', test, pr_test]
table = DataFrame(np.zeros((n_rows, 5)), columns = names)
if typ in [1,"I"]:
return anova1_lm_single(model, endog, exog, nobs, design_info, table,
n_rows, test, pr_test, robust)
elif typ in [2, "II"]:
return anova2_lm_single(model, design_info, n_rows, test, pr_test,
robust)
elif typ in [3, "III"]:
return anova3_lm_single(model, design_info, n_rows, test, pr_test,
robust)
elif typ in [4, "IV"]:
        raise NotImplementedError("Type IV not yet implemented")
else: # pragma: no cover
raise ValueError("Type %s not understood" % str(typ))
def anova1_lm_single(model, endog, exog, nobs, design_info, table, n_rows, test,
pr_test, robust):
"""
ANOVA table for one fitted linear model.
Parameters
----------
model : fitted linear model results instance
A fitted linear model
**kwargs**
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
Notes
-----
Use of this function is discouraged. Use anova_lm instead.
"""
#maybe we should rethink using pinv > qr in OLS/linear models?
effects = getattr(model, 'effects', None)
if effects is None:
q,r = np.linalg.qr(exog)
effects = np.dot(q.T, endog)
arr = np.zeros((len(design_info.terms), len(design_info.column_names)))
slices = [design_info.slice(name) for name in design_info.term_names]
for i,slice_ in enumerate(slices):
arr[i, slice_] = 1
sum_sq = np.dot(arr, effects**2)
#NOTE: assumes intercept is first column
idx = _intercept_idx(design_info)
sum_sq = sum_sq[~idx]
term_names = np.array(design_info.term_names) # want boolean indexing
term_names = term_names[~idx]
index = term_names.tolist()
table.index = Index(index + ['Residual'])
table.ix[index, ['df', 'sum_sq']] = np.c_[arr[~idx].sum(1), sum_sq]
if test == 'F':
table.ix[:n_rows, test] = ((table['sum_sq']/table['df'])/
(model.ssr/model.df_resid))
table.ix[:n_rows, pr_test] = stats.f.sf(table["F"], table["df"],
model.df_resid)
# fill in residual
table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr,
model.df_resid,
np.nan, np.nan)
table['mean_sq'] = table['sum_sq'] / table['df']
return table
#NOTE: the below is not agnostic about formula...
def anova2_lm_single(model, design_info, n_rows, test, pr_test, robust):
"""
ANOVA type II table for one fitted linear model.
Parameters
----------
model : fitted linear model results instance
A fitted linear model
**kwargs**
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
Notes
-----
Use of this function is discouraged. Use anova_lm instead.
Type II
Sum of Squares compares marginal contribution of terms. Thus, it is
not particularly useful for models with significant interaction terms.
"""
terms_info = design_info.terms[:] # copy
terms_info = _remove_intercept_patsy(terms_info)
names = ['sum_sq', 'df', test, pr_test]
table = DataFrame(np.zeros((n_rows, 4)), columns = names)
cov = _get_covariance(model, None)
robust_cov = _get_covariance(model, robust)
col_order = []
index = []
for i, term in enumerate(terms_info):
        # grab all variables except interaction effects that contain term
        # need two hypothesis matrices: L1 is the most restrictive, i.e., term == 0;
        # L2 is everything except term == 0
cols = design_info.slice(term)
L1 = lrange(cols.start, cols.stop)
L2 = []
term_set = set(term.factors)
for t in terms_info: # for the term you have
other_set = set(t.factors)
if term_set.issubset(other_set) and not term_set == other_set:
col = design_info.slice(t)
# on a higher order term containing current `term`
L1.extend(lrange(col.start, col.stop))
L2.extend(lrange(col.start, col.stop))
L1 = np.eye(model.model.exog.shape[1])[L1]
L2 = np.eye(model.model.exog.shape[1])[L2]
if L2.size:
LVL = np.dot(np.dot(L1,robust_cov),L2.T)
from scipy import linalg
orth_compl,_ = linalg.qr(LVL)
r = L1.shape[0] - L2.shape[0]
# L1|2
# use the non-unique orthogonal completion since L12 is rank r
L12 = np.dot(orth_compl[:,-r:].T, L1)
else:
L12 = L1
r = L1.shape[0]
#from IPython.core.debugger import Pdb; Pdb().set_trace()
if test == 'F':
f = model.f_test(L12, cov_p=robust_cov)
table.ix[i, test] = test_value = f.fvalue
table.ix[i, pr_test] = f.pvalue
# need to back out SSR from f_test
table.ix[i, 'df'] = r
col_order.append(cols.start)
index.append(term.name())
table.index = Index(index + ['Residual'])
table = table.ix[np.argsort(col_order + [model.model.exog.shape[1]+1])]
# back out sum of squares from f_test
ssr = table[test] * table['df'] * model.ssr/model.df_resid
table['sum_sq'] = ssr
# fill in residual
table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr,
model.df_resid,
np.nan, np.nan)
return table
def anova3_lm_single(model, design_info, n_rows, test, pr_test, robust):
n_rows += _has_intercept(design_info)
terms_info = design_info.terms
names = ['sum_sq', 'df', test, pr_test]
table = DataFrame(np.zeros((n_rows, 4)), columns = names)
cov = _get_covariance(model, robust)
col_order = []
index = []
for i, term in enumerate(terms_info):
# grab term, hypothesis is that term == 0
cols = design_info.slice(term)
L1 = np.eye(model.model.exog.shape[1])[cols]
L12 = L1
r = L1.shape[0]
if test == 'F':
f = model.f_test(L12, cov_p=cov)
table.ix[i, test] = test_value = f.fvalue
table.ix[i, pr_test] = f.pvalue
# need to back out SSR from f_test
table.ix[i, 'df'] = r
#col_order.append(cols.start)
index.append(term.name())
table.index = Index(index + ['Residual'])
#NOTE: Don't need to sort because terms are an ordered dict now
#table = table.ix[np.argsort(col_order + [model.model.exog.shape[1]+1])]
# back out sum of squares from f_test
ssr = table[test] * table['df'] * model.ssr/model.df_resid
table['sum_sq'] = ssr
# fill in residual
table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr,
model.df_resid,
np.nan, np.nan)
return table
def anova_lm(*args, **kwargs):
"""
ANOVA table for one or more fitted linear models.
Parameters
----------
args : fitted linear model results instance
One or more fitted linear models
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
typ : str or int {"I","II","III"} or {1,2,3}
The type of ANOVA test to perform. See notes.
robust : {None, "hc0", "hc1", "hc2", "hc3"}
Use heteroscedasticity-corrected coefficient covariance matrix.
If robust covariance is desired, it is recommended to use `hc3`.
Returns
-------
anova : DataFrame
        A DataFrame containing the ANOVA table.
Notes
-----
Model statistics are given in the order of args. Models must have
been fit using the formula api.
See Also
--------
model_results.compare_f_test, model_results.compare_lm_test
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.formula.api import ols
>>> moore = sm.datasets.get_rdataset("Moore", "car",
... cache=True) # load data
>>> data = moore.data
>>> data = data.rename(columns={"partner.status" :
... "partner_status"}) # make name pythonic
>>> moore_lm = ols('conformity ~ C(fcategory, Sum)*C(partner_status, Sum)',
... data=data).fit()
>>> table = sm.stats.anova_lm(moore_lm, typ=2) # Type 2 ANOVA DataFrame
>>> print table
"""
typ = kwargs.get('typ', 1)
### Farm Out Single model ANOVA Type I, II, III, and IV ###
if len(args) == 1:
model = args[0]
return anova_single(model, **kwargs)
try:
assert typ in [1,"I"]
except:
raise ValueError("Multiple models only supported for type I. "
"Got type %s" % str(typ))
### COMPUTE ANOVA TYPE I ###
# if given a single model
if len(args) == 1:
return anova_single(*args, **kwargs)
# received multiple fitted models
test = kwargs.get("test", "F")
scale = kwargs.get("scale", None)
n_models = len(args)
model_formula = []
pr_test = "Pr(>%s)" % test
names = ['df_resid', 'ssr', 'df_diff', 'ss_diff', test, pr_test]
table = DataFrame(np.zeros((n_models, 6)), columns = names)
if not scale: # assume biggest model is last
scale = args[-1].scale
table["ssr"] = lmap(getattr, args, ["ssr"]*n_models)
table["df_resid"] = lmap(getattr, args, ["df_resid"]*n_models)
table.ix[1:, "df_diff"] = -np.diff(table["df_resid"].values)
table["ss_diff"] = -table["ssr"].diff()
if test == "F":
table["F"] = table["ss_diff"] / table["df_diff"] / scale
table[pr_test] = stats.f.sf(table["F"], table["df_diff"],
table["df_resid"])
# for earlier scipy - stats.f.sf(np.nan, 10, 2) -> 0 not nan
table[pr_test][table['F'].isnull()] = np.nan
return table
if __name__ == "__main__":
import pandas
from statsmodels.formula.api import ols
# in R
#library(car)
#write.csv(Moore, "moore.csv", row.names=FALSE)
moore = pandas.read_table('moore.csv', delimiter=",", skiprows=1,
names=['partner_status','conformity',
'fcategory','fscore'])
moore_lm = ols('conformity ~ C(fcategory, Sum)*C(partner_status, Sum)',
data=moore).fit()
mooreB = ols('conformity ~ C(partner_status, Sum)', data=moore).fit()
# for each term you just want to test vs the model without its
# higher-order terms
# using Monette-Fox slides and Marden class notes for linear algebra /
# orthogonal complement
# https://netfiles.uiuc.edu/jimarden/www/Classes/STAT324/
table = anova_lm(moore_lm, typ=2)
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/table.py | 2 | 17111 | """
Place a table below the x-axis at location loc.
The table consists of a grid of cells.
The grid need not be rectangular and can have holes.
Cells are added by specifying their row and column.
For the purposes of positioning the cell at (0, 0) is
assumed to be at the top left and the cell at (max_row, max_col)
is assumed to be at bottom right.
You can add additional cells outside this range to have convenient
ways of positioning more interesting grids.
Author : John Gill <jng@europe.renre.com>
Copyright : 2004 John Gill and John Hunter
License : matplotlib license
"""
from __future__ import division, print_function
import warnings
import artist
from artist import Artist, allow_rasterization
from patches import Rectangle
from cbook import is_string_like
from matplotlib import docstring
from text import Text
from transforms import Bbox
class Cell(Rectangle):
"""
A cell is a Rectangle with some associated text.
"""
PAD = 0.1 # padding between text and rectangle
def __init__(self, xy, width, height,
edgecolor='k', facecolor='w',
fill=True,
text='',
loc=None,
fontproperties=None
):
# Call base
Rectangle.__init__(self, xy, width=width, height=height,
edgecolor=edgecolor, facecolor=facecolor)
self.set_clip_on(False)
# Create text object
if loc is None:
loc = 'right'
self._loc = loc
self._text = Text(x=xy[0], y=xy[1], text=text,
fontproperties=fontproperties)
self._text.set_clip_on(False)
def set_transform(self, trans):
Rectangle.set_transform(self, trans)
# the text does not get the transform!
def set_figure(self, fig):
Rectangle.set_figure(self, fig)
self._text.set_figure(fig)
def get_text(self):
        'Return the cell Text instance'
return self._text
def set_fontsize(self, size):
self._text.set_fontsize(size)
def get_fontsize(self):
'Return the cell fontsize'
return self._text.get_fontsize()
def auto_set_font_size(self, renderer):
""" Shrink font size until text fits. """
fontsize = self.get_fontsize()
required = self.get_required_width(renderer)
while fontsize > 1 and required > self.get_width():
fontsize -= 1
self.set_fontsize(fontsize)
required = self.get_required_width(renderer)
return fontsize
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
# draw the rectangle
Rectangle.draw(self, renderer)
# position the text
self._set_text_position(renderer)
self._text.draw(renderer)
def _set_text_position(self, renderer):
""" Set text up so it draws in the right place.
        Currently supports 'left', 'center' and 'right'
"""
bbox = self.get_window_extent(renderer)
l, b, w, h = bbox.bounds
# draw in center vertically
self._text.set_verticalalignment('center')
y = b + (h / 2.0)
# now position horizontally
if self._loc == 'center':
self._text.set_horizontalalignment('center')
x = l + (w / 2.0)
elif self._loc == 'left':
self._text.set_horizontalalignment('left')
x = l + (w * self.PAD)
else:
self._text.set_horizontalalignment('right')
x = l + (w * (1.0 - self.PAD))
self._text.set_position((x, y))
def get_text_bounds(self, renderer):
""" Get text bounds in axes co-ordinates. """
bbox = self._text.get_window_extent(renderer)
bboxa = bbox.inverse_transformed(self.get_data_transform())
return bboxa.bounds
def get_required_width(self, renderer):
""" Get width required for this cell. """
l, b, w, h = self.get_text_bounds(renderer)
return w * (1.0 + (2.0 * self.PAD))
def set_text_props(self, **kwargs):
'update the text properties with kwargs'
self._text.update(kwargs)
class Table(Artist):
"""
Create a table of cells.
Table can have (optional) row and column headers.
Each entry in the table can be either text or patches.
    Column widths and row heights for the table can be specified.
Return value is a sequence of text, line and patch instances that make
up the table
"""
codes = {'best': 0,
'upper right': 1, # default
'upper left': 2,
'lower left': 3,
'lower right': 4,
'center left': 5,
'center right': 6,
'lower center': 7,
'upper center': 8,
'center': 9,
'top right': 10,
'top left': 11,
'bottom left': 12,
'bottom right': 13,
'right': 14,
'left': 15,
'top': 16,
'bottom': 17,
}
FONTSIZE = 10
AXESPAD = 0.02 # the border between the axes and table edge
def __init__(self, ax, loc=None, bbox=None):
Artist.__init__(self)
if is_string_like(loc) and loc not in self.codes:
warnings.warn('Unrecognized location %s. Falling back on '
'bottom; valid locations are\n%s\t' %
(loc, '\n\t'.join(self.codes.iterkeys())))
loc = 'bottom'
if is_string_like(loc):
loc = self.codes.get(loc, 1)
self.set_figure(ax.figure)
self._axes = ax
self._loc = loc
self._bbox = bbox
# use axes coords
self.set_transform(ax.transAxes)
self._texts = []
self._cells = {}
self._autoRows = []
self._autoColumns = []
self._autoFontsize = True
self._cachedRenderer = None
def add_cell(self, row, col, *args, **kwargs):
""" Add a cell to the table. """
xy = (0, 0)
cell = Cell(xy, *args, **kwargs)
cell.set_figure(self.figure)
cell.set_transform(self.get_transform())
cell.set_clip_on(False)
self._cells[(row, col)] = cell
def _approx_text_height(self):
return (self.FONTSIZE / 72.0 * self.figure.dpi /
self._axes.bbox.height * 1.2)
@allow_rasterization
def draw(self, renderer):
# Need a renderer to do hit tests on mouseevent; assume the last one
# will do
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
self._cachedRenderer = renderer
if not self.get_visible():
return
renderer.open_group('table')
self._update_positions(renderer)
keys = self._cells.keys()
keys.sort()
for key in keys:
self._cells[key].draw(renderer)
#for c in self._cells.itervalues():
# c.draw(renderer)
renderer.close_group('table')
def _get_grid_bbox(self, renderer):
"""Get a bbox, in axes co-ordinates for the cells.
Only include those in the range (0,0) to (maxRow, maxCol)"""
boxes = [self._cells[pos].get_window_extent(renderer)
for pos in self._cells.iterkeys()
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.inverse_transformed(self.get_transform())
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the table.
Returns T/F, {}
"""
if callable(self._contains):
return self._contains(self, mouseevent)
# TODO: Return index of the cell containing the cursor so that the user
# doesn't have to bind to each one individually.
if self._cachedRenderer is not None:
boxes = [self._cells[pos].get_window_extent(self._cachedRenderer)
for pos in self._cells.iterkeys()
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.contains(mouseevent.x, mouseevent.y), {}
else:
return False, {}
def get_children(self):
'Return the Artists contained by the table'
return self._cells.values()
get_child_artists = get_children # backward compatibility
def get_window_extent(self, renderer):
'Return the bounding box of the table in window coords'
boxes = [cell.get_window_extent(renderer)
for cell in self._cells.values()]
return Bbox.union(boxes)
def _do_cell_alignment(self):
""" Calculate row heights and column widths.
Position cells accordingly.
"""
# Calculate row/column widths
widths = {}
heights = {}
for (row, col), cell in self._cells.iteritems():
height = heights.setdefault(row, 0.0)
heights[row] = max(height, cell.get_height())
width = widths.setdefault(col, 0.0)
widths[col] = max(width, cell.get_width())
# work out left position for each column
xpos = 0
lefts = {}
cols = widths.keys()
cols.sort()
for col in cols:
lefts[col] = xpos
xpos += widths[col]
ypos = 0
bottoms = {}
rows = heights.keys()
rows.sort()
rows.reverse()
for row in rows:
bottoms[row] = ypos
ypos += heights[row]
# set cell positions
for (row, col), cell in self._cells.iteritems():
cell.set_x(lefts[col])
cell.set_y(bottoms[row])
def auto_set_column_width(self, col):
self._autoColumns.append(col)
def _auto_set_column_width(self, col, renderer):
""" Automagically set width for column.
"""
cells = [key for key in self._cells if key[1] == col]
# find max width
width = 0
for cell in cells:
c = self._cells[cell]
width = max(c.get_required_width(renderer), width)
# Now set the widths
for cell in cells:
self._cells[cell].set_width(width)
def auto_set_font_size(self, value=True):
""" Automatically set font size. """
self._autoFontsize = value
def _auto_set_font_size(self, renderer):
if len(self._cells) == 0:
return
fontsize = self._cells.values()[0].get_fontsize()
cells = []
for key, cell in self._cells.iteritems():
# ignore auto-sized columns
if key[1] in self._autoColumns:
continue
size = cell.auto_set_font_size(renderer)
fontsize = min(fontsize, size)
cells.append(cell)
# now set all fontsizes equal
for cell in self._cells.itervalues():
cell.set_fontsize(fontsize)
def scale(self, xscale, yscale):
""" Scale column widths by xscale and row heights by yscale. """
for c in self._cells.itervalues():
c.set_width(c.get_width() * xscale)
c.set_height(c.get_height() * yscale)
def set_fontsize(self, size):
"""
Set the fontsize of the cell text
ACCEPTS: a float in points
"""
for cell in self._cells.itervalues():
cell.set_fontsize(size)
def _offset(self, ox, oy):
'Move all the artists by ox,oy (axes coords)'
for c in self._cells.itervalues():
x, y = c.get_x(), c.get_y()
c.set_x(x + ox)
c.set_y(y + oy)
def _update_positions(self, renderer):
# called from renderer to allow more precise estimates of
# widths and heights with get_window_extent
# Do any auto width setting
for col in self._autoColumns:
self._auto_set_column_width(col, renderer)
if self._autoFontsize:
self._auto_set_font_size(renderer)
# Align all the cells
self._do_cell_alignment()
bbox = self._get_grid_bbox(renderer)
l, b, w, h = bbox.bounds
if self._bbox is not None:
# Position according to bbox
rl, rb, rw, rh = self._bbox
self.scale(rw / w, rh / h)
ox = rl - l
oy = rb - b
self._do_cell_alignment()
else:
# Position using loc
(BEST, UR, UL, LL, LR, CL, CR, LC, UC, C,
TR, TL, BL, BR, R, L, T, B) = range(len(self.codes))
# defaults for center
ox = (0.5 - w / 2) - l
oy = (0.5 - h / 2) - b
if self._loc in (UL, LL, CL): # left
ox = self.AXESPAD - l
if self._loc in (BEST, UR, LR, R, CR): # right
ox = 1 - (l + w + self.AXESPAD)
if self._loc in (BEST, UR, UL, UC): # upper
oy = 1 - (b + h + self.AXESPAD)
if self._loc in (LL, LR, LC): # lower
oy = self.AXESPAD - b
if self._loc in (LC, UC, C): # center x
ox = (0.5 - w / 2) - l
if self._loc in (CL, CR, C): # center y
oy = (0.5 - h / 2) - b
if self._loc in (TL, BL, L): # out left
ox = - (l + w)
if self._loc in (TR, BR, R): # out right
ox = 1.0 - l
if self._loc in (TR, TL, T): # out top
oy = 1.0 - b
if self._loc in (BL, BR, B): # out bottom
oy = - (b + h)
self._offset(ox, oy)
def get_celld(self):
'return a dict of cells in the table'
return self._cells
def table(ax,
cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
"""
TABLE(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None)
Factory function to generate a Table instance.
Thanks to John Gill for providing the class and table.
"""
# Check we have some cellText
if cellText is None:
# assume just colours are needed
rows = len(cellColours)
cols = len(cellColours[0])
        cellText = [[''] * cols] * rows
rows = len(cellText)
cols = len(cellText[0])
for row in cellText:
assert len(row) == cols
if cellColours is not None:
assert len(cellColours) == rows
for row in cellColours:
assert len(row) == cols
else:
cellColours = ['w' * cols] * rows
# Set colwidths if not given
if colWidths is None:
colWidths = [1.0 / cols] * cols
# Check row and column labels
rowLabelWidth = 0
if rowLabels is None:
if rowColours is not None:
            rowLabels = [''] * rows
rowLabelWidth = colWidths[0]
elif rowColours is None:
rowColours = 'w' * rows
if rowLabels is not None:
assert len(rowLabels) == rows
offset = 0
if colLabels is None:
if colColours is not None:
            colLabels = [''] * cols
offset = 1
elif colColours is None:
colColours = 'w' * cols
offset = 1
    if colLabels is not None:
        assert len(colLabels) == cols
# Set up cell colours if not given
if cellColours is None:
cellColours = ['w' * cols] * rows
# Now create the table
table = Table(ax, loc, bbox)
height = table._approx_text_height()
# Add the cells
for row in xrange(rows):
for col in xrange(cols):
table.add_cell(row + offset, col,
width=colWidths[col], height=height,
text=cellText[row][col],
facecolor=cellColours[row][col],
loc=cellLoc)
# Do column labels
if colLabels is not None:
for col in xrange(cols):
table.add_cell(0, col,
width=colWidths[col], height=height,
text=colLabels[col], facecolor=colColours[col],
loc=colLoc)
# Do row labels
if rowLabels is not None:
for row in xrange(rows):
table.add_cell(row + offset, -1,
width=rowLabelWidth or 1e-15, height=height,
text=rowLabels[row], facecolor=rowColours[row],
loc=rowLoc)
if rowLabelWidth == 0:
table.auto_set_column_width(-1)
ax.add_table(table)
return table
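# Illustrative usage sketch (editor's note, not part of the original module);
# the cell data and labels below are hypothetical:
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.set_axis_off()
#     tab = table(ax,
#                 cellText=[['1', '2'], ['3', '4']],
#                 rowLabels=['row 0', 'row 1'],
#                 colLabels=['col 0', 'col 1'],
#                 loc='center')
#     plt.show()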
docstring.interpd.update(Table=artist.kwdoc(Table))
| mit |
tienjunhsu/trading-with-python | sandbox/spreadCalculations.py | 78 | 1496 | '''
Created on 28 okt 2011
@author: jev
'''
from tradingWithPython import estimateBeta, Spread, returns, Portfolio, readBiggerScreener
from tradingWithPython.lib import yahooFinance
from pandas import DataFrame, Series
import numpy as np
import matplotlib.pyplot as plt
import os
symbols = ['SPY','IWM']
y = yahooFinance.HistData('temp.csv')
y.startDate = (2007,1,1)
df = y.loadSymbols(symbols,forceDownload=False)
#df = y.downloadData(symbols)
res = readBiggerScreener('CointPairs.csv')
#---check with spread scanner
#sp = DataFrame(index=symbols)
#
#sp['last'] = df.ix[-1,:]
#sp['targetCapital'] = Series({'SPY':100,'IWM':-100})
#sp['targetShares'] = sp['targetCapital']/sp['last']
#print sp
#The dollar-neutral ratio is about 1 * SPY - 1.7 * IWM. You will get the spread = zero (or probably very near zero)
#s = Spread(symbols, histClose = df)
#print s
#s.value.plot()
#print 'beta (returns)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='returns')
#print 'beta (log)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='log')
#print 'beta (standard)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='standard')
#p = Portfolio(df)
#p.setShares([1, -1.7])
#p.value.plot()
quote = yahooFinance.getQuote(symbols)
print quote
s = Spread(symbols,histClose=df, estimateBeta = False)
s.setLast(quote['last'])
s.setShares(Series({'SPY':1,'IWM':-1.7}))
print s
#s.value.plot()
#s.plot()
fig = plt.figure(2)
s.plot()
| bsd-3-clause |
DTOcean/dtocean-core | tests/test_data_definitions_timetable.py | 1 | 7507 | import pytest
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from aneris.control.factory import InterfaceFactory
from dtocean_core.core import (AutoFileInput,
AutoFileOutput,
AutoPlot,
AutoQuery,
Core)
from dtocean_core.data import CoreMetaData
from dtocean_core.data.definitions import TimeTable, TimeTableColumn
def test_TimeTable_available():
new_core = Core()
all_objs = new_core.control._store._structures
assert "TimeTable" in all_objs.keys()
def test_TimeTable():
dates = []
dt = datetime(2010, 12, 01)
end = datetime(2010, 12, 02, 23, 59, 59)
step = timedelta(seconds=3600)
while dt < end:
dates.append(dt)
dt += step
values = np.random.rand(len(dates))
raw = {"DateTime": dates,
"a": values,
"b": values}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ["a", "b"],
"units": ["kg", None]})
test = TimeTable()
a = test.get_data(raw, meta)
b = test.get_value(a)
assert "a" in b
assert len(b) == len(dates)
assert len(b.resample('D').mean()) == 2
def test_get_None():
test = TimeTable()
result = test.get_value(None)
assert result is None
@pytest.mark.parametrize("fext", [".csv", ".xls", ".xlsx"])
def test_TimeTable_auto_file(tmpdir, fext):
test_path = tmpdir.mkdir("sub").join("test{}".format(fext))
test_path_str = str(test_path)
dates = []
dt = datetime(2010, 12, 01)
end = datetime(2010, 12, 02, 23, 59, 59)
step = timedelta(seconds=3600)
while dt < end:
dates.append(dt)
dt += step
values = np.random.rand(len(dates))
raw = {"DateTime": dates,
"a": values,
"b": values}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ["a", "b"],
"units": ["kg", None]})
test = TimeTable()
fout_factory = InterfaceFactory(AutoFileOutput)
FOutCls = fout_factory(meta, test)
fout = FOutCls()
fout._path = test_path_str
fout.data.result = test.get_data(raw, meta)
fout.connect()
assert len(tmpdir.listdir()) == 1
fin_factory = InterfaceFactory(AutoFileInput)
FInCls = fin_factory(meta, test)
fin = FInCls()
fin._path = test_path_str
fin.connect()
result = test.get_data(fin.data.result, meta)
assert "a" in result
assert len(result) == len(dates)
assert len(result.resample('D').mean()) == 2
def test_TimeTable_auto_plot(tmpdir):
dates = []
dt = datetime(2010, 12, 01)
end = datetime(2010, 12, 02, 23, 59, 59)
step = timedelta(seconds=3600)
while dt < end:
dates.append(dt)
dt += step
values = np.random.rand(len(dates))
raw = {"DateTime": dates,
"a": values,
"b": values}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ["a", "b"],
"units": ["kg", None]})
test = TimeTable()
fout_factory = InterfaceFactory(AutoPlot)
PlotCls = fout_factory(meta, test)
plot = PlotCls()
plot.data.result = test.get_data(raw, meta)
plot.meta.result = meta
plot.connect()
assert len(plt.get_fignums()) == 1
plt.close("all")
def test_TimeTableColumn_available():
new_core = Core()
all_objs = new_core.control._store._structures
assert "TimeTableColumn" in all_objs.keys()
def test_TimeTableColumn_auto_db(mocker):
dates = []
dt = datetime(2010, 12, 01)
end = datetime(2010, 12, 02, 23, 59, 59)
step = timedelta(seconds=3600)
while dt < end:
dates.append(dt)
dt += step
values = np.random.rand(len(dates))
mock_dict = {"date": [x.date() for x in dates],
"time": [x.time() for x in dates],
"a": values,
"b": values}
mock_df = pd.DataFrame(mock_dict)
mocker.patch('dtocean_core.data.definitions.get_table_df',
return_value=mock_df,
autospec=True)
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ["a", "b"],
"units": ["kg", None],
"tables": ["mock.mock", "date", "time", "a", "b"]})
test = TimeTableColumn()
query_factory = InterfaceFactory(AutoQuery)
QueryCls = query_factory(meta, test)
query = QueryCls()
query.meta.result = meta
query.connect()
result = test.get_data(query.data.result, meta)
assert "a" in result
assert len(result) == len(dates)
assert len(result.resample('D').mean()) == 2
def test_TimeTableColumn_auto_db_empty(mocker):
mock_dict = {"date": [],
"time": [],
"a": [],
"b": []}
mock_df = pd.DataFrame(mock_dict)
mocker.patch('dtocean_core.data.definitions.get_table_df',
return_value=mock_df,
autospec=True)
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ["a", "b"],
"units": ["kg", None],
"tables": ["mock.mock", "date", "time", "a", "b"]})
test = TimeTableColumn()
query_factory = InterfaceFactory(AutoQuery)
QueryCls = query_factory(meta, test)
query = QueryCls()
query.meta.result = meta
query.connect()
assert query.data.result is None
def test_TimeTableColumn_auto_db_none(mocker):
dates = []
dt = datetime(2010, 12, 01)
end = datetime(2010, 12, 02, 23, 59, 59)
step = timedelta(seconds=3600)
while dt < end:
dates.append(dt)
dt += step
values = np.random.rand(len(dates))
mock_dict = {"date": [None] * len(dates),
"time": [x.time() for x in dates],
"a": values,
"b": values}
mock_df = pd.DataFrame(mock_dict)
mocker.patch('dtocean_core.data.definitions.get_table_df',
return_value=mock_df,
autospec=True)
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ["a", "b"],
"units": ["kg", None],
"tables": ["mock.mock", "date", "time", "a", "b"]})
test = TimeTableColumn()
query_factory = InterfaceFactory(AutoQuery)
QueryCls = query_factory(meta, test)
query = QueryCls()
query.meta.result = meta
query.connect()
assert query.data.result is None
| gpl-3.0 |
kevinyu98/spark | python/pyspark/sql/tests/test_pandas_map.py | 8 | 4346 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
import unittest
if sys.version >= '3':
unicode = str
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
if have_pandas:
import pandas as pd
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class MapInPandasTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
ReusedSQLTestCase.setUpClass()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.sc.environment["TZ"] = tz
cls.spark.conf.set("spark.sql.session.timeZone", tz)
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
ReusedSQLTestCase.tearDownClass()
def test_map_partitions_in_pandas(self):
def func(iterator):
for pdf in iterator:
assert isinstance(pdf, pd.DataFrame)
assert pdf.columns == ['id']
yield pdf
df = self.spark.range(10)
actual = df.mapInPandas(func, 'id long').collect()
expected = df.collect()
self.assertEquals(actual, expected)
def test_multiple_columns(self):
data = [(1, "foo"), (2, None), (3, "bar"), (4, "bar")]
df = self.spark.createDataFrame(data, "a int, b string")
def func(iterator):
for pdf in iterator:
assert isinstance(pdf, pd.DataFrame)
assert [d.name for d in list(pdf.dtypes)] == ['int32', 'object']
yield pdf
actual = df.mapInPandas(func, df.schema).collect()
expected = df.collect()
self.assertEquals(actual, expected)
def test_different_output_length(self):
def func(iterator):
for _ in iterator:
yield pd.DataFrame({'a': list(range(100))})
df = self.spark.range(10)
actual = df.repartition(1).mapInPandas(func, 'a long').collect()
self.assertEquals(set((r.a for r in actual)), set(range(100)))
def test_empty_iterator(self):
def empty_iter(_):
return iter([])
self.assertEqual(
self.spark.range(10).mapInPandas(empty_iter, 'a int, b string').count(), 0)
def test_empty_rows(self):
def empty_rows(_):
return iter([pd.DataFrame({'a': []})])
self.assertEqual(
self.spark.range(10).mapInPandas(empty_rows, 'a int').count(), 0)
def test_chain_map_partitions_in_pandas(self):
def func(iterator):
for pdf in iterator:
assert isinstance(pdf, pd.DataFrame)
assert pdf.columns == ['id']
yield pdf
df = self.spark.range(10)
actual = df.mapInPandas(func, 'id long').mapInPandas(func, 'id long').collect()
expected = df.collect()
self.assertEquals(actual, expected)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_map import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
cpbl/cpblUtilities | matplotlib_utils.py | 1 | 7242 | #!/usr/bin/python
import matplotlib.pyplot as plt
def prepare_figure_for_publication(ax=None,
width_cm=None,
width_inches=None,
height_cm=None,
height_inches=None,
fontsize=None,
fontsize_labels=None,
fontsize_ticklabels=None,
fontsize_legend=None,
fontsize_annotations =None,
TeX = True, # Used for ax=None case (setup)
):
"""
Two ways to use this:
(1) Before creating a figure, with ax=None
(2) To fine-tune a figure, using ax
One reasonable option for making compact figures like for Science/Nature is to create everything at double scale.
This works a little more naturally with Matplotlib's default line/axis/etc sizes.
Also, if you change sizes of, e.g. xticklabels and x-axis labels after they've been created, they will not necessarily be relocated appropriately.
So you can call prepare_figure_for_publication with no ax/fig argument to set up figure defaults
prior to creating the figure in the first place.
Some wisdom on graphics:
- 2015: How to produce PDFs of a given width, with chosen font size, etc:
(1) Fix width to journal specifications from the beginning / early. Adjust height as you go, according to preferences for aspect ratio:
figure(figsize=(11.4/2.54, chosen height))
(2) Do not use 'bbox_inches="tight"' in savefig('fn.pdf'). Instead, use the subplot_adjust options to manually adjust edges to get the figure content to fit in the PDF output
(3) Be satisfied with that. If you must get something exactly tight and exactly the right size, you do this in Inkscape. But you cannot scale the content and bbox in the same step. Load PDF, select all, choose the units in the box at the top of the main menu bar, click on the lock htere, set the width. Then, in File Properties dialog, resize file to content. Save.
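Putting (1)-(3) together, a minimal usage sketch (the sizes and the output name
'fig.pdf' below are illustrative assumptions, not requirements):
prepare_figure_for_publication(width_inches=11.4/2.54, height_inches=3, fontsize=9, TeX=False)
fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
prepare_figure_for_publication(ax=ax, fontsize=9) # mode (2): fine-tune the created axes
fig.subplots_adjust(left=0.12, right=0.98, bottom=0.14, top=0.97)
fig.savefig('fig.pdf') # avoid bbox_inches="tight", per point (2) above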
"""
if ax is None: # Set up plot settings, prior to creation fo a figure
params = { 'axes.labelsize': fontsize_labels if fontsize_labels is not None else fontsize,
'font.size': fontsize,
'legend.fontsize': fontsize_legend if fontsize_legend is not None else fontsize,
'xtick.labelsize': fontsize_ticklabels if fontsize_ticklabels is not None else fontsize_labels if fontsize_labels is not None else fontsize,
'ytick.labelsize': fontsize_ticklabels if fontsize_ticklabels is not None else fontsize_labels if fontsize_labels is not None else fontsize,
'figure.figsize': (width_inches, height_inches),
}
if TeX:
params.update({
'text.usetex': TeX,
'text.latex.preamble': r'\usepackage{amsmath} \usepackage{amssymb}',
'text.latex.unicode': True,
})
if not TeX:
params.update({'text.latex.preamble':''})
plt.rcParams.update(params)
return
fig = ax.get_figure()
if width_inches:
fig.set_figwidth(width_inches)
assert width_cm is None
if height_inches:
fig.set_figheight(height_inches)
assert height_cm is None
if width_cm:
fig.set_figwidth(width_cm/2.54)
assert width_inches is None
if height_cm:
fig.set_figheight(height_cm/2.54)
assert height_inches is None
#ax = plt.subplot(111, xlabel='x', ylabel='y', title='title')
for item in fig.findobj(plt.Text) + [ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels():
if fontsize:
item.set_fontsize(fontsize)
def plot_diagonal(xdata=None, ydata=None, ax=None, **args):
""" Plot a 45-degree line
"""
import pandas as pd
if ax is None: ax = plt.gca()
#LL = min(min(df[xv]), min(df[yv])), max(max(df[xv]), max(df[yv]))
if xdata is None and ydata is None:
xl, yl = ax.get_xlim(), ax.get_ylim()
LL = max(min(xl), min(yl)), min(max(xl), max(yl)),
elif xdata is not None and ydata is None:
assert isinstance(xdata, pd.DataFrame)
dd = xdata.dropna()
LL = dd.min().max(), dd.max().min()
else:
assert xdata is not None
assert ydata is not None
#if isinstance(xdata, pd.Series): xdata = xdata.vlu
xl, yl = xdata, ydata
LL = max(min(xl), min(yl)), min(max(xl), max(yl)),
ax.plot(LL, LL, **args)
def figureFontSetup(uniform=12,figsize='paper', amsmath=True):
"""
This is deprecated. Use prepare_figure_for_publication
Set font size settings for matplotlib figures so that they are reasonable for exporting to PDF to use in publications / presentations..... [different!]
If not for paper, this is not yet useful.
Here are some good sizes for paper:
figure(468,figsize=(4.6,2)) # in inches
figureFontSetup(uniform=12) # 12 pt font
for a subplot(211)
or for a single plot (?)
figure(127,figsize=(4.6,4)) # in inches. Only works if figure is not open from last run!
why does the following not work to deal with the bad bounding-box size problem?!
inkscape -f GSSseries-happyLife-QC-bw.pdf --verb=FitCanvasToDrawing -A tmp.pdf .: Due to inkscape cli sucks! bug.
--> See savefigall for an inkscape implementation.
2012 May: new matplotlib has tight_layout(). But it rejigs all subplots etc. My inkscape solution is much better, since it doesn't change the layout. Hoewever, it does mean that the original size is not respected! ... Still, my favourite way from now on to make figures is to append the font size setting to the name, ie to make one for a given intended final size, and to do no resaling in LaTeX. Use tight_layout() if it looks okay, but the inkscape solution in general.
n.b. a clf() erases size settings on a figure!
"""
figsizelookup={'paper':(4.6,4),'quarter':(1.25,1) ,None:None}
try:
figsize=figsizelookup[figsize]
except (KeyError, TypeError):
pass
params = {#'backend': 'ps',
'axes.labelsize': 16,
#'text.fontsize': 14,
'font.size': 14,
'legend.fontsize': 10,
'xtick.labelsize': 16,
'ytick.labelsize': 16,
'text.usetex': True,
'figure.figsize': figsize
}
#'figure.figsize': fig_size}
if uniform is not None:
assert isinstance(uniform,int)
params = {#'backend': 'ps',
'axes.labelsize': uniform,
#'text.fontsize': uniform,
'font.size': uniform,
'legend.fontsize': uniform,
'xtick.labelsize': uniform,
'ytick.labelsize': uniform,
'text.usetex': True,
'text.latex.unicode': True,
'text.latex.preamble':r'\usepackage{amsmath},\usepackage{amssymb}',
'figure.figsize': figsize
}
if not amsmath:
params.update({'text.latex.preamble':''})
plt.rcParams.update(params)
plt.rcParams['text.latex.unicode']=True
#if figsize:
# plt.rcParams[figure.figsize]={'paper':(4.6,4)}[figsize]
return(params)
| gpl-3.0 |
hayd/SimpleCV | SimpleCV/Features/Features.py | 10 | 68992 | # SimpleCV Feature library
#
# Tools return basic features in feature sets
# # x = 0.00
# y = 0.00
# _mMaxX = None
# _mMaxY = None
# _mMinX = None
# _mMinY = None
# _mWidth = None
# _mHeight = None
# _mSrcImgW = None
# mSrcImgH = None
#load system libraries
from SimpleCV.base import *
from SimpleCV.Color import *
import copy
class FeatureSet(list):
"""
**SUMMARY**
FeatureSet is a class extended from Python's list which has special functions so that it is useful for handling feature metadata on an image.
In general, functions dealing with attributes will return numpy arrays, and functions dealing with sorting or filtering will return new FeatureSets.
**EXAMPLE**
>>> image = Image("/path/to/image.png")
>>> lines = image.findLines() #lines are the feature set
>>> lines.draw()
>>> lines.x()
>>> lines.crop()
"""
def __getitem__(self,key):
"""
**SUMMARY**
Returns a FeatureSet when sliced. Previously used to
return list. Now it is possible to use FeatureSet member
functions on sub-lists
"""
if type(key) is types.SliceType: #Or can use 'try:' for speed
return FeatureSet(list.__getitem__(self, key))
else:
return list.__getitem__(self,key)
def __getslice__(self, i, j):
"""
Deprecated since python 2.0, now using __getitem__
"""
return self.__getitem__(slice(i,j))
def count(self):
'''
This function returns the length / count of the all the items in the FeatureSet
'''
return len(self)
def draw(self, color = Color.GREEN,width=1, autocolor = False, alpha=-1):
"""
**SUMMARY**
Call the draw() method on each feature in the FeatureSet.
**PARAMETERS**
* *color* - The color to draw the object. Either an BGR tuple or a member of the :py:class:`Color` class.
* *width* - The width to draw the feature in pixels. A value of -1 usually indicates a filled region.
* *autocolor* - If true a color is randomly selected for each feature.
**RETURNS**
Nada. Nothing. Zilch.
**EXAMPLE**
>>> img = Image("lenna")
>>> feats = img.findBlobs()
>>> feats.draw(color=Color.PUCE, width=3)
>>> img.show()
"""
for f in self:
if(autocolor):
color = Color().getRandom()
if alpha != -1:
f.draw(color=color,width=width,alpha=alpha)
else:
f.draw(color=color,width=width)
def show(self, color = Color.GREEN, autocolor = False,width=1):
"""
**SUMMARY**
This function will automatically draw the features on the image and show it.
It is basically a shortcut for development: it draws every feature and then shows the parent image.
**PARAMETERS**
* *color* - The color to draw the object. Either an BGR tuple or a member of the :py:class:`Color` class.
* *width* - The width to draw the feature in pixels. A value of -1 usually indicates a filled region.
* *autocolor* - If true a color is randomly selected for each feature.
**RETURNS**
Nada. Nothing. Zilch.
**EXAMPLE**
>>> img = Image("logo")
>>> feat = img.findBlobs()
>>> if feat: feat.draw()
>>> img.show()
"""
self.draw(color, width, autocolor)
self[-1].image.show()
def reassignImage(self, newImg):
"""
**SUMMARY**
Return a new featureset where the features are assigned to a new image.
**PARAMETERS**
* *img* - the new image to which to assign the feature.
.. Warning::
THIS DOES NOT PERFORM A SIZE CHECK. IF YOUR NEW IMAGE IS NOT THE EXACT SAME SIZE YOU WILL CERTAINLY CAUSE ERRORS.
**EXAMPLE**
>>> img = Image("lenna")
>>> img2 = img.invert()
>>> l = img.findLines()
>>> l2 = l.reassignImage(img2)
>>> l2.show()
"""
retVal = FeatureSet()
for i in self:
retVal.append(i.reassign(newImg))
return retVal
def x(self):
"""
**SUMMARY**
Returns a numpy array of the x (horizontal) coordinate of each feature.
**RETURNS**
A numpy array.
**EXAMPLE**
>>> img = Image("lenna")
>>> feats = img.findBlobs()
>>> xs = feats.x()
>>> print xs
"""
return np.array([f.x for f in self])
def y(self):
"""
**SUMMARY**
Returns a numpy array of the y (vertical) coordinate of each feature.
**RETURNS**
A numpy array.
**EXAMPLE**
>>> img = Image("lenna")
>>> feats = img.findBlobs()
>>> xs = feats.y()
>>> print xs
"""
return np.array([f.y for f in self])
def coordinates(self):
"""
**SUMMARY**
Returns a 2d numpy array of the x,y coordinates of each feature. This
is particularly useful if you want to use Scipy's Spatial Distance module
**RETURNS**
A numpy array of all the positions in the featureset.
**EXAMPLE**
>>> img = Image("lenna")
>>> feats = img.findBlobs()
>>> xs = feats.coordinates()
>>> print xs
"""
return np.array([[f.x, f.y] for f in self])
def center(self):
return self.coordinates()
def area(self):
"""
**SUMMARY**
Returns a numpy array of the area of each feature in pixels.
**RETURNS**
A numpy array of all the positions in the featureset.
**EXAMPLE**
>>> img = Image("lenna")
>>> feats = img.findBlobs()
>>> xs = feats.area()
>>> print xs
"""
return np.array([f.area() for f in self])
def sortArea(self):
"""
**SUMMARY**
Returns a new FeatureSet sorted by area, smallest features first (the largest blob is last).
**RETURNS**
A featureset sorted based on area.
**EXAMPLE**
>>> img = Image("lenna")
>>> feats = img.findBlobs()
>>> feats = feats.sortArea()
>>> print feats[-1] # biggest blob
>>> print feats[0] # smallest blob
"""
return FeatureSet(sorted(self, key = lambda f: f.area()))
def sortX(self):
"""
**SUMMARY**
Returns a new FeatureSet, with the smallest x coordinates features first.
**RETURNS**
A featureset sorted based on area.
**EXAMPLE**
>>> img = Image("lenna")
>>> feats = img.findBlobs()
>>> feats = feats.sortX()
>>> print feats[-1] # biggest blob
>>> print feats[0] # smallest blob
"""
return FeatureSet(sorted(self, key = lambda f: f.x))
def sortY(self):
"""
**SUMMARY**
Returns a new FeatureSet, with the smallest y coordinates features first.
**RETURNS**
A featureset sorted based on area.
**EXAMPLE**
>>> img = Image("lenna")
>>> feats = img.findBlobs()
>>> feats = feats.sortY()
>>> print feats[-1] # biggest blob
>>> print feats[0] # smallest blob
"""
return FeatureSet(sorted(self, key = lambda f: f.y))
def distanceFrom(self, point = (-1, -1)):
"""
**SUMMARY**
Returns a numpy array of the distance each Feature is from a given coordinate.
Default is the center of the image.
**PARAMETERS**
* *point* - A point on the image from which we will calculate distance.
**RETURNS**
A numpy array of distance values.
**EXAMPLE**
>>> img = Image("lenna")
>>> feats = img.findBlobs()
>>> d = feats.distanceFrom()
>>> d[0] #show the 0th blobs distance to the center.
**TO DO**
Make this accept other features to measure from.
"""
if ((point[0] == -1 or point[1] == -1) and len(self)):
point = self[0].image.size()
return spsd.cdist(self.coordinates(), [point])[:,0]
def sortDistance(self, point = (-1, -1)):
"""
**SUMMARY**
Returns a sorted FeatureSet with the features closest to a given coordinate first.
Default is from the center of the image.
**PARAMETERS**
* *point* - A point on the image from which we will calculate distance.
**RETURNS**
A numpy array of distance values.
**EXAMPLE**
>>> img = Image("lenna")
>>> feats = img.findBlobs()
>>> d = feats.sortDistance()
>>> d[-1].show() #show the blob farthest from the center.
"""
return FeatureSet(sorted(self, key = lambda f: f.distanceFrom(point)))
def distancePairs(self):
"""
**SUMMARY**
Returns the square-form of pairwise distances for the featureset.
The resulting N x N array can be used to quickly look up distances
between features.
**RETURNS**
A NxN np matrix of distance values.
**EXAMPLE**
>>> img = Image("lenna")
>>> feats = img.findBlobs()
>>> d = feats.distancePairs()
>>> print d
"""
return spsd.squareform(spsd.pdist(self.coordinates()))
def angle(self):
"""
**SUMMARY**
Return a numpy array of the angles (theta) of each feature.
Note that theta is given in degrees, with 0 being horizontal.
**RETURNS**
An array of angle values corresponding to the features.
**EXAMPLE**
>>> img = Image("lenna")
>>> l = img.findLines()
>>> angs = l.angle()
>>> print angs
"""
return np.array([f.angle() for f in self])
def sortAngle(self, theta = 0):
"""
Return a sorted FeatureSet with the features closest to a given angle first.
Note that theta is given in radians, with 0 being horizontal.
**RETURNS**
An array of angle values corresponding to the features.
**EXAMPLE**
>>> img = Image("lenna")
>>> l = img.findLines()
>>> l = l.sortAngle()
>>> print angs
"""
return FeatureSet(sorted(self, key = lambda f: abs(f.angle() - theta)))
def length(self):
"""
**SUMMARY**
Return a numpy array of the length (longest dimension) of each feature.
**RETURNS**
A numpy array of the length, in pixels, of each feature object.
**EXAMPLE**
>>> img = Image("Lenna")
>>> l = img.findLines()
>>> lengt = l.length()
>>> lengt[0] # length of the 0th element.
"""
return np.array([f.length() for f in self])
def sortLength(self):
"""
**SUMMARY**
Return a FeatureSet sorted by length, with the shortest features first (the longest feature is last).
**RETURNS**
A sorted FeatureSet.
**EXAMPLE**
>>> img = Image("Lenna")
>>> l = img.findLines().sortLength()
>>> l[-1] # the longest line.
"""
return FeatureSet(sorted(self, key = lambda f: f.length()))
def meanColor(self):
"""
**SUMMARY**
Return a numpy array of the average color of the area covered by each Feature.
**RETURNS**
Returns an array of RGB triplets the correspond to the mean color of the feature.
**EXAMPLE**
>>> img = Image("lenna")
>>> kp = img.findKeypoints()
>>> c = kp.meanColor()
"""
return np.array([f.meanColor() for f in self])
def colorDistance(self, color = (0, 0, 0)):
"""
**SUMMARY**
Return a numpy array of the distance each feature's average color is from
a given color tuple (default black, so colorDistance() returns intensity)
**PARAMETERS**
* *color* - The color to calculate the distance from.
**RETURNS**
The distance of the average color for the feature from given color as a numpy array.
**EXAMPLE**
>>> img = Image("lenna")
>>> circs = img.findCircle()
>>> d = circs.colorDistance(color=Color.BLUE)
>>> print d
"""
return spsd.cdist(self.meanColor(), [color])[:,0]
def sortColorDistance(self, color = (0, 0, 0)):
"""
Return a sorted FeatureSet with features closest to a given color first.
Default is black, so sortColorDistance() will return darkest to brightest
"""
return FeatureSet(sorted(self, key = lambda f: f.colorDistance(color)))
def filter(self, filterarray):
"""
**SUMMARY**
Return a FeatureSet which is filtered on a numpy boolean array. This
will let you use the attribute functions to easily screen Features out
of return FeatureSets.
**PARAMETERS**
* *filterarray* - A numpy array, matching the size of the feature set,
made of Boolean values, we return the true values and reject the False value.
**RETURNS**
The revised feature set.
**EXAMPLE**
Return all lines < 200px
>>> my_lines.filter(my_lines.length() < 200) # returns all lines < 200px
>>> my_blobs.filter(my_blobs.area() > 0.9 * my_blobs.length**2) # returns blobs that are nearly square
>>> my_lines.filter(abs(my_lines.angle()) < numpy.pi / 4) #any lines within 45 degrees of horizontal
>>> my_corners.filter(my_corners.x() - my_corners.y() > 0) #only return corners in the upper diagonal of the image
"""
return FeatureSet(list(np.array(self)[np.array(filterarray)]))
def width(self):
"""
**SUMMARY**
Returns a nparray which is the width of all the objects in the FeatureSet.
**RETURNS**
A numpy array of width values.
**EXAMPLE**
>>> img = Image("NotLenna")
>>> l = img.findLines()
>>> l.width()
"""
return np.array([f.width() for f in self])
def height(self):
"""
Returns a nparray which is the height of all the objects in the FeatureSet
**RETURNS**
A numpy array of width values.
**EXAMPLE**
>>> img = Image("NotLenna")
>>> l = img.findLines()
>>> l.height()
"""
return np.array([f.height() for f in self])
def crop(self):
"""
**SUMMARY**
Returns a nparray with the cropped features as SimpleCV image.
**RETURNS**
A SimpleCV image cropped to each image.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs(128)
>>> for b in blobs:
>>> newImg = b.crop()
>>> newImg.show()
>>> time.sleep(1)
"""
return np.array([f.crop() for f in self])
def inside(self,region):
"""
**SUMMARY**
Return only the features inside the region, where the region can be a bounding box,
bounding circle, a list of tuples in a closed polygon, or any other feature.
**PARAMETERS**
* *region*
* A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
* A bounding circle of the form (x,y,r)
* A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
* Any two dimensional feature (e.g. blobs, circle ...)
**RETURNS**
Returns a featureset of features that are inside the region.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs()
>>> b = blobs[-1]
>>> lines = img.findLines()
>>> inside = lines.inside(b)
**NOTE**
This currently performs a bounding box test, not a full polygon test for speed.
"""
fs = FeatureSet()
for f in self:
if(f.isContainedWithin(region)):
fs.append(f)
return fs
def outside(self,region):
"""
**SUMMARY**
Return only the features outside the region, where the region can be a bounding box,
bounding circle, a list of tuples in a closed polygon, or any other feature.
**PARAMETERS**
* *region*
* A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
* A bounding circle of the form (x,y,r)
* A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
* Any two dimensional feature (e.g. blobs, circle ...)
**RETURNS**
Returns a featureset of features that are outside the region.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs()
>>> b = blobs[-1]
>>> lines = img.findLines()
>>> outside = lines.outside(b)
**NOTE**
This currently performs a bounding box test, not a full polygon test for speed.
"""
fs = FeatureSet()
for f in self:
if(f.isNotContainedWithin(region)):
fs.append(f)
return fs
def overlaps(self,region):
"""
**SUMMARY**
Return only the features that overlap the region, where the region can be a bounding box,
bounding circle, a list of tuples in a closed polygon, or any other feature.
**PARAMETERS**
* *region*
* A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
* A bounding circle of the form (x,y,r)
* A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
* Any two dimensional feature (e.g. blobs, circle ...)
**RETURNS**
Returns a featureset of features that overlap the region.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs()
>>> b = blobs[-1]
>>> lines = img.findLines()
>>> outside = lines.overlaps(b)
**NOTE**
This currently performs a bounding box test, not a full polygon test for speed.
"""
fs = FeatureSet()
for f in self:
if( f.overlaps(region) ):
fs.append(f)
return fs
def above(self,region):
"""
**SUMMARY**
Return only the features that are above the region, where the region can be a bounding box,
bounding circle, a list of tuples in a closed polygon, or any other feature.
**PARAMETERS**
* *region*
* A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
* A bounding circle of the form (x,y,r)
* A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
* Any two dimensional feature (e.g. blobs, circle ...)
**RETURNS**
Returns a featureset of features that are above the region.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs()
>>> b = blobs[-1]
>>> lines = img.findLines()
>>> outside = lines.above(b)
**NOTE**
This currently performs a bounding box test, not a full polygon test for speed.
"""
fs = FeatureSet()
for f in self:
if(f.above(region)):
fs.append(f)
return fs
def below(self,region):
"""
**SUMMARY**
Return only the features below the region, where the region can be a bounding box,
bounding circle, a list of tuples in a closed polygon, or any other feature.
**PARAMETERS**
* *region*
* A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
* A bounding circle of the form (x,y,r)
* A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
* Any two dimensional feature (e.g. blobs, circle ...)
**RETURNS**
Returns a featureset of features that are below the region.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs()
>>> b = blobs[-1]
>>> lines = img.findLines()
>>> inside = lines.below(b)
**NOTE**
This currently performs a bounding box test, not a full polygon test for speed.
"""
fs = FeatureSet()
for f in self:
if(f.below(region)):
fs.append(f)
return fs
def left(self,region):
"""
**SUMMARY**
Return only the features left of the region, where the region can be a bounding box,
bounding circle, a list of tuples in a closed polygon, or any other feature.
**PARAMETERS**
* *region*
* A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
* A bounding circle of the form (x,y,r)
* A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
* Any two dimensional feature (e.g. blobs, circle ...)
**RETURNS**
Returns a featureset of features that are left of the region.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs()
>>> b = blobs[-1]
>>> lines = img.findLines()
>>> left = lines.left(b)
**NOTE**
This currently performs a bounding box test, not a full polygon test for speed.
"""
fs = FeatureSet()
for f in self:
if(f.left(region)):
fs.append(f)
return fs
def right(self,region):
"""
**SUMMARY**
Return only the features right of the region, where the region can be a bounding box,
bounding circle, a list of tuples in a closed polygon, or any other feature.
**PARAMETERS**
* *region*
* A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
* A bounding circle of the form (x,y,r)
* A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
* Any two dimensional feature (e.g. blobs, circle ...)
**RETURNS**
Returns a featureset of features that are right of the region.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs()
>>> b = blobs[-1]
>>> lines = img.findLines()
>>> right = lines.right(b)
**NOTE**
This currently performs a bounding box test, not a full polygon test for speed.
"""
fs = FeatureSet()
for f in self:
if(f.right(region)):
fs.append(f)
return fs
def onImageEdge(self, tolerance=1):
"""
**SUMMARY**
The method returns a feature set of features that are on or "near" the edge of
the image. This is really helpful for removing features that are edge effects.
**PARAMETERS**
* *tolerance* - the distance in pixels from the edge at which a feature
qualifies as being "on" the edge of the image.
**RETURNS**
Returns a featureset of features that are on the edge of the image.
**EXAMPLE**
>>> img = Image("./sampleimages/EdgeTest1.png")
>>> blobs = img.findBlobs()
>>> es = blobs.onImageEdge()
>>> es.draw(color=Color.RED)
>>> img.show()
"""
fs = FeatureSet()
for f in self:
if(f.onImageEdge(tolerance)):
fs.append(f)
return fs
def notOnImageEdge(self, tolerance=1):
"""
**SUMMARY**
The method returns a feature set of features that are not on or "near" the edge of
the image. This is really helpful for removing features that are edge effects.
**PARAMETERS**
* *tolerance* - the distance in pixels from the edge at which a feature
qualifies as being "on" the edge of the image.
**RETURNS**
Returns a featureset of features that are not on the edge of the image.
**EXAMPLE**
>>> img = Image("./sampleimages/EdgeTest1.png")
>>> blobs = img.findBlobs()
>>> es = blobs.notOnImageEdge()
>>> es.draw(color=Color.RED)
>>> img.show()
"""
fs = FeatureSet()
for f in self:
if(f.notOnImageEdge(tolerance)):
fs.append(f)
return fs
def topLeftCorners(self):
"""
**SUMMARY**
This method returns the top left corner of each feature's bounding box.
**RETURNS**
A numpy array of x,y position values.
**EXAMPLE**
>>> img = Image("./sampleimages/EdgeTest1.png")
>>> blobs = img.findBlobs()
>>> tl = blobs.topLeftCorners()
>>> print tl[0]
"""
return np.array([f.topLeftCorner() for f in self])
def bottomLeftCorners(self):
"""
**SUMMARY**
This method returns the bottom left corner of each feature's bounding box.
**RETURNS**
A numpy array of x,y position values.
**EXAMPLE**
>>> img = Image("./sampleimages/EdgeTest1.png")
>>> blobs = img.findBlobs()
>>> bl = blobs.bottomLeftCorners()
>>> print bl[0]
"""
return np.array([f.bottomLeftCorner() for f in self])
def topLeftCorners(self):
"""
**SUMMARY**
This method returns the top left corner of each feature's bounding box.
**RETURNS**
A numpy array of x,y position values.
**EXAMPLE**
>>> img = Image("./sampleimages/EdgeTest1.png")
>>> blobs = img.findBlobs()
>>> tl = blobs.topLeftCorners()
>>> print tl[0]
"""
return np.array([f.topLeftCorner() for f in self])
def topRightCorners(self):
"""
**SUMMARY**
This method returns the top right corner of each feature's bounding box.
**RETURNS**
A numpy array of x,y position values.
**EXAMPLE**
>>> img = Image("./sampleimages/EdgeTest1.png")
>>> blobs = img.findBlobs()
>>> tr = blobs.topRightCorners()
>>> print tr[0]
"""
return np.array([f.topRightCorner() for f in self])
def bottomRightCorners(self):
"""
**SUMMARY**
This method returns the bottom right corner of each feature's bounding box.
**RETURNS**
A numpy array of x,y position values.
**EXAMPLE**
>>> img = Image("./sampleimages/EdgeTest1.png")
>>> blobs = img.findBlobs()
>>> br = blobs.bottomRightCorners()
>>> print br[0]
"""
return np.array([f.bottomRightCorner() for f in self])
def aspectRatios(self):
"""
**SUMMARY**
Return the aspect ratio of all the features in the feature set. For our purposes,
the aspect ratio is max(width,height)/min(width,height).
**RETURNS**
A numpy array of the aspect ratio of the features in the featureset.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> print blobs.aspectRatios()
"""
return np.array([f.aspectRatio() for f in self])
def cluster(self,method="kmeans",properties=None,k=3):
"""
**SUMMARY**
This function clusters the blobs in the featureSet based on the properties. Properties can be "color", "shape" or "position" of blobs.
Clustering is done using K-Means or Hierarchical clustering(Ward) algorithm.
**PARAMETERS**
* *properties* - It should be a list with any combination of "color", "shape", "position". properties = ["color","position"]. properties = ["position","shape"]. properties = ["shape"]
* *method* - if method is "kmeans", it will cluster using K-Means algorithm, if the method is "hierarchical", no need to spicify the number of clusters
* *k* - The number of clusters(kmeans).
**RETURNS**
A list of featureset, each being a cluster itself.
**EXAMPLE**
>>> img = Image("lenna")
>>> blobs = img.findBlobs()
>>> clusters = blobs.cluster(method="kmeans",properties=["color"],k=5)
>>> for i in clusters:
>>> i.draw(color=Color.getRandom(),width=5)
>>> img.show()
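A hierarchical variant of the same call (a sketch; with method="hierarchical" the
number of clusters is chosen internally as sqrt(n), so the k argument is ignored):
>>> wclusters = blobs.cluster(method="hierarchical", properties=["shape"])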
"""
try :
from sklearn.cluster import KMeans, Ward
from sklearn import __version__
except :
logger.warning("install scikits-learning package")
return
X = [] #List of feature vector of each blob
if not properties:
properties = ['color','shape','position']
if k > len(self):
logger.warning("Number of clusters cannot be greater then the number of blobs in the featureset")
return
for i in self:
featureVector = []
if 'color' in properties:
featureVector.extend(i.mAvgColor)
if 'shape' in properties:
featureVector.extend(i.mHu)
if 'position' in properties:
featureVector.extend(i.extents())
if not featureVector :
logger.warning("properties parameter is not specified properly")
return
X.append(featureVector)
if method == "kmeans":
# Ignore minor version numbers.
sklearn_version = re.search(r'\d+\.\d+', __version__).group()
if (float(sklearn_version) > 0.11):
k_means = KMeans(init='random', n_clusters=k, n_init=10).fit(X)
else:
k_means = KMeans(init='random', k=k, n_init=10).fit(X)
KClusters = [ FeatureSet([]) for i in range(k)]
for i in range(len(self)):
KClusters[k_means.labels_[i]].append(self[i])
return KClusters
if method == "hierarchical":
ward = Ward(n_clusters=int(sqrt(len(self)))).fit(X) #n_clusters = sqrt(n)
WClusters = [ FeatureSet([]) for i in range(int(sqrt(len(self))))]
for i in range(len(self)):
WClusters[ward.labels_[i]].append(self[i])
return WClusters
@property
def image(self):
if not len(self):
return None
return self[0].image
@image.setter
def image(self, i):
for f in self:
f.image = i
### ----------------------------------------------------------------------------
### ----------------------------------------------------------------------------
### ----------------------------FEATURE CLASS-----------------------------------
### ----------------------------------------------------------------------------
### ----------------------------------------------------------------------------
class Feature(object):
"""
**SUMMARY**
The Feature object is an abstract class which real features descend from.
Each feature object has:
* a draw() method,
* an image property, referencing the originating Image object
* x and y coordinates
* default functions for determining angle, area, meanColor, etc for FeatureSets
* in the Feature class, these functions assume the feature is 1px
"""
x = 0.00
y = 0.00
_mMaxX = None
_mMaxY = None
_mMinX = None
_mMinY = None
_mWidth = None
_mHeight = None
_mSrcImgW = None
_mSrcImgH = None
# This is 2.0 refactoring
mBoundingBox = None # THIS SHALT BE TOP LEFT (X,Y) THEN W H i.e. [X,Y,W,H]
mExtents = None # THIS SHALT BE [MAXX,MINX,MAXY,MINY]
points = None # THIS SHALT BE (x,y) tuples in the ORDER [(TopLeft),(TopRight),(BottomLeft),(BottomRight)]
image = "" #parent image
#points = []
#boundingBox = []
def __init__(self, i, at_x, at_y, points):
#THE COVENANT IS THAT YOU PROVIDE THE POINTS IN THE SPECIFIED FORMAT AND ALL OTHER VALUES SHALT FLOW
self.x = at_x
self.y = at_y
self.image = i
self.points = points
self._updateExtents(new_feature=True)
def reassign(self, img):
"""
**SUMMARY**
Reassign the image of this feature and return an updated copy of the feature.
**PARAMETERS**
* *img* - the new image to which to assign the feature.
.. Warning::
THIS DOES NOT PERFORM A SIZE CHECK. IF YOUR NEW IMAGE IS NOT THE EXACT SAME SIZE YOU WILL CERTAINLY CAUSE ERRORS.
**EXAMPLE**
>>> img = Image("lenna")
>>> img2 = img.invert()
>>> l = img.findLines()
>>> l2 = l[0].reassign(img2)
>>> l2.show()
"""
retVal = copy.deepcopy(self)
if( self.image.width != img.width or
self.image.height != img.height ):
warnings.warn("DON'T REASSIGN IMAGES OF DIFFERENT SIZES")
retVal.image = img
return retVal
def corners(self):
self._updateExtents()
return self.points
def coordinates(self):
"""
**SUMMARY**
Returns the x,y position of the feature. This is usually the center coordinate.
**RETURNS**
Returns an (x,y) tuple of the position of the feature.
**EXAMPLE**
>>> img = Image("aerospace.png")
>>> blobs = img.findBlobs()
>>> for b in blobs:
>>> print b.coordinates()
"""
return np.array([self.x, self.y])
def draw(self, color = Color.GREEN):
"""
**SUMMARY**
This method will draw the feature on the source image.
**PARAMETERS**
* *color* - The color as an RGB tuple to render the image.
**RETURNS**
Nothing.
**EXAMPLE**
>>> img = Image("RedDog2.jpg")
>>> blobs = img.findBlobs()
>>> blobs[-1].draw()
>>> img.show()
"""
self.image[self.x, self.y] = color
def show(self, color = Color.GREEN):
"""
**SUMMARY**
This function will automatically draw the features on the image and show it.
**RETURNS**
Nothing.
**EXAMPLE**
>>> img = Image("logo")
>>> feat = img.findBlobs()
>>> feat[-1].show() #window pops up.
"""
self.draw(color)
self.image.show()
def distanceFrom(self, point = (-1, -1)):
"""
**SUMMARY**
Given a point (default to center of the image), return the euclidean distance of x,y from this point.
**PARAMETERS**
* *point* - The point, as an (x,y) tuple on the image to measure distance from.
**RETURNS**
The distance as a floating point value in pixels.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> blobs[-1].distanceFrom(blobs[-2].coordinates())
"""
if (point[0] == -1 or point[1] == -1):
point = np.array(self.image.size()) / 2
return spsd.euclidean(point, [self.x, self.y])
def meanColor(self):
"""
**SUMMARY**
Return the average color within the feature as a tuple.
**RETURNS**
An RGB color tuple.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> for b in blobs:
>>> if (b.meanColor() == Color.WHITE):
>>> print "Found a white thing"
"""
return self.image[self.x, self.y]
def colorDistance(self, color = (0, 0, 0)):
"""
**SUMMARY**
Return the euclidean color distance of the color tuple at x,y from a given color (default black).
**PARAMETERS**
* *color* - An RGB triplet to calculate from which to calculate the color distance.
**RETURNS**
A floating point color distance value.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> for b in blobs:
>>> print b.colorDistance(Color.WHITE)
"""
return spsd.euclidean(np.array(color), np.array(self.meanColor()))
def angle(self):
"""
**SUMMARY**
Return the angle (theta) in degrees of the feature. The default is 0 (horizontal).
.. Warning::
This is not a valid operation for all features.
**RETURNS**
An angle value in degrees.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> for b in blobs:
>>> if b.angle() == 0:
>>> print "I AM HORIZONTAL."
**TODO**
Double check that values are being returned consistently.
"""
return 0
def length(self):
"""
**SUMMARY**
This method returns the longest dimension of the feature (i.e max(width,height)).
**RETURNS**
A floating point length value.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> for b in blobs:
>>> if b.length() > 200:
>>> print "OH MY! - WHAT A BIG FEATURE YOU HAVE!"
>>> print "---I bet you say that to all the features."
**TODO**
Should this be sqrt(x*x+y*y)?
"""
return float(np.max([self.width(),self.height()]))
def distanceToNearestEdge(self):
"""
**SUMMARY**
This method returns the distance, in pixels, from the nearest image edge.
**RETURNS**
The integer distance to the nearest edge.
**EXAMPLE**
>>> img = Image("../sampleimages/EdgeTest1.png")
>>> b = img.findBlobs()
>>> b[0].distanceToNearestEdge()
"""
w = self.image.width
h = self.image.height
return np.min([self._mMinX,self._mMinY, w-self._mMaxX,h-self._mMaxY])
def onImageEdge(self,tolerance=1):
"""
**SUMMARY**
This method returns True if the feature is less than `tolerance`
pixels away from the nearest edge.
**PARAMETERS**
* *tolerance* - the distance in pixels at which a feature qualifies
as being on the image edge.
**RETURNS**
True if the feature is on the edge, False otherwise.
**EXAMPLE**
>>> img = Image("../sampleimages/EdgeTest1.png")
>>> b = img.findBlobs()
>>> if(b[0].onImageEdge()):
>>> print "HELP! I AM ABOUT TO FALL OFF THE IMAGE"
"""
# this has to be one to deal with blob library weirdness that goes deep down to opencv
return ( self.distanceToNearestEdge() <= tolerance )
def notOnImageEdge(self,tolerance=1):
"""
**SUMMARY**
This method returns True if the feature is greater than `tolerance`
pixels away from the nearest edge.
**PARAMETERS**
* *tolerance* - the distance in pixels at which a feature qualifies
as not being on the image edge.
**RETURNS**
True if the feature is not on the edge of the image, False otherwise.
**EXAMPLE**
>>> img = Image("../sampleimages/EdgeTest1.png")
>>> b = img.findBlobs()
>>> if(b[0].notOnImageEdge()):
>>> print "I am safe and sound."
"""
# this has to be one to deal with blob library weirdness that goes deep down to opencv
return ( self.distanceToNearestEdge() > tolerance )
def aspectRatio(self):
"""
**SUMMARY**
Return the aspect ratio of the feature, which for our purposes
is max(width,height)/min(width,height).
**RETURNS**
A single floating point value of the aspect ratio.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> blobs[0].aspectRatio()
"""
self._updateExtents()
return self.mAspectRatio
def area(self):
"""
**SUMMARY**
Returns the area (number of pixels) covered by the feature.
**RETURNS**
An integer area of the feature.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> for b in blobs:
>>> if b.area() > 200:
>>> print b.area()
"""
return self.width() * self.height()
def width(self):
"""
**SUMMARY**
Returns the width of the feature.
**RETURNS**
An integer value for the feature's width.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> for b in blobs:
>>> if b.width() > b.height():
>>> print "wider than tall"
>>> b.draw()
>>> img.show()
"""
self._updateExtents()
return self._mWidth
def height(self):
"""
**SUMMARY**
Returns the height of the feature.
**RETURNS**
An integer value of the feature's height.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> for b in blobs:
>>> if b.width() > b.height():
>>> print "wider than tall"
>>> b.draw()
>>> img.show()
"""
self._updateExtents()
return self._mHeight
def crop(self):
"""
**SUMMARY**
This function crops the source image to the location of the feature and returns
a new SimpleCV image.
**RETURNS**
A SimpleCV image that is cropped to the feature position and size.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> big = blobs[-1].crop()
>>> big.show()
"""
return self.image.crop(self.x, self.y, self.width(), self.height(), centered = True)
def __repr__(self):
return "%s.%s at (%d,%d)" % (self.__class__.__module__, self.__class__.__name__, self.x, self.y)
def _updateExtents(self, new_feature=False):
# mBoundingBox = None # THIS SHALT BE TOP LEFT (X,Y) THEN W H i.e. [X,Y,W,H]
# mExtents = None # THIS SHALT BE [MAXX,MINX,MAXY,MINY]
# points = None # THIS SHALT BE (x,y) tuples in the ORDER [(TopLeft),(TopRight),(BottomLeft),(BottomRight)]
max_x = self._mMaxX
min_x = self._mMinX
max_y = self._mMaxY
min_y = self._mMinY
width = self._mWidth
height = self._mHeight
extents = self.mExtents
bounding_box = self.mBoundingBox
#if new_feature or None in [self._mMaxX, self._mMinX, self._mMaxY, self._mMinY,
# self._mWidth, self._mHeight, self.mExtents, self.mBoundingBox]:
if new_feature or None in [max_x, min_x, max_y, min_y, width, height, extents, bounding_box]:
max_x = max_y = float("-infinity")
min_x = min_y = float("infinity")
for p in self.points:
if (p[0] > max_x):
max_x = p[0]
if (p[0] < min_x):
min_x = p[0]
if (p[1] > max_y):
max_y = p[1]
if (p[1] < min_y):
min_y = p[1]
width = max_x - min_x
height = max_y - min_y
if (width <= 0):
width = 1
if (height <= 0):
height = 1
self.mBoundingBox = [min_x, min_y, width, height]
self.mExtents = [max_x, min_x, max_y, min_y]
if width > height:
self.mAspectRatio = float(width/height)
else:
self.mAspectRatio = float(height/width)
self._mMaxX = max_x
self._mMinX = min_x
self._mMaxY = max_y
self._mMinY = min_y
self._mWidth = width
self._mHeight = height
def boundingBox(self):
"""
**SUMMARY**
This function returns a rectangle which bounds the blob.
**RETURNS**
A list of [x, y, w, h] where (x, y) are the top left point of the rectangle
and w, h are its width and height respectively.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> print blobs[-1].boundingBox()
"""
self._updateExtents()
return self.mBoundingBox
def extents(self):
"""
**SUMMARY**
This function returns the maximum and minimum x and y values for the feature and
returns them as a tuple.
**RETURNS**
A tuple of the extents of the feature. The order is (MaxX,MaxY,MinX,MinY).
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> print blobs[-1].extents()
"""
self._updateExtents()
return self.mExtents
def minY(self):
"""
**SUMMARY**
This method returns the minimum y value of the bounding box of
the feature.
**RETURNS**
An integer value of the minimum y value of the feature.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> print blobs[-1].minY()
"""
self._updateExtents()
return self._mMinY
def maxY(self):
"""
**SUMMARY**
This method returns the maximum y value of the bounding box of
the feature.
**RETURNS**
An integer value of the maximum y value of the feature.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> print blobs[-1].maxY()
"""
self._updateExtents()
return self._mMaxY
def minX(self):
"""
**SUMMARY**
This method returns the minimum x value of the bounding box of
the feature.
**RETURNS**
An integer value of the minimum x value of the feature.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> print blobs[-1].minX()
"""
self._updateExtents()
return self._mMinX
def maxX(self):
"""
**SUMMARY**
This method returns the maximum x value of the bounding box of
the feature.
**RETURNS**
An integer value of the maximum x value of the feature.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> print blobs[-1].maxX()
"""
self._updateExtents()
return self._mMaxX
def topLeftCorner(self):
"""
**SUMMARY**
This method returns the top left corner of the bounding box of
the blob as an (x,y) tuple.
**RESULT**
Returns a tuple of the top left corner.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> print blobs[-1].topLeftCorner()
"""
self._updateExtents()
return (self._mMinX,self._mMinY)
def bottomRightCorner(self):
"""
**SUMMARY**
This method returns the bottom right corner of the bounding box of
the blob as an (x,y) tuple.
**RESULT**
Returns a tuple of the bottom right corner.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> print blobs[-1].bottomRightCorner()
"""
self._updateExtents()
return (self._mMaxX,self._mMaxY)
def bottomLeftCorner(self):
"""
**SUMMARY**
This method returns the bottom left corner of the bounding box of
the blob as an (x,y) tuple.
**RESULT**
Returns a tuple of the bottom left corner.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> print blobs[-1].bottomLeftCorner()
"""
self._updateExtents()
return (self._mMinX,self._mMaxY)
def topRightCorner(self):
"""
**SUMMARY**
This method returns the top right corner of the bounding box of
the blob as an (x,y) tuple.
**RESULT**
Returns a tuple of the top right corner.
**EXAMPLE**
>>> img = Image("OWS.jpg")
>>> blobs = img.findBlobs(128)
>>> print blobs[-1].topRightCorner()
"""
self._updateExtents()
return (self._mMaxX,self._mMinY)
def above(self,object):
"""
**SUMMARY**
Return true if the feature is above the object, where object can be a bounding box,
bounding circle, a list of tuples in a closed polygon, or any other feature.
**PARAMETERS**
* *object*
* A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
* A bounding circle of the form (x,y,r)
* A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
* Any two dimensional feature (e.g. blobs, circle ...)
**RETURNS**
Returns a Boolean, True if the feature is above the object, False otherwise.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs()
>>> b = blobs[0]
>>> if( blobs[-1].above(b) ):
>>> print "above the biggest blob"
"""
if( isinstance(object,Feature) ):
return( self.maxY() < object.minY() )
elif( isinstance(object,tuple) or isinstance(object,np.ndarray) ):
return( self.maxY() < object[1] )
elif( isinstance(object,float) or isinstance(object,int) ):
return( self.maxY() < object )
else:
logger.warning("SimpleCV did not recognize the input type to feature.above(). This method only takes another feature, an (x,y) tuple, or a ndarray type.")
return None
def below(self,object):
"""
**SUMMARY**
Return true if the feature is below the object, where object can be a bounding box,
bounding circle, a list of tuples in a closed polygon, or any other feature.
**PARAMETERS**
* *object*
* A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
* A bounding circle of the form (x,y,r)
* A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
* Any two dimensional feature (e.g. blobs, circle ...)
**RETURNS**
Returns a Boolean, True if the feature is below the object, False otherwise.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs()
>>> b = blobs[0]
>>> if( blobs[-1].below(b) ):
>>> print "above the biggest blob"
"""
if( isinstance(object,Feature) ):
return( self.minY() > object.maxY() )
elif( isinstance(object,tuple) or isinstance(object,np.ndarray) ):
return( self.minY() > object[1] )
elif( isinstance(object,float) or isinstance(object,int) ):
return( self.minY() > object )
else:
logger.warning("SimpleCV did not recognize the input type to feature.below(). This method only takes another feature, an (x,y) tuple, or a ndarray type.")
return None
def right(self,object):
"""
**SUMMARY**
Return true if the feature is to the right of the object, where object can be a bounding box,
bounding circle, a list of tuples in a closed polygon, or any other feature.
**PARAMETERS**
* *object*
* A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
* A bounding circle of the form (x,y,r)
* A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
* Any two dimensional feature (e.g. blobs, circle ...)
**RETURNS**
Returns a Boolean, True if the feature is to the right of the object, False otherwise.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs()
>>> b = blobs[0]
>>> if( blobs[-1].right(b) ):
>>> print "right of the the blob"
"""
if( isinstance(object,Feature) ):
return( self.minX() > object.maxX() )
elif( isinstance(object,tuple) or isinstance(object,np.ndarray) ):
return( self.minX() > object[0] )
elif( isinstance(object,float) or isinstance(object,int) ):
return( self.minX() > object )
else:
logger.warning("SimpleCV did not recognize the input type to feature.right(). This method only takes another feature, an (x,y) tuple, or a ndarray type.")
return None
def left(self,object):
"""
**SUMMARY**
Return true if the feature is to the left of the object, where object can be a bounding box,
bounding circle, a list of tuples in a closed polygon, or any other feature.
**PARAMETERS**
* *object*
* A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
* A bounding circle of the form (x,y,r)
* A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
* Any two dimensional feature (e.g. blobs, circle ...)
**RETURNS**
Returns a Boolean, True if the feature is to the left of the object, False otherwise.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs()
>>> b = blobs[0]
>>> if( blobs[-1].left(b) ):
>>> print "left of the biggest blob"
"""
if( isinstance(object,Feature) ):
return( self.maxX() < object.minX() )
elif( isinstance(object,tuple) or isinstance(object,np.ndarray) ):
return( self.maxX() < object[0] )
elif( isinstance(object,float) or isinstance(object,int) ):
return( self.maxX() < object )
else:
logger.warning("SimpleCV did not recognize the input type to feature.left(). This method only takes another feature, an (x,y) tuple, or a ndarray type.")
return None
def contains(self,other):
"""
**SUMMARY**
Return true if the feature contains the object, where object can be a bounding box,
bounding circle, a list of tuples in a closed polygon, or any other feature.
**PARAMETERS**
* *object*
* A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
* A bounding circle of the form (x,y,r)
* A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
* Any two dimensional feature (e.g. blobs, circle ...)
**RETURNS**
Returns a Boolean, True if the feature contains the object, False otherwise.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs()
>>> b = blobs[0]
>>> if( blobs[-1].contains(b) ):
>>> print "this blob is contained in the biggest blob"
**NOTE**
This currently performs a bounding box test, not a full polygon test for speed.
"""
retVal = False
bounds = self.points
if( isinstance(other,Feature) ):# A feature
retVal = True
for p in other.points: # this isn't completely correct - only tests if points lie in poly, not edges.
p2 = (int(p[0]),int(p[1]))
retVal = self._pointInsidePolygon(p2,bounds)
if( not retVal ):
break
# a single point
elif( (isinstance(other,tuple) and len(other)==2) or ( isinstance(other,np.ndarray) and other.shape[0]==2) ):
retVal = self._pointInsidePolygon(other,bounds)
elif( isinstance(other,tuple) and len(other)==3 ): # A circle
#assume we are in x,y, r format
retVal = True
rr = other[2]*other[2]
x = other[0]
y = other[1]
for p in bounds:
test = ((x-p[0])*(x-p[0]))+((y-p[1])*(y-p[1]))
if( test < rr ):
retVal = False
break
elif( isinstance(other,tuple) and len(other)==4 and ( isinstance(other[0],float) or isinstance(other[0],int))):
retVal = ( self.maxX() <= other[0]+other[2] and
self.minX() >= other[0] and
self.maxY() <= other[1]+other[3] and
self.minY() >= other[1] )
elif(isinstance(other,list) and len(other) >= 4): # an arbitrary polygon
#everything else ....
retVal = True
for p in other:
test = self._pointInsidePolygon(p,bounds)
if(not test):
retVal = False
break
else:
logger.warning("SimpleCV did not recognize the input type to features.contains. This method only takes another blob, an (x,y) tuple, or a ndarray type.")
return False
return retVal
def overlaps(self, other):
"""
**SUMMARY**
Return true if the feature overlaps the object, where object can be a bounding box,
bounding circle, a list of tuples in a closed polygon, or any other feature.
**PARAMETERS**
* *object*
* A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
* A bounding circle of the form (x,y,r)
* A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
* Any two dimensional feature (e.g. blobs, circle ...)
**RETURNS**
Returns a Boolean, True if the feature overlaps object, False otherwise.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs()
>>> b = blobs[0]
>>> if( blobs[-1].overlaps(b) ):
>>> print "This blob overlaps the biggest blob"
Returns true if this blob contains at least one point, part of a collection
of points, or any part of a blob.
**NOTE**
This currently performs a bounding box test, not a full polygon test for speed.
"""
retVal = False
bounds = self.points
if( isinstance(other,Feature) ):# A feature
retVal = True
for p in other.points: # this isn't completely correct - only tests if points lie in poly, not edges.
retVal = self._pointInsidePolygon(p,bounds)
if( retVal ):
break
elif( (isinstance(other,tuple) and len(other)==2) or ( isinstance(other,np.ndarray) and other.shape[0]==2) ):
retVal = self._pointInsidePolygon(other,bounds)
elif( isinstance(other,tuple) and len(other)==3 and not isinstance(other[0],tuple)): # A circle
#assume we are in x,y, r format
retVal = False
rr = other[2]*other[2]
x = other[0]
y = other[1]
for p in bounds:
test = ((x-p[0])*(x-p[0]))+((y-p[1])*(y-p[1]))
if( test < rr ):
retVal = True
break
elif( isinstance(other,tuple) and len(other)==4 and ( isinstance(other[0],float) or isinstance(other[0],int))):
retVal = ( self.contains( (other[0],other[1] ) ) or # see if we contain any corner
self.contains( (other[0]+other[2],other[1] ) ) or
self.contains( (other[0],other[1]+other[3] ) ) or
self.contains( (other[0]+other[2],other[1]+other[3] ) ) )
elif(isinstance(other,list) and len(other) >= 3): # an arbitrary polygon
#everything else ....
retVal = False
for p in other:
test = self._pointInsidePolygon(p,bounds)
if(test):
retVal = True
break
else:
logger.warning("SimpleCV did not recognize the input type to features.overlaps. This method only takes another blob, an (x,y) tuple, or a ndarray type.")
return False
return retVal
def doesNotContain(self, other):
"""
**SUMMARY**
Return true if the feature does not contain the other object, where other can be a bounding box,
bounding circle, a list of tuples in a closed polygon, or any other feature.
**PARAMETERS**
* *other*
* A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
* A bounding circle of the form (x,y,r)
* A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
* Any two dimensional feature (e.g. blobs, circle ...)
**RETURNS**
Returns a Boolean, True if the feature does not contain the object, False otherwise.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs()
>>> b = blobs[0]
>>> if( blobs[-1].doesNotContain(b) ):
>>> print "above the biggest blob"
In other words, this returns True unless every point of the other object lies inside this feature.
"""
return not self.contains(other)
def doesNotOverlap( self, other):
"""
**SUMMARY**
Return true if the feature does not overlap the object other, where other can be a bounding box,
bounding circle, a list of tuples in a closed polygon, or any other feature.
**PARAMETERS**
* *other*
* A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
* A bounding circle of the form (x,y,r)
* A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
* Any two dimensional feature (e.g. blobs, circle ...)
**RETURNS**
Returns a Boolean, True if the feature does not Overlap the object, False otherwise.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs()
>>> b = blobs[0]
>>> if( blobs[-1].doesNotOverlap(b) ):
>>> print "does not over overlap biggest blob"
"""
return not self.overlaps( other)
def isContainedWithin(self,other):
"""
**SUMMARY**
Return true if the feature is contained within the object other, where other can be a bounding box,
bounding circle, a list of tuples in a closed polygon, or any other feature.
**PARAMETERS**
* *other*
* A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
* A bounding circle of the form (x,y,r)
* A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
* Any two dimensional feature (e.g. blobs, circle ...)
**RETURNS**
Returns a Boolean, True if the feature is contained within the object, False otherwise.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs()
>>> b = blobs[0]
>>> if( blobs[-1].isContainedWithin(b) ):
>>> print "inside the blob"
"""
retVal = True
bounds = self.points
if( isinstance(other,Feature) ): # another feature do the containment test
retVal = other.contains(self)
elif( isinstance(other,tuple) and len(other)==3 ): # a circle
#assume we are in x,y, r format
rr = other[2]*other[2] # radius squared
x = other[0]
y = other[1]
for p in bounds:
test = ((x-p[0])*(x-p[0]))+((y-p[1])*(y-p[1]))
if( test > rr ):
retVal = False
break
elif( isinstance(other,tuple) and len(other)==4 and # a bounding box
( isinstance(other[0],float) or isinstance(other[0],int))): # we assume a tuple of four is (x,y,w,h)
retVal = ( self.maxX() <= other[0]+other[2] and
self.minX() >= other[0] and
self.maxY() <= other[1]+other[3] and
self.minY() >= other[1] )
elif(isinstance(other,list) and len(other) > 2 ): # an arbitrary polygon
#everything else ....
retVal = True
for p in bounds:
test = self._pointInsidePolygon(p,other)
if(not test):
retVal = False
break
else:
logger.warning("SimpleCV did not recognize the input type to features.contains. This method only takes another blob, an (x,y) tuple, or a ndarray type.")
retVal = False
return retVal
def isNotContainedWithin(self,shape):
"""
**SUMMARY**
Return true if the feature is not contained within the shape, where shape can be a bounding box,
        bounding circle, a list of tuples in a closed polygon, or any other feature.
**PARAMETERS**
* *shape*
* A bounding box - of the form (x,y,w,h) where x,y is the upper left corner
* A bounding circle of the form (x,y,r)
* A list of x,y tuples defining a closed polygon e.g. ((x,y),(x,y),....)
* Any two dimensional feature (e.g. blobs, circle ...)
**RETURNS**
Returns a Boolean, True if the feature is not contained within the shape, False otherwise.
**EXAMPLE**
>>> img = Image("Lenna")
>>> blobs = img.findBlobs()
>>> b = blobs[0]
>>> if( blobs[-1].isNotContainedWithin(b) ):
>>> print "Not inside the biggest blob"
"""
return not self.isContainedWithin(shape)
def _pointInsidePolygon(self,point,polygon):
"""
        Returns True if the tuple point (x,y) is inside a polygon of the form ((a,b),(c,d),...,(a,b)). The polygon should be closed.
"""
# try:
# import cv2
# except:
# logger.warning("Unable to import cv2")
# return False
if( len(polygon) < 3 ):
logger.warning("feature._pointInsidePolygon - this is not a valid polygon")
return False
if( not isinstance(polygon,list)):
logger.warning("feature._pointInsidePolygon - this is not a valid polygon")
return False
#if( not isinstance(point,tuple) ):
#if( len(point) == 2 ):
# point = tuple(point)
#else:
# logger.warning("feature._pointInsidePolygon - this is not a valid point")
# return False
#if( cv2.__version__ == '$Rev:4557'):
counter = 0
retVal = True
p1 = None
#print "point: " + str(point)
poly = copy.deepcopy(polygon)
poly.append(polygon[0])
#for p2 in poly:
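        # Even-odd ray casting: walk the closed polygon edge by edge and count
        # how many edges a horizontal ray shot from `point` would cross; an odd
        # crossing count means the point lies inside the polygon.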
N = len(poly)
p1 = poly[0]
for i in range(1,N+1):
p2 = poly[i%N]
if( point[1] > np.min((p1[1],p2[1])) ):
if( point[1] <= np.max((p1[1],p2[1])) ):
if( point[0] <= np.max((p1[0],p2[0])) ):
if( p1[1] != p2[1] ):
                            test = float((point[1]-p1[1])*(p2[0]-p1[0]))/float(p2[1]-p1[1]) + p1[0]
if( p1[0] == p2[0] or point[0] <= test ):
counter = counter + 1
p1 = p2
        if( counter % 2 == 0 ):
            retVal = False
        return retVal
#else:
# result = cv2.pointPolygonTest(np.array(polygon,dtype='float32'),point,0)
# return result > 0
def boundingCircle(self):
"""
**SUMMARY**
This function calculates the minimum bounding circle of the blob in the image
as an (x,y,r) tuple
**RETURNS**
An (x,y,r) tuple where (x,y) is the center of the circle and r is the radius
**EXAMPLE**
>>> img = Image("RatMask.png")
>>> blobs = img.findBlobs()
>>> print blobs[-1].boundingCircle()
"""
try:
import cv2
except:
logger.warning("Unable to import cv2")
return None
# contour of the blob in image
contour = self.contour()
points = []
# list of contour points converted to suitable format to pass into cv2.minEnclosingCircle()
for pair in contour:
points.append([[pair[0], pair[1]]])
points = np.array(points)
        (cen, rad) = cv2.minEnclosingCircle(points)
return (cen[0], cen[1], rad)
#---------------------------------------------
| bsd-3-clause |
rubikloud/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
calpeyser/google-cloud-python | monitoring/google/cloud/monitoring/client.py | 2 | 23539 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the `Google Stackdriver Monitoring API (V3)`_.
Example::
>>> from google.cloud import monitoring
>>> client = monitoring.Client()
>>> query = client.query(minutes=5)
>>> print(query.as_dataframe()) # Requires pandas.
At present, the client supports querying of time series, metric descriptors,
and monitored resource descriptors.
.. _Google Stackdriver Monitoring API (V3):
https://cloud.google.com/monitoring/api/v3/
"""
import datetime
from google.cloud._helpers import _datetime_to_rfc3339
from google.cloud.client import ClientWithProject
from google.cloud.monitoring._http import Connection
from google.cloud.monitoring.group import Group
from google.cloud.monitoring.metric import Metric
from google.cloud.monitoring.metric import MetricDescriptor
from google.cloud.monitoring.metric import MetricKind
from google.cloud.monitoring.metric import ValueType
from google.cloud.monitoring.query import Query
from google.cloud.monitoring.resource import Resource
from google.cloud.monitoring.resource import ResourceDescriptor
from google.cloud.monitoring.timeseries import Point
from google.cloud.monitoring.timeseries import TimeSeries
_UTCNOW = datetime.datetime.utcnow # To be replaced by tests.
class Client(ClientWithProject):
"""Client to bundle configuration needed for API requests.
:type project: str
:param project: The target project. If not passed, falls back to the
default inferred from the environment.
:type credentials: :class:`~google.auth.credentials.Credentials`
:param credentials: (Optional) The OAuth2 Credentials to use for this
client. If not passed (and if no ``_http`` object is
passed), falls back to the default inferred from the
environment.
:type _http: :class:`~requests.Session`
:param _http: (Optional) HTTP object to make requests. Can be any object
that defines ``request()`` with the same interface as
:meth:`requests.Session.request`. If not passed, an
``_http`` object is created that is bound to the
``credentials`` for the current object.
This parameter should be considered private, and could
change in the future.
"""
SCOPE = ('https://www.googleapis.com/auth/monitoring.read',
'https://www.googleapis.com/auth/monitoring',
'https://www.googleapis.com/auth/cloud-platform')
"""The scopes required for authenticating as a Monitoring consumer."""
def __init__(self, project=None, credentials=None, _http=None):
super(Client, self).__init__(
project=project, credentials=credentials, _http=_http)
self._connection = Connection(self)
def query(self,
metric_type=Query.DEFAULT_METRIC_TYPE,
end_time=None,
days=0, hours=0, minutes=0):
"""Construct a query object for retrieving metric data.
Example::
>>> query = client.query(minutes=5)
>>> print(query.as_dataframe()) # Requires pandas.
:type metric_type: str
:param metric_type: The metric type name. The default value is
:data:`Query.DEFAULT_METRIC_TYPE
<google.cloud.monitoring.query.Query.DEFAULT_METRIC_TYPE>`,
but please note that this default value is provided only for
demonstration purposes and is subject to change. See the
`supported metrics`_.
:type end_time: :class:`datetime.datetime`
:param end_time:
(Optional) The end time (inclusive) of the time interval
for which results should be returned, as a datetime object.
The default is the start of the current minute.
The start time (exclusive) is determined by combining the
values of ``days``, ``hours``, and ``minutes``, and
subtracting the resulting duration from the end time.
It is also allowed to omit the end time and duration here,
in which case
:meth:`~google.cloud.monitoring.query.Query.select_interval`
must be called before the query is executed.
:type days: int
:param days: The number of days in the time interval.
:type hours: int
:param hours: The number of hours in the time interval.
:type minutes: int
:param minutes: The number of minutes in the time interval.
:rtype: :class:`~google.cloud.monitoring.query.Query`
:returns: The query object.
:raises: :exc:`ValueError` if ``end_time`` is specified but
``days``, ``hours``, and ``minutes`` are all zero.
If you really want to specify a point in time, use
:meth:`~google.cloud.monitoring.query.Query.select_interval`.
.. _supported metrics: https://cloud.google.com/monitoring/api/metrics
"""
return Query(self, metric_type,
end_time=end_time,
days=days, hours=hours, minutes=minutes)
def metric_descriptor(self, type_,
metric_kind=MetricKind.METRIC_KIND_UNSPECIFIED,
value_type=ValueType.VALUE_TYPE_UNSPECIFIED,
labels=(), unit='', description='', display_name=''):
"""Construct a metric descriptor object.
Metric descriptors specify the schema for a particular metric type.
This factory method is used most often in conjunction with the metric
descriptor
:meth:`~google.cloud.monitoring.metric.MetricDescriptor.create`
method to define custom metrics::
>>> descriptor = client.metric_descriptor(
... 'custom.googleapis.com/my_metric',
... metric_kind=MetricKind.GAUGE,
... value_type=ValueType.DOUBLE,
... description='This is a simple example of a custom metric.')
>>> descriptor.create()
Here is an example where the custom metric is parameterized by a
metric label::
>>> label = LabelDescriptor('response_code', LabelValueType.INT64,
... description='HTTP status code')
>>> descriptor = client.metric_descriptor(
... 'custom.googleapis.com/my_app/response_count',
... metric_kind=MetricKind.CUMULATIVE,
... value_type=ValueType.INT64,
... labels=[label],
... description='Cumulative count of HTTP responses.')
>>> descriptor.create()
:type type_: str
:param type_:
The metric type including a DNS name prefix. For example:
``"custom.googleapis.com/my_metric"``
:type metric_kind: str
:param metric_kind:
The kind of measurement. It must be one of
:data:`MetricKind.GAUGE`, :data:`MetricKind.DELTA`,
or :data:`MetricKind.CUMULATIVE`.
See :class:`~google.cloud.monitoring.metric.MetricKind`.
:type value_type: str
:param value_type:
The value type of the metric. It must be one of
:data:`ValueType.BOOL`, :data:`ValueType.INT64`,
:data:`ValueType.DOUBLE`, :data:`ValueType.STRING`,
or :data:`ValueType.DISTRIBUTION`.
See :class:`ValueType`.
:type labels:
list of :class:`~google.cloud.monitoring.label.LabelDescriptor`
:param labels:
A sequence of zero or more label descriptors specifying the labels
used to identify a specific instance of this metric.
:type unit: str
:param unit: An optional unit in which the metric value is reported.
:type description: str
:param description: An optional detailed description of the metric.
:type display_name: str
:param display_name: An optional concise name for the metric.
:rtype: :class:`MetricDescriptor`
:returns: The metric descriptor created with the passed-in arguments.
"""
return MetricDescriptor(
self, type_,
metric_kind=metric_kind,
value_type=value_type,
labels=labels,
unit=unit,
description=description,
display_name=display_name,
)
@staticmethod
def metric(type_, labels):
"""Factory for constructing metric objects.
:class:`~google.cloud.monitoring.metric.Metric` objects are typically
created to write custom metric values. The type should match the
metric type specified in the
:class:`~google.cloud.monitoring.metric.MetricDescriptor` used to
create the custom metric::
>>> metric = client.metric('custom.googleapis.com/my_metric',
... labels={
... 'status': 'successful',
... })
:type type_: str
:param type_: The metric type name.
:type labels: dict
:param labels: A mapping from label names to values for all labels
enumerated in the associated
:class:`~.metric.MetricDescriptor`.
:rtype: :class:`~google.cloud.monitoring.metric.Metric`
:returns: The metric object.
"""
return Metric(type=type_, labels=labels)
@staticmethod
def resource(type_, labels):
"""Factory for constructing monitored resource objects.
A monitored resource object (
:class:`~google.cloud.monitoring.resource.Resource`) is
typically used to create a
:class:`~google.cloud.monitoring.timeseries.TimeSeries` object.
For a list of possible monitored resource types and their associated
labels, see:
https://cloud.google.com/monitoring/api/resources
:type type_: str
:param type_: The monitored resource type name.
:type labels: dict
:param labels: A mapping from label names to values for all labels
enumerated in the associated
:class:`~.resource.ResourceDescriptor`,
except that ``project_id`` can and should be omitted
when writing time series data.
:rtype: :class:`~google.cloud.monitoring.resource.Resource`
:returns: A monitored resource object.
"""
return Resource(type_, labels)
@staticmethod
def time_series(metric, resource, value,
end_time=None, start_time=None):
"""Construct a time series object for a single data point.
.. note::
While :class:`~google.cloud.monitoring.timeseries.TimeSeries`
objects returned by the API typically have multiple data points,
:class:`~google.cloud.monitoring.timeseries.TimeSeries` objects
sent to the API must have at most one point.
For example::
>>> timeseries = client.time_series(metric, resource, 1.23,
... end_time=end)
For more information, see:
https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TimeSeries
:type metric: :class:`~google.cloud.monitoring.metric.Metric`
:param metric: A :class:`~google.cloud.monitoring.metric.Metric`.
:type resource: :class:`~google.cloud.monitoring.resource.Resource`
:param resource: A :class:`~google.cloud.monitoring.resource.Resource`
object.
:type value: bool, int, string, or float
:param value:
The value of the data point to create for the
:class:`~google.cloud.monitoring.timeseries.TimeSeries`.
.. note::
The Python type of the value will determine the
:class:`~ValueType` sent to the API, which must match the value
type specified in the metric descriptor. For example, a Python
float will be sent to the API as a :data:`ValueType.DOUBLE`.
:type end_time: :class:`~datetime.datetime`
:param end_time:
The end time for the point to be included in the time series.
Assumed to be UTC if no time zone information is present.
Defaults to the current time, as obtained by calling
:meth:`datetime.datetime.utcnow`.
:type start_time: :class:`~datetime.datetime`
:param start_time:
The start time for the point to be included in the time series.
Assumed to be UTC if no time zone information is present.
Defaults to None. If the start time is unspecified,
the API interprets the start time to be the same as the end time.
:rtype: :class:`~google.cloud.monitoring.timeseries.TimeSeries`
:returns: A time series object.
"""
if end_time is None:
end_time = _UTCNOW()
end_time = _datetime_to_rfc3339(end_time, ignore_zone=False)
if start_time:
start_time = _datetime_to_rfc3339(start_time, ignore_zone=False)
point = Point(value=value, start_time=start_time, end_time=end_time)
return TimeSeries(metric=metric, resource=resource, metric_kind=None,
value_type=None, points=[point])
def fetch_metric_descriptor(self, metric_type):
"""Look up a metric descriptor by type.
Example::
>>> METRIC = 'compute.googleapis.com/instance/cpu/utilization'
>>> print(client.fetch_metric_descriptor(METRIC))
:type metric_type: str
:param metric_type: The metric type name.
:rtype: :class:`~google.cloud.monitoring.metric.MetricDescriptor`
:returns: The metric descriptor instance.
:raises: :class:`google.cloud.exceptions.NotFound` if the metric
descriptor is not found.
"""
return MetricDescriptor._fetch(self, metric_type)
def list_metric_descriptors(self, filter_string=None, type_prefix=None):
"""List all metric descriptors for the project.
Examples::
>>> for descriptor in client.list_metric_descriptors():
... print(descriptor.type)
>>> for descriptor in client.list_metric_descriptors(
... type_prefix='custom.'):
... print(descriptor.type)
:type filter_string: str
:param filter_string:
(Optional) An optional filter expression describing the metric
descriptors to be returned. See the `filter documentation`_.
:type type_prefix: str
:param type_prefix:
(Optional) An optional prefix constraining the selected metric
types. This adds ``metric.type = starts_with("<prefix>")`` to the
filter.
:rtype:
list of :class:`~google.cloud.monitoring.metric.MetricDescriptor`
:returns: A list of metric descriptor instances.
.. _filter documentation:
https://cloud.google.com/monitoring/api/v3/filters
"""
return MetricDescriptor._list(self, filter_string,
type_prefix=type_prefix)
def fetch_resource_descriptor(self, resource_type):
"""Look up a monitored resource descriptor by type.
Example::
>>> print(client.fetch_resource_descriptor('gce_instance'))
:type resource_type: str
:param resource_type: The resource type name.
:rtype: :class:`~google.cloud.monitoring.resource.ResourceDescriptor`
:returns: The resource descriptor instance.
:raises: :class:`google.cloud.exceptions.NotFound` if the resource
descriptor is not found.
"""
return ResourceDescriptor._fetch(self, resource_type)
def list_resource_descriptors(self, filter_string=None):
"""List all monitored resource descriptors for the project.
Example::
>>> for descriptor in client.list_resource_descriptors():
... print(descriptor.type)
:type filter_string: str
:param filter_string:
(Optional) An optional filter expression describing the resource
descriptors to be returned. See the `filter documentation`_.
:rtype: list of
:class:`~google.cloud.monitoring.resource.ResourceDescriptor`
:returns: A list of resource descriptor instances.
.. _filter documentation:
https://cloud.google.com/monitoring/api/v3/filters
"""
return ResourceDescriptor._list(self, filter_string)
def group(self, group_id=None, display_name=None, parent_id=None,
filter_string=None, is_cluster=False):
"""Factory constructor for group object.
.. note::
This will not make an HTTP request; it simply instantiates
a group object owned by this client.
:type group_id: str
:param group_id: (Optional) The ID of the group.
:type display_name: str
:param display_name:
(Optional) A user-assigned name for this group, used only for
display purposes.
:type parent_id: str
:param parent_id:
(Optional) The ID of the group's parent, if it has one.
:type filter_string: str
:param filter_string:
(Optional) The filter string used to determine which monitored
resources belong to this group.
:type is_cluster: bool
:param is_cluster:
If true, the members of this group are considered to be a cluster.
The system can perform additional analysis on groups that are
clusters.
:rtype: :class:`Group`
:returns: The group created with the passed-in arguments.
:raises:
:exc:`ValueError` if both ``group_id`` and ``name`` are specified.
"""
return Group(
self,
group_id=group_id,
display_name=display_name,
parent_id=parent_id,
filter_string=filter_string,
is_cluster=is_cluster,
)
def fetch_group(self, group_id):
"""Fetch a group from the API based on it's ID.
Example::
>>> try:
>>> group = client.fetch_group('1234')
>>> except google.cloud.exceptions.NotFound:
>>> print('That group does not exist!')
:type group_id: str
:param group_id: The ID of the group.
:rtype: :class:`~google.cloud.monitoring.group.Group`
:returns: The group instance.
:raises: :class:`google.cloud.exceptions.NotFound` if the group
is not found.
"""
return Group._fetch(self, group_id)
def list_groups(self):
"""List all groups for the project.
Example::
>>> for group in client.list_groups():
... print((group.display_name, group.name))
:rtype: list of :class:`~google.cloud.monitoring.group.Group`
:returns: A list of group instances.
"""
return Group._list(self)
def write_time_series(self, timeseries_list):
"""Write a list of time series objects to the API.
The recommended approach to creating time series objects is using
the :meth:`~google.cloud.monitoring.client.Client.time_series` factory
method.
Example::
>>> client.write_time_series([ts1, ts2])
If you only need to write a single time series object, consider using
the :meth:`~google.cloud.monitoring.client.Client.write_point` method
instead.
:type timeseries_list:
list of :class:`~google.cloud.monitoring.timeseries.TimeSeries`
:param timeseries_list:
            A list of time series objects to be written
to the API. Each time series must contain exactly one point.
"""
path = '/projects/{project}/timeSeries/'.format(
project=self.project)
timeseries_dict = [timeseries._to_dict()
for timeseries in timeseries_list]
self._connection.api_request(method='POST', path=path,
data={'timeSeries': timeseries_dict})
def write_point(self, metric, resource, value,
end_time=None,
start_time=None):
"""Write a single point for a metric to the API.
This is a convenience method to write a single time series object to
the API. To write multiple time series objects to the API as a batch
operation, use the
:meth:`~google.cloud.monitoring.client.Client.time_series`
factory method to create time series objects and the
:meth:`~google.cloud.monitoring.client.Client.write_time_series`
method to write the objects.
Example::
>>> client.write_point(metric, resource, 3.14)
:type metric: :class:`~google.cloud.monitoring.metric.Metric`
:param metric: A :class:`~google.cloud.monitoring.metric.Metric`
object.
:type resource: :class:`~google.cloud.monitoring.resource.Resource`
:param resource: A :class:`~google.cloud.monitoring.resource.Resource`
object.
:type value: bool, int, string, or float
:param value:
The value of the data point to create for the
:class:`~google.cloud.monitoring.timeseries.TimeSeries`.
.. note::
The Python type of the value will determine the
:class:`~ValueType` sent to the API, which must match the value
type specified in the metric descriptor. For example, a Python
float will be sent to the API as a :data:`ValueType.DOUBLE`.
:type end_time: :class:`~datetime.datetime`
:param end_time:
The end time for the point to be included in the time series.
Assumed to be UTC if no time zone information is present.
Defaults to the current time, as obtained by calling
:meth:`datetime.datetime.utcnow`.
:type start_time: :class:`~datetime.datetime`
:param start_time:
The start time for the point to be included in the time series.
Assumed to be UTC if no time zone information is present.
Defaults to None. If the start time is unspecified,
the API interprets the start time to be the same as the end time.
"""
timeseries = self.time_series(
metric, resource, value, end_time, start_time)
self.write_time_series([timeseries])
| apache-2.0 |
SpaceKatt/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/backends/backend_gtkcairo.py | 3 | 2079 | """
GTK+ Matplotlib interface using cairo (not GDK) drawing operations.
Author: Steve Chaplin
"""
import gtk
if gtk.pygtk_version < (2,7,0):
import cairo.gtk
from matplotlib.backends import backend_cairo
from matplotlib.backends.backend_gtk import *
backend_version = 'PyGTK(%d.%d.%d) ' % gtk.pygtk_version + \
'Pycairo(%s)' % backend_cairo.backend_version
_debug = False
#_debug = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if _debug: print 'backend_gtkcairo.%s()' % fn_name()
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTKCairo(thisFig)
return FigureManagerGTK(canvas, num)
class RendererGTKCairo (backend_cairo.RendererCairo):
if gtk.pygtk_version >= (2,7,0):
def set_pixmap (self, pixmap):
self.gc.ctx = pixmap.cairo_create()
else:
def set_pixmap (self, pixmap):
self.gc.ctx = cairo.gtk.gdk_cairo_create (pixmap)
class FigureCanvasGTKCairo(backend_cairo.FigureCanvasCairo, FigureCanvasGTK):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(backend_cairo.FigureCanvasCairo.filetypes)
def _renderer_init(self):
"""Override to use cairo (rather than GDK) renderer"""
        if _debug: print '%s.%s()' % (self.__class__.__name__, fn_name())
self._renderer = RendererGTKCairo (self.figure.dpi)
class FigureManagerGTKCairo(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKCairo (canvas, self.window)
else:
toolbar = None
return toolbar
class NavigationToolbar2GTKCairo(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKCairo(fig)
| gpl-3.0 |
jdanbrown/pydatalab | solutionbox/structured_data/test_mltoolbox/test_datalab_e2e.py | 5 | 7533 | # Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Test analyze, training, and prediction.
"""
from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import os
import pandas as pd
import shutil
import six
import sys
import tempfile
import unittest
from . import e2e_functions
from tensorflow.python.lib.io import file_io
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../..')))
import mltoolbox.regression.linear as reglinear # noqa: E402
import google.datalab.ml as dlml # noqa: E402
class TestLinearRegression(unittest.TestCase):
"""Test linear regression works e2e locally.
Note that there should be little need for testing the other scenarios (linear
classification, dnn regression, dnn classification) as they should only
differ at training time. The training coverage of task.py is already done in
test_sd_trainer.
"""
def __init__(self, *args, **kwargs):
super(TestLinearRegression, self).__init__(*args, **kwargs)
# Log everything
self._logger = logging.getLogger('TestStructuredDataLogger')
self._logger.setLevel(logging.DEBUG)
if not self._logger.handlers:
self._logger.addHandler(logging.StreamHandler(stream=sys.stdout))
def _make_test_files(self):
"""Builds test files and folders"""
# Make the output folders
self._test_dir = tempfile.mkdtemp()
self._preprocess_output = os.path.join(self._test_dir, 'preprocess')
self._train_output = os.path.join(self._test_dir, 'train')
self._batch_predict_output = os.path.join(self._test_dir, 'batch_predict')
# Don't make train_output folder as it should not exist at training time.
os.mkdir(self._preprocess_output)
os.mkdir(self._batch_predict_output)
# Make csv files
self._csv_train_filename = os.path.join(self._test_dir,
'train_csv_data.csv')
self._csv_eval_filename = os.path.join(self._test_dir,
'eval_csv_data.csv')
self._csv_predict_filename = os.path.join(self._test_dir,
'predict_csv_data.csv')
e2e_functions.make_csv_data(self._csv_train_filename, 100, 'regression',
True)
e2e_functions.make_csv_data(self._csv_eval_filename, 100, 'regression',
True)
self._predict_num_rows = 10
e2e_functions.make_csv_data(self._csv_predict_filename,
self._predict_num_rows, 'regression', False)
# Make schema file
self._schema_filename = os.path.join(self._test_dir, 'schema.json')
e2e_functions.make_preprocess_schema(self._schema_filename, 'regression')
# Make feature file
self._input_features_filename = os.path.join(self._test_dir,
'input_features_file.json')
transforms = {
"num1": {"transform": "scale"},
"num2": {"transform": "scale", "value": 4},
"str1": {"transform": "one_hot"},
"str2": {"transform": "embedding", "embedding_dim": 3},
"target": {"transform": "target"},
"key": {"transform": "key"},
}
file_io.write_string_to_file(
self._input_features_filename,
json.dumps(transforms, indent=2))
def _run_analyze(self):
reglinear.analyze(
output_dir=self._preprocess_output,
dataset=dlml.CsvDataSet(
file_pattern=self._csv_train_filename,
schema_file=self._schema_filename))
self.assertTrue(os.path.isfile(
os.path.join(self._preprocess_output, 'stats.json')))
self.assertTrue(os.path.isfile(
os.path.join(self._preprocess_output, 'vocab_str1.csv')))
def _run_train(self):
reglinear.train(
train_dataset=dlml.CsvDataSet(
file_pattern=self._csv_train_filename,
schema_file=self._schema_filename),
eval_dataset=dlml.CsvDataSet(
file_pattern=self._csv_eval_filename,
schema_file=self._schema_filename),
analysis_dir=self._preprocess_output,
output_dir=self._train_output,
features=self._input_features_filename,
max_steps=100,
train_batch_size=100)
self.assertTrue(os.path.isfile(
os.path.join(self._train_output, 'model', 'saved_model.pb')))
self.assertTrue(os.path.isfile(
os.path.join(self._train_output, 'evaluation_model', 'saved_model.pb')))
def _run_predict(self):
data = pd.read_csv(self._csv_predict_filename,
header=None)
df = reglinear.predict(data=data,
training_dir=self._train_output)
self.assertEqual(len(df.index), self._predict_num_rows)
self.assertEqual(list(df), ['key', 'predicted'])
def _run_batch_prediction(self, output_dir, use_target):
reglinear.batch_predict(
training_dir=self._train_output,
prediction_input_file=(self._csv_eval_filename if use_target
else self._csv_predict_filename),
output_dir=output_dir,
mode='evaluation' if use_target else 'prediction',
batch_size=4,
output_format='csv')
# check errors file is empty
errors = file_io.get_matching_files(os.path.join(output_dir, 'errors*'))
self.assertEqual(len(errors), 1)
self.assertEqual(os.path.getsize(errors[0]), 0)
# check predictions files are not empty
predictions = file_io.get_matching_files(os.path.join(output_dir,
'predictions*'))
self.assertGreater(os.path.getsize(predictions[0]), 0)
# check the schema is correct
schema_file = os.path.join(output_dir, 'csv_schema.json')
self.assertTrue(os.path.isfile(schema_file))
schema = json.loads(file_io.read_file_to_string(schema_file))
self.assertEqual(schema[0]['name'], 'key')
self.assertEqual(schema[1]['name'], 'predicted')
if use_target:
self.assertEqual(schema[2]['name'], 'target')
self.assertEqual(len(schema), 3)
else:
self.assertEqual(len(schema), 2)
def _cleanup(self):
shutil.rmtree(self._test_dir)
def test_e2e(self):
try:
self._make_test_files()
self._run_analyze()
self._run_train()
if six.PY2:
# Dataflow is only supported by python 2. Prediction assumes Dataflow
# is installed.
self._run_predict()
self._run_batch_prediction(
os.path.join(self._batch_predict_output, 'with_target'),
True)
self._run_batch_prediction(
os.path.join(self._batch_predict_output, 'without_target'),
False)
else:
print('only tested analyze in TestLinearRegression')
finally:
self._cleanup()
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
eric-haibin-lin/mxnet | example/named_entity_recognition/src/ner.py | 4 | 12663 | # !/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
from collections import Counter
import itertools
import iterators
import os
import numpy as np
import pandas as pd
import mxnet as mx
import argparse
import pickle
import logging
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="Deep neural network for named entity recognition",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data-dir', type=str, default='../data',
help='relative path to input data')
parser.add_argument('--output-dir', type=str, default='../results',
help='directory to save model files to')
parser.add_argument('--max-records', type=int, default=None,
help='total records before data split')
parser.add_argument('--train_fraction', type=float, default=0.8,
help='fraction of data to use for training. remainder used for testing.')
parser.add_argument('--batch-size', type=int, default=128,
help='the batch size.')
parser.add_argument('--buckets', type=str, default="",
help='unique bucket sizes')
parser.add_argument('--char-embed', type=int, default=25,
help='Embedding size for each unique character.')
parser.add_argument('--char-filter-list', type=str, default="3,4,5",
help='unique filter sizes for char level cnn')
parser.add_argument('--char-filters', type=int, default=20,
help='number of each filter size')
parser.add_argument('--word-embed', type=int, default=500,
                    help='Embedding size for each unique word (token).')
parser.add_argument('--word-filter-list', type=str, default="3,4,5",
help='unique filter sizes for char level cnn')
parser.add_argument('--word-filters', type=int, default=200,
help='number of each filter size')
parser.add_argument('--lstm-state-size', type=int, default=100,
help='number of hidden units in each unrolled recurrent cell')
parser.add_argument('--lstm-layers', type=int, default=1,
help='number of recurrent layers')
parser.add_argument('--gpus', type=str, default='',
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu. ')
parser.add_argument('--optimizer', type=str, default='adam',
help='the optimizer type')
parser.add_argument('--lr', type=float, default=0.001,
help='initial learning rate')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout rate for network')
parser.add_argument('--num-epochs', type=int, default=100,
help='max num of epochs')
parser.add_argument('--save-period', type=int, default=20,
help='save checkpoint for every n epochs')
parser.add_argument('--model_prefix', type=str, default='electricity_model',
help='prefix for saving model params')
def save_obj(obj, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def save_model():
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
return mx.callback.do_checkpoint(os.path.join(args.output_dir, "checkpoint"), args.save_period)
def build_vocab(nested_list):
"""
:param nested_list: list of list of string
:return: dictionary mapping from string to int, inverse of that dictionary
"""
# Build vocabulary
word_counts = Counter(itertools.chain(*nested_list))
logging.info("build_vocab: word_counts=%d" % (len(word_counts)))
# Mapping from index to label
vocabulary_inv = [x[0] for x in word_counts.most_common()]
# Mapping from label to index
vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
return vocabulary, vocabulary_inv
def build_iters(data_dir, max_records, train_fraction, batch_size, buckets=None):
"""
Reads a csv of sentences/tag sequences into a pandas dataframe.
Converts into X = array(list(int)) & Y = array(list(int))
Splits into training and test sets
Builds dictionaries mapping from index labels to labels/ indexed features to features
:param data_dir: directory to read in csv data from
:param max_records: total number of records to randomly select from input data
:param train_fraction: fraction of the data to use for training
:param batch_size: records in mini-batches during training
:param buckets: size of each bucket in the iterators
:return: train_iter, val_iter, word_to_index, index_to_word, pos_to_index, index_to_pos
"""
# Read in data as numpy array
df = pd.read_pickle(os.path.join(data_dir, "ner_data.pkl"))[:max_records]
# Get feature lists
entities=[list(array) for array in df["BILOU_tag"].values]
sentences = [list(array) for array in df["token"].values]
chars=[[[c for c in word] for word in sentence] for sentence in sentences]
# Build vocabularies
entity_to_index, index_to_entity = build_vocab(entities)
word_to_index, index_to_word = build_vocab(sentences)
char_to_index, index_to_char = build_vocab([np.array([c for c in word]) for word in index_to_word])
save_obj(entity_to_index, os.path.join(args.data_dir, "tag_to_index"))
# Map strings to integer values
indexed_entities=[list(map(entity_to_index.get, l)) for l in entities]
indexed_tokens=[list(map(word_to_index.get, l)) for l in sentences]
indexed_chars=[[list(map(char_to_index.get, word)) for word in sentence] for sentence in chars]
# Split into training and testing data
idx=int(len(indexed_tokens)*train_fraction)
logging.info("Preparing train/test datasets splitting at idx %d on total %d sentences using a batchsize of %d", idx, len(indexed_tokens), batch_size)
X_token_train, X_char_train, Y_train = indexed_tokens[:idx], indexed_chars[:idx], indexed_entities[:idx]
X_token_test, X_char_test, Y_test = indexed_tokens[idx:], indexed_chars[idx:], indexed_entities[idx:]
# build iterators to feed batches to network
train_iter = iterators.BucketNerIter(sentences=X_token_train, characters=X_char_train, label=Y_train,
max_token_chars=5, batch_size=batch_size, buckets=buckets)
logging.info("Creating the val_iter using %d sentences", len(X_token_test))
val_iter = iterators.BucketNerIter(sentences=X_token_test, characters=X_char_test, label=Y_test,
max_token_chars=train_iter.max_token_chars, batch_size=batch_size, buckets=train_iter.buckets)
return train_iter, val_iter, word_to_index, char_to_index, entity_to_index
def sym_gen(seq_len):
"""
Build NN symbol depending on the length of the input sequence
"""
sentence_shape = train_iter.provide_data[0][1]
char_sentence_shape = train_iter.provide_data[1][1]
entities_shape = train_iter.provide_label[0][1]
X_sent = mx.symbol.Variable(train_iter.provide_data[0].name)
X_char_sent = mx.symbol.Variable(train_iter.provide_data[1].name)
Y = mx.sym.Variable(train_iter.provide_label[0].name)
###############################
# Character embedding component
###############################
char_embeddings = mx.sym.Embedding(data=X_char_sent, input_dim=len(char_to_index), output_dim=args.char_embed, name='char_embed')
char_embeddings = mx.sym.reshape(data=char_embeddings, shape=(0,1,seq_len,-1,args.char_embed), name='char_embed2')
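    # after the reshape the layout is (batch, 1, seq_len, max_word_len, char_embed),
    # so each (1, filter_size, char_embed) kernel below slides over the characters
    # of one word at a time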
char_cnn_outputs = []
for i, filter_size in enumerate(args.char_filter_list):
# Kernel that slides over entire words resulting in a 1d output
convi = mx.sym.Convolution(data=char_embeddings, kernel=(1, filter_size, args.char_embed), stride=(1, 1, 1),
num_filter=args.char_filters, name="char_conv_layer_" + str(i))
acti = mx.sym.Activation(data=convi, act_type='tanh')
pooli = mx.sym.Pooling(data=acti, pool_type='max', kernel=(1, char_sentence_shape[2] - filter_size + 1, 1),
stride=(1, 1, 1), name="char_pool_layer_" + str(i))
pooli = mx.sym.transpose(mx.sym.Reshape(pooli, shape=(0, 0, 0)), axes=(0, 2, 1), name="cchar_conv_layer_" + str(i))
char_cnn_outputs.append(pooli)
# combine features from all filters & apply dropout
cnn_char_features = mx.sym.Concat(*char_cnn_outputs, dim=2, name="cnn_char_features")
regularized_cnn_char_features = mx.sym.Dropout(data=cnn_char_features, p=args.dropout, mode='training',
name='regularized charCnn features')
##################################
# Combine char and word embeddings
##################################
word_embeddings = mx.sym.Embedding(data=X_sent, input_dim=len(word_to_index), output_dim=args.word_embed, name='word_embed')
rnn_features = mx.sym.Concat(*[word_embeddings, regularized_cnn_char_features], dim=2, name='rnn input')
##############################
# Bidirectional LSTM component
##############################
# unroll the lstm cell in time, merging outputs
bi_cell.reset()
output, states = bi_cell.unroll(length=seq_len, inputs=rnn_features, merge_outputs=True)
# Map to num entity classes
rnn_output = mx.sym.Reshape(output, shape=(-1, args.lstm_state_size * 2), name='r_output')
fc = mx.sym.FullyConnected(data=rnn_output, num_hidden=len(entity_to_index), name='fc_layer')
# reshape back to same shape as loss will be
reshaped_fc = mx.sym.transpose(mx.sym.reshape(fc, shape=(-1, seq_len, len(entity_to_index))), axes=(0, 2, 1))
sm = mx.sym.SoftmaxOutput(data=reshaped_fc, label=Y, ignore_label=-1, use_ignore=True, multi_output=True, name='softmax')
return sm, [v.name for v in train_iter.provide_data], [v.name for v in train_iter.provide_label]
def train(train_iter, val_iter):
import metrics
    devs = mx.cpu() if args.gpus is None or args.gpus == '' else [mx.gpu(int(i)) for i in args.gpus.split(',')]
logging.info("train on device %s using optimizer %s at learningrate %f for %d epochs using %d records: lstm_state_size=%d ...",
devs, args.optimizer, args.lr, args.num_epochs, args.max_records, args.lstm_state_size)
module = mx.mod.BucketingModule(sym_gen, train_iter.default_bucket_key, context=devs)
module.fit(train_data=train_iter,
eval_data=val_iter,
eval_metric=metrics.composite_classifier_metrics(),
optimizer=args.optimizer,
optimizer_params={'learning_rate': args.lr },
initializer=mx.initializer.Uniform(0.1),
num_epoch=args.num_epochs,
epoch_end_callback=save_model())
if __name__ == '__main__':
# parse args
args = parser.parse_args()
args.buckets = list(map(int, args.buckets.split(','))) if len(args.buckets) > 0 else None
args.char_filter_list = list(map(int, args.char_filter_list.split(',')))
# Build data iterators
train_iter, val_iter, word_to_index, char_to_index, entity_to_index = build_iters(args.data_dir, args.max_records,
args.train_fraction, args.batch_size, args.buckets)
logging.info("validation iterator: %s", val_iter)
# Define the recurrent layer
bi_cell = mx.rnn.SequentialRNNCell()
for layer_num in range(args.lstm_layers):
bi_cell.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(num_hidden=args.lstm_state_size, prefix="forward_layer_" + str(layer_num)),
mx.rnn.LSTMCell(num_hidden=args.lstm_state_size, prefix="backward_layer_" + str(layer_num))))
bi_cell.add(mx.rnn.DropoutCell(args.dropout))
train(train_iter, val_iter) | apache-2.0 |
franblas/facialrecoChallenge | itml.py | 1 | 3881 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 19:04:51 2015
@author: Paco
"""
"""
Information Theoretic Metric Learning, Kulis et al., ICML 2007
"""
import numpy as np
from sklearn.metrics import pairwise_distances
from base_metric import BaseMetricLearner
class ITML(BaseMetricLearner):
"""
Information Theoretic Metric Learning (ITML)
"""
def __init__(self, gamma=1., max_iters=1000, convergence_threshold=1e-3):
"""
gamma: value for slack variables
"""
self.gamma = gamma
self.max_iters = max_iters
self.convergence_threshold = convergence_threshold
def _process_inputs(self, X, constraints, bounds, A0):
self.X = X
# check to make sure that no two constrained vectors are identical
a,b,c,d = constraints
ident = _vector_norm(self.X[a] - self.X[b]) > 1e-9
a, b = a[ident], b[ident]
ident = _vector_norm(self.X[c] - self.X[d]) > 1e-9
c, d = c[ident], d[ident]
# init bounds
if bounds is None:
self.bounds = np.percentile(pairwise_distances(X), (5, 95))
else:
assert len(bounds) == 2
self.bounds = bounds
# init metric
if A0 is None:
self.A = np.identity(X.shape[1])
else:
self.A = A0
return a,b,c,d
def fit(self, X, constraints, bounds=None, A0=None, verbose=False):
"""
X: (n x d) data matrix - each row corresponds to a single instance
constraints: tuple of arrays: (a,b,c,d) indices into X, such that:
d(X[a],X[b]) < d(X[c],X[d])
bounds: (pos,neg) pair of bounds on similarity, such that:
d(X[a],X[b]) < pos
d(X[c],X[d]) > neg
A0: [optional] (d x d) initial regularization matrix, defaults to identity
"""
a,b,c,d = self._process_inputs(X, constraints, bounds, A0)
gamma = self.gamma
num_pos = len(a)
num_neg = len(c)
_lambda = np.zeros(num_pos + num_neg)
lambdaold = np.zeros_like(_lambda)
gamma_proj = 1. if gamma is np.inf else gamma/(gamma+1.)
pos_bhat = np.zeros(num_pos) + self.bounds[0]
neg_bhat = np.zeros(num_neg) + self.bounds[1]
A = self.A
for it in xrange(self.max_iters):
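      # each sweep performs Bregman projections onto the individual similarity
      # and dissimilarity constraints, updating the metric A and the slack variables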
# update positives
vv = self.X[a] - self.X[b]
for i,v in enumerate(vv):
wtw = v.dot(A).dot(v) # scalar
alpha = min(_lambda[i], gamma_proj*(1./wtw - 1./pos_bhat[i]))
_lambda[i] -= alpha
beta = alpha/(1 - alpha*wtw)
pos_bhat[i] = 1./((1 / pos_bhat[i]) + (alpha / gamma))
A += beta * A.dot(np.outer(v,v)).dot(A)
# update negatives
vv = self.X[c] - self.X[d]
for i,v in enumerate(vv):
wtw = v.dot(A).dot(v) # scalar
alpha = min(_lambda[i+num_pos],gamma_proj*(1./neg_bhat[i] - 1./wtw))
_lambda[i+num_pos] -= alpha
beta = -alpha/(1 + alpha*wtw)
neg_bhat[i] = 1./((1 / neg_bhat[i]) - (alpha / gamma))
A += beta * A.dot(np.outer(v,v)).dot(A)
normsum = np.linalg.norm(_lambda) + np.linalg.norm(lambdaold)
if normsum == 0:
conv = np.inf
break
conv = np.abs(lambdaold - _lambda).sum() / normsum
if conv < self.convergence_threshold:
break
lambdaold = _lambda.copy()
if verbose:
print 'itml iter: %d, conv = %f' % (it, conv)
if verbose:
print 'itml converged at iter: %d, conv = %f' % (it, conv)
return self
def metric(self):
return self.A
@classmethod
def prepare_constraints(self, labels, num_points, num_constraints):
ac,bd = np.random.randint(num_points, size=(2,num_constraints))
pos = labels[ac] == labels[bd]
a,c = ac[pos], ac[~pos]
b,d = bd[pos], bd[~pos]
return a,b,c,d
# hack around lack of axis kwarg in older numpy versions
try:
np.linalg.norm([[4]], axis=1)
except TypeError:
def _vector_norm(X):
return np.apply_along_axis(np.linalg.norm, 1, X)
else:
def _vector_norm(X):
return np.linalg.norm(X, axis=1) | mit |
spallavolu/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
jmetzen/scikit-learn | examples/ensemble/plot_partial_dependence.py | 3 | 4833 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [2]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [1]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; nor does the average number of
rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [2] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from six.moves.urllib.error import HTTPError
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
def main():
# fetch California housing dataset
try:
cal_housing = fetch_california_housing()
except HTTPError:
print("Failed downloading california housing data.")
return
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
if __name__ == "__main__":
main()
| bsd-3-clause |
theoryno3/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 13 | 43295 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
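            # maintain a running (incremental) mean of the weights and intercept over the samples seen so far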
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_auto(self):
# partial_fit with class_weight='auto' not supported
assert_raises_regexp(ValueError,
"class_weight 'auto' is not supported for "
"partial_fit. In order to use 'auto' weights, "
"use compute_class_weight\('auto', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='auto').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_auto_weight(self):
# Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using automated class_weight
clf_auto = self.factory(alpha=0.0001, n_iter=1000,
class_weight="auto", shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_auto.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "auto"
assert_array_almost_equal(clf.coef_, clf_auto.coef_, 6)
        # build a very, very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with auto class_weight enabled
clf = self.factory(n_iter=1000, class_weight="auto", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="auto", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
        X = rng.randn(n_samples, n_features)
# ground_truth linear model that generate y from X and to which the
# models should converge if the regularizer would be set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
michigraber/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
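# Illustrative example (not part of the original module): mldata_filename('Whistler Daily Snowfall')
# returns 'whistler-daily-snowfall' (lower-cased, spaces replaced by '-', and '().' characters stripped).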
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
    If the file does not exist yet, it is downloaded from mldata.org.
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
    3) the data array is stored as `n_features x n_samples`, and thus needs
to be transposed to match the `sklearn` standard
    Keyword arguments allow adapting these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
        The raw name is automatically converted to an mldata.org URL.
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
    to respect the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
    # if target or data names are indices, transform them into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
jian-li/rpg_svo | svo_analysis/scripts/compare_results.py | 17 | 6127 | #!/usr/bin/python
import os
import sys
import time
import rospkg
import numpy as np
import matplotlib.pyplot as plt
import yaml
import argparse
from matplotlib import rc
# tell matplotlib to use latex font
rc('font',**{'family':'serif','serif':['Cardo']})
rc('text', usetex=True)
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
def plot_trajectory(ax, filename, label, color, linewidth):
file = open(filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
trajectory = np.array([[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"], dtype=np.float64)
ax.plot(trajectory[:,1], trajectory[:,2], label=label, color=color, linewidth=linewidth)
def compare_results(experiments, results_dir, comparison_dir,
plot_scale_drift = False):
# ------------------------------------------------------------------------------
# position error
fig_poserr = plt.figure(figsize=(8,6))
ax_poserr_x = fig_poserr.add_subplot(311, ylabel='x-error [m]')
ax_poserr_y = fig_poserr.add_subplot(312, ylabel='y-error [m]')
ax_poserr_z = fig_poserr.add_subplot(313, ylabel='z-error [m]', xlabel='time [s]')
for exp in experiments:
# load dataset parameters
params_stream = open(os.path.join(results_dir, exp, 'params.yaml'))
params = yaml.load(params_stream)
# plot translation error
trans_error = np.loadtxt(os.path.join(results_dir, exp, 'translation_error.txt'))
trans_error[:,0] = trans_error[:,0]-trans_error[0,0]
ax_poserr_x.plot(trans_error[:,0], trans_error[:,1], label=params['experiment_label'])
ax_poserr_y.plot(trans_error[:,0], trans_error[:,2])
ax_poserr_z.plot(trans_error[:,0], trans_error[:,3])
ax_poserr_x.set_xlim([0, trans_error[-1,0]+4])
ax_poserr_y.set_xlim([0, trans_error[-1,0]+4])
ax_poserr_z.set_xlim([0, trans_error[-1,0]+4])
ax_poserr_x.legend(bbox_to_anchor=[0, 0], loc='lower left', ncol=3)
ax_poserr_x.grid()
ax_poserr_y.grid()
ax_poserr_z.grid()
fig_poserr.tight_layout()
fig_poserr.savefig(os.path.join(comparison_dir, 'translation_error.pdf'))
# ------------------------------------------------------------------------------
# orientation error
fig_roterr = plt.figure(figsize=(8,6))
ax_roterr_r = fig_roterr.add_subplot(311, ylabel='roll-error [rad]')
ax_roterr_p = fig_roterr.add_subplot(312, ylabel='pitch-error [rad]')
ax_roterr_y = fig_roterr.add_subplot(313, ylabel='yaw-error [rad]', xlabel='time [s]')
for exp in experiments:
# load dataset parameters
params_stream = open(os.path.join(results_dir, exp, 'params.yaml'))
params = yaml.load(params_stream)
        # plot orientation error
rot_error = np.loadtxt(os.path.join(results_dir, exp, 'orientation_error.txt'))
rot_error[:,0] = rot_error[:,0]-rot_error[0,0]
ax_roterr_r.plot(rot_error[:,0], rot_error[:,3], label=params['experiment_label'])
ax_roterr_p.plot(rot_error[:,0], rot_error[:,2])
ax_roterr_y.plot(rot_error[:,0], rot_error[:,1])
ax_roterr_r.set_xlim([0, rot_error[-1,0]+4])
ax_roterr_p.set_xlim([0, rot_error[-1,0]+4])
ax_roterr_y.set_xlim([0, rot_error[-1,0]+4])
ax_roterr_r.legend(bbox_to_anchor=[0, 1], loc='upper left', ncol=3)
ax_roterr_r.grid()
ax_roterr_p.grid()
ax_roterr_y.grid()
fig_roterr.tight_layout()
fig_roterr.savefig(os.path.join(comparison_dir, 'orientation_error.pdf'))
# ------------------------------------------------------------------------------
# scale error
if plot_scale_drift:
fig_scale = plt.figure(figsize=(8,2.5))
ax_scale = fig_scale.add_subplot(111, xlabel='time [s]', ylabel='scale change [\%]')
for exp in experiments:
# load dataset parameters
params = yaml.load(open(os.path.join(results_dir, exp, 'params.yaml')))
            # plot scale drift
            scale_drift = np.loadtxt(os.path.join(results_dir, exp, 'scale_drift.txt'))
scale_drift[:,0] = scale_drift[:,0]-scale_drift[0,0]
ax_scale.plot(scale_drift[:,0], scale_drift[:,1], label=params['experiment_label'])
ax_scale.set_xlim([0, rot_error[-1,0]+4])
ax_scale.legend(bbox_to_anchor=[0, 1], loc='upper left', ncol=3)
ax_scale.grid()
fig_scale.tight_layout()
fig_scale.savefig(os.path.join(comparison_dir, 'scale_drift.pdf'))
# ------------------------------------------------------------------------------
# trajectory
# fig_traj = plt.figure(figsize=(8,4.8))
# ax_traj = fig_traj.add_subplot(111, xlabel='x [m]', ylabel='y [m]', aspect='equal', xlim=[-3.1, 4], ylim=[-1.5, 2.6])
#
# plotTrajectory(ax_traj, '/home/cforster/Datasets/asl_vicon_d2/groundtruth_filtered.txt', 'Groundtruth', 'k', 1.5)
# plotTrajectory(ax_traj, results_dir+'/20130911_2229_nslam_i7_asl2_fast/traj_estimate_rotated.txt', 'Fast', 'g', 1)
# plotTrajectory(ax_traj, results_dir+'/20130906_2149_ptam_i7_asl2/traj_estimate_rotated.txt', 'PTAM', 'r', 1)
#
# mark_inset(ax_traj, axins, loc1=2, loc2=4, fc="none", ec='b')
# plt.draw()
# plt.show()
# ax_traj.legend(bbox_to_anchor=[1, 0], loc='lower right', ncol=3)
# ax_traj.grid()
# fig_traj.tight_layout()
# fig_traj.savefig('../results/trajectory_asl.pdf')
if __name__ == '__main__':
default_name = time.strftime("%Y%m%d_%H%M", time.localtime())+'_comparison'
parser = argparse.ArgumentParser(description='Compare results.')
parser.add_argument('result_directories', nargs='+', help='list of result directories to compare')
parser.add_argument('--name', help='name of the comparison', default=default_name)
args = parser.parse_args()
# create folder for comparison results
results_dir = os.path.join(rospkg.RosPack().get_path('svo_analysis'), 'results')
comparison_dir = os.path.join(results_dir, args.name)
if not os.path.exists(comparison_dir):
os.makedirs(comparison_dir)
# run comparison
compare_results(args.result_directories, results_dir, comparison_dir)
| gpl-3.0 |
jdavidrcamacho/Tests_GP | 02 - Programs being tested/RV_function.py | 1 | 3615 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 3 11:36:58 2017
@author: camacho
"""
import numpy as np
import matplotlib.pyplot as pl
pl.close("all")
##### RV FUNCTION 1 - circular orbit
def RV_circular(P=365,K=0.1,T=0,gamma=0,time=100,space=20):
#parameters
#P = period in days
#K = semi-amplitude of the signal
#T = velocity at zero phase
#gamma = average velocity of the star
#time = time of the simulation
#space => I want an observation every time/space days
t=np.linspace(0,time,space)
RV=[K*np.sin(2*np.pi*x/P - T) + gamma for x in t]
RV=[x for x in RV] #m/s
return [t,RV]
##### RV FUNCTION 2 - keplerian orbit
def RV_kepler(P=365,e=0,K=0.1,T=0,gamma=0,w=np.pi,time=100,space=1000):
#parameters
#P = period in days
#e = eccentricity
#K = RV amplitude
#gamma = constant system RV
#T = zero phase
#w = longitude of the periastron
#time = time of the simulation
#space => I want an observation every time/space days
t=np.linspace(0,time,space)
#mean anomaly
Mean_anom=[2*np.pi*(x1-T)/P for x1 in t]
#eccentric anomaly -> E0=M + e*sin(M) + 0.5*(e**2)*sin(2*M)
E0=[x + e*np.sin(x) + 0.5*(e**2)*np.sin(2*x) for x in Mean_anom]
#mean anomaly -> M0=E0 - e*sin(E0)
M0=[x - e*np.sin(x) for x in E0]
i=0
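    # Solve Kepler's equation M = E - e*sin(E) for the eccentric anomaly E with Newton-style updates:
    # E_new = E + (M - (E - e*sin(E))) / (1 - e*cos(E)). The fixed count of 100 iterations is assumed
    # to be enough for convergence here (no explicit tolerance check is performed).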
while i<100:
#[x + y for x, y in zip(first, second)]
calc_aux=[x2-y for x2,y in zip(Mean_anom,M0)]
E1=[x3 + y/(1-e*np.cos(x3)) for x3,y in zip(E0,calc_aux)]
M1=[x4 - e*np.sin(x4) for x4 in E0]
i+=1
E0=E1
M0=M1
nu=[2*np.arctan(np.sqrt((1+e)/(1-e))*np.tan(x5/2)) for x5 in E0]
RV=[ gamma + K*(e*np.cos(w)+np.cos(w+x6)) for x6 in nu]
RV=[x for x in RV] #m/s
return t,RV
#Examples
#a=RV_circular()
#pl.figure('RV_circular with P=365')
#pl.plot(a[0],a[1],':',)
#pl.title('planet of 365 days orbit')
#pl.xlabel('time')
#pl.ylabel('RV (Km/s)')
#b=RV_circular(P=100)
#pl.figure('RV_circular with P=100')
#pl.title('planet of 100 days orbit')
#pl.plot(b[0],b[1],':',)
#pl.xlabel('time')
#pl.ylabel('RV (Km/s)')
#c=RV_kepler(P=100,e=0,w=np.pi,time=100)
#pl.figure()
#pl.plot(c[0],c[1],':',)
#pl.title('P=100, e=0, w=pi, time=100')
#pl.xlabel('time')
#pl.ylabel('RV (Km/s)')
#d1=RV_kepler(P=100,e=0, w=0,time=500)
#pl.figure()
#pl.title('P=100, e=0, w=pi, time=25')
#pl.plot(d[0],d[1],'-',)
#pl.xlabel('time')
#pl.ylabel('RV (Km/s)')
#d2=RV_kepler(P=100,e=0, w=np.pi,time=500)
#pl.figure()
#pl.title('P=100, e=0, w=pi, time=25')
#pl.plot(d[0],d[1],'-',)
#pl.xlabel('time')
#pl.ylabel('RV (Km/s)')
#d3=RV_kepler(P=100,e=0.5, w=np.pi,time=500)
#pl.figure()
#pl.title('P=100, e=0, w=pi, time=25')
#pl.plot(d[0],d[1],'-',)
#pl.xlabel('time')
#pl.ylabel('RV (Km/s)')
#d4=RV_kepler(P=100,e=0.5, w=np.pi/2,time=500)
#pl.figure()
#pl.title('P=100, e=0, w=pi, time=25')
#pl.plot(d[0],d[1],'-',)
#pl.xlabel('time')
#pl.ylabel('RV (Km/s)')
d1=RV_kepler(P=100,e=0, w=0,time=500)
d2=RV_kepler(P=100,e=0.5, w=0,time=500)
d3=RV_kepler(P=100,e=0.5, w=np.pi,time=500)
d4=RV_kepler(P=100,e=0.5, w=np.pi/2,time=500)
# Four axes, returned as a 2-d array
f, axarr = pl.subplots(2, 2)
axarr[0, 0].plot(d1[0],d1[1])
axarr[0, 0].set_title('e=0 and w=0')
axarr[0, 1].plot(d2[0],d2[1])
axarr[0, 1].set_title('e=0.5, w=0')
axarr[1, 0].plot(d3[0],d3[1])
axarr[1, 0].set_title('e=0.5, w=pi')
axarr[1, 1].plot(d4[0],d4[1])
axarr[1, 1].set_title('e=0.5, w=pi/2')
#pl.setp(pl.xticks(fontsize = 18) for a in axarr[0,:])#pl.yticks(fontsize=18))
pl.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False) | mit |
IamJeffG/geopandas | geopandas/plotting.py | 1 | 13216 | from __future__ import print_function
import warnings
import numpy as np
from six import next
from six.moves import xrange
from shapely.geometry import Polygon
def plot_polygon(ax, poly, facecolor='red', edgecolor='black', alpha=0.5, linewidth=1.0, **kwargs):
""" Plot a single Polygon geometry """
from descartes.patch import PolygonPatch
a = np.asarray(poly.exterior)
if poly.has_z:
poly = Polygon(zip(*poly.exterior.xy))
# without Descartes, we could make a Patch of exterior
ax.add_patch(PolygonPatch(poly, facecolor=facecolor, linewidth=0, alpha=alpha)) # linewidth=0 because boundaries are drawn separately
ax.plot(a[:, 0], a[:, 1], color=edgecolor, linewidth=linewidth, **kwargs)
for p in poly.interiors:
x, y = zip(*p.coords)
ax.plot(x, y, color=edgecolor, linewidth=linewidth)
def plot_multipolygon(ax, geom, facecolor='red', edgecolor='black', alpha=0.5, linewidth=1.0, **kwargs):
""" Can safely call with either Polygon or Multipolygon geometry
"""
if geom.type == 'Polygon':
plot_polygon(ax, geom, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha, linewidth=linewidth, **kwargs)
elif geom.type == 'MultiPolygon':
for poly in geom.geoms:
plot_polygon(ax, poly, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha, linewidth=linewidth, **kwargs)
def plot_linestring(ax, geom, color='black', linewidth=1.0, **kwargs):
""" Plot a single LineString geometry """
a = np.array(geom)
ax.plot(a[:, 0], a[:, 1], color=color, linewidth=linewidth, **kwargs)
def plot_multilinestring(ax, geom, color='red', linewidth=1.0, **kwargs):
""" Can safely call with either LineString or MultiLineString geometry
"""
if geom.type == 'LineString':
plot_linestring(ax, geom, color=color, linewidth=linewidth, **kwargs)
elif geom.type == 'MultiLineString':
for line in geom.geoms:
plot_linestring(ax, line, color=color, linewidth=linewidth, **kwargs)
def plot_point(ax, pt, marker='o', markersize=2, color='black', **kwargs):
""" Plot a single Point geometry """
ax.plot(pt.x, pt.y, marker=marker, markersize=markersize, color=color, **kwargs)
def gencolor(N, colormap='Set1'):
"""
Color generator intended to work with one of the ColorBrewer
qualitative color scales.
Suggested values of colormap are the following:
Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3
(although any matplotlib colormap will work).
"""
from matplotlib import cm
# don't use more than 9 discrete colors
n_colors = min(N, 9)
cmap = cm.get_cmap(colormap, n_colors)
colors = cmap(range(n_colors))
for i in xrange(N):
yield colors[i % n_colors]
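# Illustrative standalone use (hypothetical values, not taken from this module):
# colors = list(gencolor(5, colormap='Set2')) gives five RGBA rows, cycling through at most 9 discrete colors.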
def plot_series(s, cmap='Set1', color=None, ax=None, linewidth=1.0,
figsize=None, **color_kwds):
""" Plot a GeoSeries
Generate a plot of a GeoSeries geometry with matplotlib.
Parameters
----------
Series
The GeoSeries to be plotted. Currently Polygon,
MultiPolygon, LineString, MultiLineString and Point
geometries can be plotted.
cmap : str (default 'Set1')
The name of a colormap recognized by matplotlib. Any
colormap will work, but categorical colormaps are
generally recommended. Examples of useful discrete
colormaps include:
Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3
color : str (default None)
If specified, all objects will be colored uniformly.
ax : matplotlib.pyplot.Artist (default None)
axes on which to draw the plot
linewidth : float (default 1.0)
Line width for geometries.
figsize : pair of floats (default None)
Size of the resulting matplotlib.figure.Figure. If the argument
ax is given explicitly, figsize is ignored.
**color_kwds : dict
Color options to be passed on to the actual plot function
Returns
-------
matplotlib axes instance
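    Examples
    --------
    A minimal, illustrative sketch (``gs`` is assumed to be an existing
    GeoSeries; it is not constructed here)::
        ax = plot_series(gs, cmap='Set2', linewidth=0.5)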
"""
if 'colormap' in color_kwds:
warnings.warn("'colormap' is deprecated, please use 'cmap' instead "
"(for consistency with matplotlib)", FutureWarning)
cmap = color_kwds.pop('colormap')
if 'axes' in color_kwds:
warnings.warn("'axes' is deprecated, please use 'ax' instead "
"(for consistency with pandas)", FutureWarning)
ax = color_kwds.pop('axes')
import matplotlib.pyplot as plt
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
ax.set_aspect('equal')
color_generator = gencolor(len(s), colormap=cmap)
for geom in s:
if color is None:
col = next(color_generator)
else:
col = color
if geom.type == 'Polygon' or geom.type == 'MultiPolygon':
if 'facecolor' in color_kwds:
plot_multipolygon(ax, geom, linewidth=linewidth, **color_kwds)
else:
plot_multipolygon(ax, geom, facecolor=col, linewidth=linewidth, **color_kwds)
elif geom.type == 'LineString' or geom.type == 'MultiLineString':
plot_multilinestring(ax, geom, color=col, linewidth=linewidth, **color_kwds)
elif geom.type == 'Point':
plot_point(ax, geom, color=col, **color_kwds)
plt.draw()
return ax
def plot_dataframe(s, column=None, cmap=None, color=None, linewidth=1.0,
categorical=False, legend=False, ax=None,
scheme=None, k=5, vmin=None, vmax=None, figsize=None,
**color_kwds):
""" Plot a GeoDataFrame
Generate a plot of a GeoDataFrame with matplotlib. If a
column is specified, the plot coloring will be based on values
in that column. Otherwise, a categorical plot of the
geometries in the `geometry` column will be generated.
Parameters
----------
GeoDataFrame
The GeoDataFrame to be plotted. Currently Polygon,
MultiPolygon, LineString, MultiLineString and Point
geometries can be plotted.
column : str (default None)
The name of the column to be plotted.
categorical : bool (default False)
If False, cmap will reflect numerical values of the
column being plotted. For non-numerical columns (or if
column=None), this will be set to True.
cmap : str (default 'Set1')
The name of a colormap recognized by matplotlib.
color : str (default None)
If specified, all objects will be colored uniformly.
linewidth : float (default 1.0)
Line width for geometries.
legend : bool (default False)
Plot a legend (Experimental; currently for categorical
plots only)
ax : matplotlib.pyplot.Artist (default None)
axes on which to draw the plot
scheme : pysal.esda.mapclassify.Map_Classifier
Choropleth classification schemes (requires PySAL)
k : int (default 5)
Number of classes (ignored if scheme is None)
vmin : None or float (default None)
Minimum value of cmap. If None, the minimum data value
in the column to be plotted is used.
vmax : None or float (default None)
Maximum value of cmap. If None, the maximum data value
in the column to be plotted is used.
figsize
Size of the resulting matplotlib.figure.Figure. If the argument
axes is given explicitly, figsize is ignored.
**color_kwds : dict
Color options to be passed on to the actual plot function
Returns
-------
matplotlib axes instance
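    Examples
    --------
    A minimal, illustrative sketch (``gdf`` and the numeric column name
    ``'values'`` are assumptions for illustration only)::
        ax = plot_dataframe(gdf, column='values', cmap='OrRd')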
"""
if 'colormap' in color_kwds:
warnings.warn("'colormap' is deprecated, please use 'cmap' instead "
"(for consistency with matplotlib)", FutureWarning)
cmap = color_kwds.pop('colormap')
if 'axes' in color_kwds:
warnings.warn("'axes' is deprecated, please use 'ax' instead "
"(for consistency with pandas)", FutureWarning)
ax = color_kwds.pop('axes')
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.colors import Normalize
from matplotlib import cm
if column is None:
return plot_series(s.geometry, cmap=cmap, color=color,
ax=ax, linewidth=linewidth, figsize=figsize,
**color_kwds)
else:
if s[column].dtype is np.dtype('O'):
categorical = True
if categorical:
if cmap is None:
cmap = 'Set1'
categories = list(set(s[column].values))
categories.sort()
valuemap = dict([(k, v) for (v, k) in enumerate(categories)])
values = [valuemap[k] for k in s[column]]
else:
values = s[column]
if scheme is not None:
binning = __pysal_choro(values, scheme, k=k)
values = binning.yb
# set categorical to True for creating the legend
categorical = True
binedges = [binning.yb.min()] + binning.bins.tolist()
categories = ['{0:.2f} - {1:.2f}'.format(binedges[i], binedges[i+1])
for i in range(len(binedges)-1)]
cmap = norm_cmap(values, cmap, Normalize, cm, vmin=vmin, vmax=vmax)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
ax.set_aspect('equal')
for geom, value in zip(s.geometry, values):
if color is None:
col = cmap.to_rgba(value)
else:
col = color
if geom.type == 'Polygon' or geom.type == 'MultiPolygon':
plot_multipolygon(ax, geom, facecolor=col, linewidth=linewidth, **color_kwds)
elif geom.type == 'LineString' or geom.type == 'MultiLineString':
plot_multilinestring(ax, geom, color=col, linewidth=linewidth, **color_kwds)
elif geom.type == 'Point':
plot_point(ax, geom, color=col, **color_kwds)
if legend:
if categorical:
patches = []
for value, cat in enumerate(categories):
patches.append(Line2D([0], [0], linestyle="none",
marker="o", alpha=color_kwds.get('alpha', 0.5),
markersize=10, markerfacecolor=cmap.to_rgba(value)))
ax.legend(patches, categories, numpoints=1, loc='best')
else:
# TODO: show a colorbar
raise NotImplementedError
plt.draw()
return ax
def __pysal_choro(values, scheme, k=5):
""" Wrapper for choropleth schemes from PySAL for use with plot_dataframe
Parameters
----------
values
Series to be plotted
scheme
        pysal.esda.mapclassify classification scheme
['Equal_interval'|'Quantiles'|'Fisher_Jenks']
k
number of classes (2 <= k <=9)
Returns
-------
binning
        Binning object that holds the Series with values replaced with
class identifier and the bins.
"""
try:
from pysal.esda.mapclassify import Quantiles, Equal_Interval, Fisher_Jenks
schemes = {}
schemes['equal_interval'] = Equal_Interval
schemes['quantiles'] = Quantiles
schemes['fisher_jenks'] = Fisher_Jenks
s0 = scheme
scheme = scheme.lower()
if scheme not in schemes:
scheme = 'quantiles'
warnings.warn('Unrecognized scheme "{0}". Using "Quantiles" '
'instead'.format(s0), UserWarning, stacklevel=3)
if k < 2 or k > 9:
warnings.warn('Invalid k: {0} (2 <= k <= 9), setting k=5 '
'(default)'.format(k), UserWarning, stacklevel=3)
k = 5
binning = schemes[scheme](values, k)
return binning
except ImportError:
raise ImportError("PySAL is required to use the 'scheme' keyword")
def norm_cmap(values, cmap, normalize, cm, vmin=None, vmax=None):
""" Normalize and set colormap
Parameters
----------
values
Series or array to be normalized
cmap
matplotlib Colormap
normalize
matplotlib.colors.Normalize
cm
matplotlib.cm
vmin
Minimum value of colormap. If None, uses min(values).
vmax
Maximum value of colormap. If None, uses max(values).
Returns
-------
n_cmap
mapping of normalized values to colormap (cmap)
"""
mn = min(values) if vmin is None else vmin
mx = max(values) if vmax is None else vmax
norm = normalize(vmin=mn, vmax=mx)
n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)
return n_cmap
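# Hedged usage sketch (not part of the original module): normalize a handful of
# illustrative values onto the 'viridis' colormap and query one RGBA color.
def _example_norm_cmap():
    from matplotlib.colors import Normalize
    from matplotlib import cm
    n_cmap = norm_cmap([1.0, 2.5, 4.0], 'viridis', Normalize, cm)
    return n_cmap.to_rgba(2.5)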
| bsd-3-clause |
markovmodel/molPX | molpx/_linkutils.py | 1 | 18471 | import numpy as _np
from matplotlib.widgets import AxesWidget as _AxesWidget
from matplotlib.colors import is_color_like as _is_color_like
from matplotlib.axes import Axes as _mplAxes
from matplotlib.figure import Figure as _mplFigure
from IPython.display import display as _ipydisplay
from pyemma.util.types import is_int as _is_int
from scipy.spatial import cKDTree as _cKDTree
from ._bmutils import get_ascending_coord_idx
from mdtraj import Trajectory as _mdTrajectory
from nglview import NGLWidget as _NGLwdg
from ipywidgets import HBox as _HBox, VBox as _VBox
def pts_per_axis_unit(mplax, pt_per_inch=72):
r"""
Return how many pt per axis unit of a given maptplotlib axis a figure has
Parameters
----------
mplax : :obj:`matplotlib.axes._subplots.AxesSubplot`
pt_per_inch : how many points are in an inch (this number should not change)
Returns
--------
pt_per_xunit, pt_per_yunit
"""
# matplotlib voodoo
# Get bounding box
bbox = mplax.get_window_extent().transformed(mplax.get_figure().dpi_scale_trans.inverted())
span_inch = _np.array([bbox.width, bbox.height], ndmin=2).T
span_units = [mplax.get_xlim(), mplax.get_ylim()]
span_units = _np.diff(span_units, axis=1)
inch_per_unit = span_inch / span_units
return inch_per_unit * pt_per_inch
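# Hedged usage sketch (not part of the original module): convert a radius given
# in axis units into a marker size in points, which is how the moving "band"
# dot further below is sized. The axis limits are illustrative.
def _example_pts_per_axis_unit():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.set_xlim(0, 10)
    ax.set_ylim(0, 1)
    radius_in_axis_units = 0.5
    size_in_pts = pts_per_axis_unit(ax).mean() * radius_in_axis_units
    plt.close(fig)
    return size_in_pts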
def update2Dlines(iline, x, y):
"""
provide a common interface to update objects on the plot to a new position (x,y) depending
on whether they are hlines, vlines, dots etc
Parameters
----------
iline: :obj:`matplotlib.lines.Line2D` object
x : float with new position
y : float with new position
"""
# TODO FIND OUT A CLEANER WAY TO DO THIS (dict or class)
if not hasattr(iline,'whatisthis'):
raise AttributeError("This method will only work if iline has the attribute 'whatisthis'")
else:
# TODO find cleaner way of distinguishing these 2Dlines
if iline.whatisthis in ['dot']:
iline.set_xdata((x))
iline.set_ydata((y))
elif iline.whatisthis in ['lineh']:
iline.set_ydata((y,y))
elif iline.whatisthis in ['linev']:
iline.set_xdata((x,x))
else:
# TODO: FIND OUT WHY EXCEPTIONS ARE NOT BEING RAISED
raise TypeError("what is this type of 2Dline?")
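# Hedged usage sketch (not part of the original module): tag a Line2D with the
# 'whatisthis' attribute that update2Dlines expects and move it to (1, 2).
def _example_update2Dlines():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    dot = ax.plot(0.0, 0.0, 'o')[0]
    setattr(dot, 'whatisthis', 'dot')
    update2Dlines(dot, 1.0, 2.0)
    plt.close(fig)
    return dot.get_xdata(), dot.get_ydata()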
class ClickOnAxisListener(object):
def __init__(self, ngl_wdg, crosshairs, showclick_objs, ax, pos,
list_mpl_objects_to_update):
self.ngl_wdg = ngl_wdg
self.crosshairs = crosshairs
self.showclick_objs = showclick_objs
self.ax = ax
self.pos = pos
self.list_mpl_objects_to_update = list_mpl_objects_to_update
self.list_of_dots = [None]*self.pos.shape[0]
self.fig_size = self.ax.figure.get_size_inches()
self.kdtree = None
def build_tree(self):
# Use ax.transData to compute distance in pixels
# regardless of the axes units (http://matplotlib.org/users/transforms_tutorial.html)
# Corresponds to the visual distance between clicked point and target point
self.kdtree = _cKDTree(self.ax.transData.transform(self.pos))
@property
def figure_changed_size(self):
return not _np.allclose(self.fig_size, self.ax.figure.get_size_inches())
def __call__(self, event):
# Wait for the first click or a figsize change
# to build the kdtree
if self.figure_changed_size or self.kdtree is None:
self.build_tree()
self.fig_size = self.ax.figure.get_size_inches()
# Was the click inside the bounding box?
if self.ax.get_window_extent().contains(event.x, event.y):
if self.crosshairs:
for iline in self.showclick_objs:
update2Dlines(iline, event.xdata, event.ydata)
_, index = self.kdtree.query(x=[event.x, event.y], k=1)
for idot in self.list_mpl_objects_to_update:
update2Dlines(idot, self.pos[index, 0], self.pos[index, 1])
self.ngl_wdg.isClick = True
if hasattr(self.ngl_wdg, '_GeomsInWid'):
# We're in a sticky situation
if event.button == 1:
# Pressed left
self.ngl_wdg._GeomsInWid[index].show()
if self.list_of_dots[index] is None:
# Plot and store the dot in case there wasn't
self.list_of_dots[index] = self.ax.plot(self.pos[index, 0], self.pos[index, 1], 'o',
c=self.ngl_wdg._GeomsInWid[index].color_dot, ms=7)[0]
elif event.button in [2, 3]:
# Pressed right or middle
self.ngl_wdg._GeomsInWid[index].hide()
# Delete dot if the geom is not visible anymore
if not self.ngl_wdg._GeomsInWid[index].is_visible() and self.list_of_dots[index] is not None:
self.list_of_dots[index].remove()
self.list_of_dots[index] = None
else:
# We're not sticky, just go to the frame
self.ngl_wdg.frame = index
class MolPXBox(object):
r"""
Class created to be the parent class of MolPXHBox and MolPXVBox, which inherit from
MolPXBox and the ipywidget classes HBox and VBox (*args and **kwargs are for these)
The sole purpose of this class is to avoid monkey-patching elsewhere in the code:
it creates the linked_* attributes as empty lists on instantiation.
It also implements two methods:
* self.display (=IPython.display(self)
* append_if_existing
"""
def __init__(self, *args, **kwargs):
self.linked_axes = []
self.linked_mdgeoms = []
self.linked_ngl_wdgs = []
self.linked_data_arrays = []
self.linked_ax_wdgs = []
self.linked_figs = []
def display(self):
_ipydisplay(self)
def append_if_existing(self, args0, startswith_arg="linked_"):
r"""
args0 is the tuple containing all widgets to be included in the MolPXBox.
This tuple can itself contain other MolPX widgets,
so we iterate through them and append their linked attributes.
"""
for iarg in args0:
for attrname in dir(iarg):
if attrname.startswith(startswith_arg) and len(iarg.__dict__[attrname]) != 0:
self.__dict__[attrname] += iarg.__dict__[attrname]
def auto_append_these_mpx_attrs(iobj, *attrs):
r""" The attribute s name is automatically derived
from the attribute s type via a type:name dictionary
*attrs : any number of unnamed objects of the types in type2attrname.
If the object type is a list, it will be flattened prior to attempting
"""
attrs_flat_list = []
for sublist in attrs:
if isinstance(sublist, list):
for item in sublist:
attrs_flat_list.append(item)
else:
attrs_flat_list.append(sublist)
# Go through the arguments and assign them an attrname according to their types
for iattr in attrs_flat_list:
for attrname, itype in type2attrname.items():
if isinstance(iattr, itype):
iobj.__dict__[attrname].append(iattr)
break
class MolPXHBox(_HBox, MolPXBox):
def __init__(self, *args, **kwargs):
super(MolPXHBox, self).__init__(*args, **kwargs)
self.append_if_existing(args[0])
class MolPXVBox(_VBox, MolPXBox):
def __init__(self, *args, **kwargs):
super(MolPXVBox, self).__init__(*args, **kwargs)
self.append_if_existing(args[0])
type2attrname = {"linked_axes": _mplAxes,
"linked_mdgeoms": _mdTrajectory,
"linked_ngl_wdgs": _NGLwdg,
"linked_data_arrays": _np.ndarray,
"linked_ax_wdgs": _AxesWidget,
"linked_figs": _mplFigure,
}
class ChangeInNGLWidgetListener(object):
def __init__(self, ngl_wdg, list_mpl_objects_to_update, pos):
self.ngl_wdg = ngl_wdg
self.list_mpl_objects_to_update = list_mpl_objects_to_update
self.pos = pos
def __call__(self, change):
self.ngl_wdg.isClick = False
_idx = change["new"]
try:
for idot in self.list_mpl_objects_to_update:
update2Dlines(idot, self.pos[_idx, 0], self.pos[_idx, 1])
#print("caught index error with index %s (new=%s, old=%s)" % (_idx, change["new"], change["old"]))
except IndexError as e:
for idot in self.list_mpl_objects_to_update:
update2Dlines(idot, self.pos[0, 0], self.pos[0, 1])
print("caught index error with index %s (new=%s, old=%s)" % (_idx, change["new"], change["old"]))
#print("set xy = (%s, %s)" % (x[_idx], y[_idx]))
class GeometryInNGLWidget(object):
r"""
returns an object that is aware of where its geometries are located in the NGLWidget and of their representation status
The object exposes two methods, show and hide, to automagically know what to do
"""
def __init__(self, geom, ngl_wdg, list_of_repr_dicts=None,
color_molecule_hex='Element', n_small=10):
self.lives_at_components = []
self.geom = geom
self.ngl_wdg = ngl_wdg
self.have_repr = []
sticky_rep = 'cartoon'
if self.geom[0].top.n_residues < n_small:
sticky_rep = 'ball+stick'
if list_of_repr_dicts is None:
list_of_repr_dicts = [{'repr_type': sticky_rep, 'selection': 'all'}]
self.list_of_repr_dicts = list_of_repr_dicts
self.color_molecule_hex = color_molecule_hex
self.color_dot = color_molecule_hex
if isinstance(self.color_molecule_hex, str) and color_molecule_hex == 'Element':
self.color_dot = 'red'
def show(self):
# Show can mean either
# - add a whole new component (case 1)
# - add the representation again to a representation-less component (case 2)
# CASE 1
if self.is_empty() or self.all_reps_are_on():
if len(self.have_repr) == self.geom.n_frames:
print("arrived at the end")
component = None
else:
idx = len(self.have_repr)
self.ngl_wdg.add_trajectory(self.geom[idx])
self.lives_at_components.append(len(self.ngl_wdg._ngl_component_ids) - 1)
self.ngl_wdg.clear_representations(component=self.lives_at_components[-1])
self.have_repr.append(True)
component = self.lives_at_components[-1]
# CASE 2
elif self.any_rep_is_off(): # Some are living in the widget already but have no rep
idx = _np.argwhere(~_np.array(self.have_repr))[0].squeeze()
component = self.lives_at_components[idx]
self.have_repr[idx] = True
else:
raise Exception("This situation should not arise. This is a bug")
if component is not None:
for irepr in self.list_of_repr_dicts:
self.ngl_wdg.add_representation(irepr['repr_type'],
selection=irepr['selection'],
component=component,
color=self.color_molecule_hex)
def hide(self):
if self.is_empty() or self.all_reps_are_off():
print("nothing to hide")
pass
elif self.any_rep_is_on(): # There's represented components already in the widget
idx = _np.argwhere(self.have_repr)[-1].squeeze()
self.ngl_wdg.clear_representations(component=self.lives_at_components[idx])
self.have_repr[idx] = False
else:
raise Exception("This situation should not arise. This is a bug")
# Quickhand methods for knowing what's up
def is_empty(self):
if len(self.have_repr) == 0:
return True
else:
return False
def all_reps_are_off(self):
if len(self.have_repr) == 0:
return True
else:
return _np.all(~_np.array(self.have_repr))
def all_reps_are_on(self):
if len(self.have_repr) == 0:
return False
else:
return _np.all(self.have_repr)
def any_rep_is_off(self):
return _np.any(~_np.array(self.have_repr))
def any_rep_is_on(self):
return _np.any(self.have_repr)
def is_visible(self):
if self.is_empty() or self.all_reps_are_off():
return False
else:
return True
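# Hedged usage sketch (not part of the original module): wrap an mdtraj
# Trajectory (``geom``, an assumed input) and an empty NGLWidget so that
# show()/hide() toggle the molecule's representation in the widget.
def _example_GeometryInNGLWidget(geom):
    import nglview
    ngl_wdg = nglview.NGLWidget()
    giw = GeometryInNGLWidget(geom, ngl_wdg)
    giw.show()  # adds the first frame as a new component and represents it
    giw.hide()  # clears that representation again
    return ngl_wdg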
def link_ax_w_pos_2_nglwidget(ax, pos, ngl_wdg,
crosshairs=True,
dot_color='red',
band_width=None,
radius=False,
directionality=None,
exclude_coord=None,
):
r"""
Initial idea for this function comes from @arose, the rest is @gph82
Parameters
----------
ax : matplotlib axis object to be linked
pos : ndarray of shape (N,2) with the positions of the geoms in the ngl_wdg
crosshairs : Boolean or str
If True, a crosshair will show where the mouse-click ocurred. If 'h' or 'v', only the horizontal or
vertical line of the crosshair will be shown, respectively. If False, no crosshair will appear
dot_color : Anything that yields matplotlib.colors.is_color_like(dot_color)==True
Default is 'red'. dot_color='None' yields no dot
band_width : None or iterable of len = 2
If band_width is not None, the method tries to figure out on its own if
there is an ascending coordinate and will include a moving band on :obj:`ax`
of this width (in units of the axis along which the band is plotted).
If the method cannot find an ascending coordinate, an exception is thrown.
directionality : str or None, default is None
If not None, directionality can be either 'a2w' or 'w2a', meaning that connectivity
between axis and widget will be only established as
* 'a2w' : action in axis triggers action in widget, but not the other way around
* 'w2a' : action in widget triggers action in axis, but not the other way around
exclude_coord : None or int , default is None
The excluded coordinate will not be considered when computing the nearest-point-to-click.
Typical use case is for visualize.traj to only compute distances horizontally along the time axis
Returns
-------
axes_widget : :obj:`matplotlib.widgets.AxesWidget` that has been linked to the NGLWidget
"""
assert directionality in [None, 'a2w', 'w2a'], "The directionality parameter has to be in [None, 'a2w', 'w2a'] " \
"not %s"%directionality
assert crosshairs in [True, False, 'h', 'v'], "The crosshairs parameter has to be in [True, False, 'h','v'], " \
"not %s" % crosshairs
ipos = _np.copy(pos)
if _is_int(exclude_coord):
ipos[:,exclude_coord] = 0
# Are we in a sticky situation?
if hasattr(ngl_wdg, '_GeomsInWid'):
sticky = True
else:
assert ngl_wdg.trajectory_0.n_frames == pos.shape[0], \
("Mismatching frame numbers %u vs %u" % (ngl_wdg.trajectory_0.n_frames, pos.shape[0]))
sticky = False
# Basic interactive objects
showclick_objs = []
if crosshairs in [True, 'h']:
lineh = ax.axhline(ax.get_ybound()[0], c="black", ls='--')
setattr(lineh, 'whatisthis', 'lineh')
showclick_objs.append(lineh)
if crosshairs in [True, 'v']:
linev = ax.axvline(ax.get_xbound()[0], c="black", ls='--')
setattr(linev, 'whatisthis', 'linev')
showclick_objs.append(linev)
if _is_color_like(dot_color):
pass
else:
raise TypeError('dot_color should be a matplotlib color')
dot = ax.plot(pos[0,0],pos[0,1], 'o', c=dot_color, ms=7, zorder=100)[0]
setattr(dot,'whatisthis','dot')
list_mpl_objects_to_update = [dot]
# Other objects, related to smoothing options
if band_width is not None:
if radius:
band_width_in_pts = int(_np.round(pts_per_axis_unit(ax).mean() * _np.mean(band_width)))
rad = ax.plot(pos[0, 0], pos[0, 1], 'o',
ms=_np.round(band_width_in_pts),
c='green', alpha=.25, markeredgecolor='None')[0]
setattr(rad, 'whatisthis', 'dot')
if not sticky:
list_mpl_objects_to_update.append(rad)
else:
# print("Band_width(x,y) is %s" % (band_width))
coord_idx = get_ascending_coord_idx(pos)
if _np.ndim(coord_idx)>0 and len(coord_idx)==0:
raise ValueError("Must have an ascending coordinate for band_width usage")
band_width_in_pts = int(_np.round(pts_per_axis_unit(ax)[coord_idx] * band_width[coord_idx]))
# print("Band_width in %s is %s pts"%('xy'[coord_idx], band_width_in_pts))
band_call = [ax.axvline, ax.axhline][coord_idx]
band_init = [ax.get_xbound, ax.get_ybound][coord_idx]
band_type = ['linev', 'lineh'][coord_idx]
band = band_call(band_init()[0],
lw=band_width_in_pts,
c="green", ls='-',
alpha=.25)
setattr(band, 'whatisthis', band_type)
list_mpl_objects_to_update.append(band)
ngl_wdg.isClick = False
CLA_listener = ClickOnAxisListener(ngl_wdg, crosshairs, showclick_objs, ax, pos,
list_mpl_objects_to_update)
NGL_listener = ChangeInNGLWidgetListener(ngl_wdg, list_mpl_objects_to_update, pos)
# Connect axes to widget
axes_widget = _AxesWidget(ax)
if directionality in [None, 'a2w']:
axes_widget.connect_event('button_release_event', CLA_listener)
# Connect widget to axes
if directionality in [None, 'w2a']:
ngl_wdg.observe(NGL_listener, "frame", "change")
ngl_wdg.center()
return axes_widget
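# Hedged usage sketch (not part of the original module): link a 2D projection
# ``pos`` (an (n_frames, 2) array) of an mdtraj Trajectory ``geoms`` to an
# nglview widget, so that clicking the axis jumps to the corresponding frame.
# Both arguments are assumed inputs and must have matching frame numbers.
def _example_link_ax_w_pos_2_nglwidget(geoms, pos):
    import nglview
    import matplotlib.pyplot as plt
    ngl_wdg = nglview.show_mdtraj(geoms)
    fig, ax = plt.subplots()
    ax.plot(pos[:, 0], pos[:, 1], alpha=.25, zorder=0)
    ax_wdg = link_ax_w_pos_2_nglwidget(ax, pos, ngl_wdg)
    return ngl_wdg, ax_wdg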
| lgpl-3.0 |
Ziqi-Li/bknqgis | pandas/pandas/core/window.py | 3 | 68731 | """
provide a generic structure to support window functions,
similar to how we have a Groupby object
"""
from __future__ import division
import warnings
import numpy as np
from collections import defaultdict
from datetime import timedelta
from pandas.core.dtypes.generic import (
ABCSeries,
ABCDataFrame,
ABCDatetimeIndex,
ABCTimedeltaIndex,
ABCPeriodIndex,
ABCDateOffset)
from pandas.core.dtypes.common import (
is_integer,
is_bool,
is_float_dtype,
is_integer_dtype,
needs_i8_conversion,
is_timedelta64_dtype,
is_list_like,
_ensure_float64,
is_scalar)
from pandas.core.base import (PandasObject, SelectionMixin,
GroupByMixin)
import pandas.core.common as com
import pandas._libs.window as _window
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (Substitution, Appender,
cache_readonly)
from pandas.core.generic import _shared_docs
from textwrap import dedent
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
same type as input
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
"""
class _Window(PandasObject, SelectionMixin):
_attributes = ['window', 'min_periods', 'freq', 'center', 'win_type',
'axis', 'on', 'closed']
exclusions = set()
def __init__(self, obj, window=None, min_periods=None, freq=None,
center=False, win_type=None, axis=0, on=None, closed=None,
**kwargs):
if freq is not None:
warnings.warn("The freq kw is deprecated and will be removed in a "
"future version. You can resample prior to passing "
"to a window function", FutureWarning, stacklevel=3)
self.__dict__.update(kwargs)
self.blocks = []
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.freq = freq
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self):
return None
@property
def _on(self):
return None
@property
def is_freq_type(self):
return self.win_type == 'freq'
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not \
is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in \
['right', 'both', 'left', 'neither']:
raise ValueError("closed must be 'right', 'left', 'both' or "
"'neither'")
def _convert_freq(self, how=None):
""" resample according to the how, return a new object """
obj = self._selected_obj
index = None
if (self.freq is not None and
isinstance(obj, (ABCSeries, ABCDataFrame))):
if how is not None:
warnings.warn("The how kw argument is deprecated and removed "
"in a future version. You can resample prior "
"to passing to a window function", FutureWarning,
stacklevel=6)
obj = obj.resample(self.freq).aggregate(how or 'asfreq')
return obj, index
def _create_blocks(self, how):
""" split data into blocks & return conformed data """
obj, index = self._convert_freq(how)
if index is not None:
index = self._on
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]),
copy=False)
blocks = obj.as_blocks(copy=False).values()
return blocks, obj, index
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self):
return self.__class__.__name__
def __unicode__(self):
""" provide a nice str repr of our rolling object """
attrs = ["{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None]
return "{klass} [{attrs}]".format(klass=self._window_type,
attrs=','.join(attrs))
def _get_index(self, index=None):
"""
Return index as ndarrays
Returns
-------
tuple of (index, index_as_ndarray)
"""
if self.is_freq_type:
if index is None:
index = self._on
return index, index.asi8
return index, index
def _prep_values(self, values=None, kill_inf=True, how=None):
if values is None:
values = getattr(self._selected_obj, 'values', self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = _ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = _ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError("ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(
action=self._window_type,
dtype=values.dtype))
else:
try:
values = _ensure_float64(values)
except (ValueError, TypeError):
raise TypeError("cannot handle this type -> {0}"
"".format(values.dtype))
if kill_inf:
values = values.copy()
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None):
""" wrap a single result """
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(
result.ravel(), unit='ns').values.reshape(result.shape)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj):
"""
wrap the results
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
"""
from pandas import Series, concat
from pandas.core.index import _ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = _ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
if not len(final):
return obj.astype('float64')
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window):
""" center the result in the window """
if self.axis > result.ndim - 1:
raise ValueError("Requested axis is larger then no. of argument "
"dimensions")
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
return self.apply(arg, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs['sum'] = dedent("""
%(name)s sum
Parameters
----------
how : string, default None
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
_shared_docs['mean'] = dedent("""
%(name)s mean
Parameters
----------
how : string, default None
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
class Window(_Window):
"""
Provides rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If it's an offset then this will be the time period of each window. Each
window will be variably sized based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
this will default to 1.
freq : string or DateOffset object, optional (default None)
.. deprecated:: 0.18.0
Frequency to conform the data to before computing the statistic.
Specified as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. See the notes below.
on : string, optional
For a DataFrame, column on which to calculate
the rolling window, rather than the index
closed : string, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
axis : int or string, default 0
Returns
-------
a Window or Rolling sub-classed for the particular operation
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 1.0
2 2.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
....: index = [pd.Timestamp('20130101 09:00:00'),
....: pd.Timestamp('20130101 09:00:02'),
....: pd.Timestamp('20130101 09:00:03'),
....: pd.Timestamp('20130101 09:00:05'),
....: pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width).
"""
def validate(self):
super(Window, self).validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window < 0:
raise ValueError("window must be non-negative")
try:
import scipy.signal as sig
except ImportError:
raise ImportError('Please install scipy to generate window '
'weight')
if not isinstance(self.win_type, compat.string_types):
raise ValueError('Invalid win_type {0}'.format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError('Invalid win_type {0}'.format(self.win_type))
else:
raise ValueError('Invalid window {0}'.format(window))
def _prep_window(self, **kwargs):
"""
provide validation for our window type and return the window;
the object has already been validated
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com._asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {'kaiser': ['beta'],
'gaussian': ['std'],
'general_gaussian': ['power', 'width'],
'slepian': ['width']}
if win_type in arg_map:
return tuple([win_type] + _pop_args(win_type,
arg_map[win_type],
kwargs))
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = '%s window requires %%s' % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
def _apply_window(self, mean=True, how=None, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : boolean, default True
If True computes weighted mean, else weighted sum
how : string, default to None
.. deprecated:: 0.18.0
how to resample
Returns
-------
y : type of input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj, index = self._create_blocks(how=how)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return _window.roll_window(np.concatenate((arg,
additional_nans))
if center else arg, window, minp,
avg=mean)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj)
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
See also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name='window')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_window_func('sum', args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name='window')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_window_func('mean', args, kwargs)
return self._apply_window(mean=True, **kwargs)
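# Hedged usage sketch (not part of the original source): a Gaussian-weighted
# rolling mean, exercising the win_type/scipy machinery above. The extra
# keyword ``std`` is popped by _pop_args; the data is illustrative.
def _example_weighted_rolling_mean():
    import numpy as np
    import pandas as pd
    s = pd.Series(np.arange(10, dtype=float))
    return s.rolling(window=5, win_type='gaussian').mean(std=1.0)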
class _GroupByMixin(GroupByMixin):
""" provide the groupby facilities """
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop('parent', None) # noqa
groupby = kwargs.pop('groupby', None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super(GroupByMixin, self).__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch('count')
corr = GroupByMixin._dispatch('corr', other=None, pairwise=None)
cov = GroupByMixin._dispatch('cov', other=None, pairwise=None)
def _apply(self, func, name, window=None, center=None,
check_minp=None, how=None, **kwargs):
"""
dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, compat.string_types):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(self, func, name=None, window=None, center=None,
check_minp=None, how=None, **kwargs):
"""
Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
name : string, optional
name of this function
window : int/array, default to _get_window()
center : boolean, default to self.center
check_minp : function, default to _use_window
how : string, default to None
.. deprecated:: 0.18.0
how to resample
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj, index = self._create_blocks(how=how)
index, indexi = self._get_index(index=index)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
cfunc = getattr(_window, func, None)
if cfunc is None:
raise ValueError("we do not support this function "
"in _window.{0}".format(func))
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = _ensure_float64(arg)
return cfunc(arg,
window, minp, indexi, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(np.concatenate((x, additional_nans)),
window, min_periods=self.min_periods,
closed=self.closed)
else:
def calc(x):
return func(x, window, min_periods=self.min_periods,
closed=self.closed)
with np.errstate(all='ignore'):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj)
class _Rolling_and_Expanding(_Rolling):
_shared_docs['count'] = """%(name)s count of number of non-NaN
observations inside provided window."""
def count(self):
blocks, obj, index = self._create_blocks(how=None)
index, indexi = self._get_index(index=index)
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(result, window=window, min_periods=0,
center=self.center,
closed=self.closed).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs['apply'] = dedent(r"""
%(name)s function apply
Parameters
----------
func : function
Must produce a single value from an ndarray input
\*args and \*\*kwargs are passed to the function""")
def apply(self, func, args=(), kwargs={}):
# TODO: _level is unused?
_level = kwargs.pop('_level', None) # noqa
window = self._get_window()
offset = _offset(window, self.center)
index, indexi = self._get_index()
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
return _window.roll_generic(arg, window, minp, indexi, closed,
offset, func, args, kwargs)
return self._apply(f, func, args=args, kwargs=kwargs,
center=False)
def sum(self, *args, **kwargs):
nv.validate_window_func('sum', args, kwargs)
return self._apply('roll_sum', 'sum', **kwargs)
_shared_docs['max'] = dedent("""
%(name)s maximum
Parameters
----------
how : string, default 'max'
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
def max(self, how=None, *args, **kwargs):
nv.validate_window_func('max', args, kwargs)
if self.freq is not None and how is None:
how = 'max'
return self._apply('roll_max', 'max', how=how, **kwargs)
_shared_docs['min'] = dedent("""
%(name)s minimum
Parameters
----------
how : string, default 'min'
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
def min(self, how=None, *args, **kwargs):
nv.validate_window_func('min', args, kwargs)
if self.freq is not None and how is None:
how = 'min'
return self._apply('roll_min', 'min', how=how, **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func('mean', args, kwargs)
return self._apply('roll_mean', 'mean', **kwargs)
_shared_docs['median'] = dedent("""
%(name)s median
Parameters
----------
how : string, default 'median'
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
def median(self, how=None, **kwargs):
if self.freq is not None and how is None:
how = 'median'
return self._apply('roll_median_c', 'median', how=how, **kwargs)
_shared_docs['std'] = dedent("""
%(name)s standard deviation
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func('std', args, kwargs)
window = self._get_window()
index, indexi = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(_window.roll_var(arg, window, minp, indexi,
self.closed, ddof))
return self._apply(f, 'std', check_minp=_require_min_periods(1),
ddof=ddof, **kwargs)
_shared_docs['var'] = dedent("""
%(name)s variance
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func('var', args, kwargs)
return self._apply('roll_var', 'var',
check_minp=_require_min_periods(1), ddof=ddof,
**kwargs)
_shared_docs['skew'] = """Unbiased %(name)s skewness"""
def skew(self, **kwargs):
return self._apply('roll_skew', 'skew',
check_minp=_require_min_periods(3), **kwargs)
_shared_docs['kurt'] = """Unbiased %(name)s kurtosis"""
def kurt(self, **kwargs):
return self._apply('roll_kurt', 'kurt',
check_minp=_require_min_periods(4), **kwargs)
_shared_docs['quantile'] = dedent("""
%(name)s quantile
Parameters
----------
quantile : float
0 <= quantile <= 1""")
def quantile(self, quantile, **kwargs):
window = self._get_window()
index, indexi = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
if quantile == 1.0:
return _window.roll_max(arg, window, minp, indexi,
self.closed)
elif quantile == 0.0:
return _window.roll_min(arg, window, minp, indexi,
self.closed)
else:
return _window.roll_quantile(arg, window, minp, indexi,
self.closed, quantile)
return self._apply(f, 'quantile', quantile=quantile,
**kwargs)
_shared_docs['cov'] = dedent("""
%(name)s sample covariance
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used
and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
# GH 16058: offset window
if self.is_freq_type:
window = self.win_freq
else:
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype('float64')
Y = Y.astype('float64')
mean = lambda x: x.rolling(window, self.min_periods,
center=self.center).mean(**kwargs)
count = (X + Y).rolling(window=window,
center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_cov, pairwise=bool(pairwise))
_shared_docs['corr'] = dedent("""
%(name)s sample correlation
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndex DataFrame in the case of DataFrame inputs.
In the case of missing elements, only complete pairwise observations
will be used.""")
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(window=window, min_periods=self.min_periods,
freq=self.freq, center=self.center)
b = b.rolling(window=window, min_periods=self.min_periods,
freq=self.freq, center=self.center)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_corr, pairwise=bool(pairwise))
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(self._on,
(ABCDatetimeIndex,
ABCTimedeltaIndex,
ABCPeriodIndex))
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif (isinstance(self.obj, ABCDataFrame) and
self.on in self.obj.columns):
from pandas import Index
return Index(self.obj[self.on])
else:
raise ValueError("invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on))
def validate(self):
super(Rolling, self).validate()
# we allow rolling on a datetimelike index
if ((self.obj.empty or self.is_datetimelike) and
isinstance(self.window, (compat.string_types, ABCDateOffset,
timedelta))):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError("center is not implemented "
"for datetimelike and offset "
"based windows")
# this will raise ValueError on non-fixed freqs
self.win_freq = self.window
self.window = freq.nanos
self.win_type = 'freq'
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError("closed only implemented for datetimelike "
"and offset based windows")
def _validate_monotonic(self):
""" validate on is monotonic """
if not self._on.is_monotonic:
formatted = self.on or 'index'
raise ValueError("{0} must be "
"monotonic".format(formatted))
def _validate_freq(self):
""" validate & return our freq """
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError("passed window {0} in not "
"compat with a datetimelike "
"index".format(self.window))
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
See also
--------
pandas.Series.rolling
pandas.DataFrame.rolling
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(Rolling, self).aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['count'])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply('roll_count', 'count')
return super(Rolling, self).count()
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['apply'])
def apply(self, func, args=(), kwargs={}):
return super(Rolling, self).apply(func, args=args, kwargs=kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_rolling_func('sum', args, kwargs)
return super(Rolling, self).sum(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['max'])
def max(self, *args, **kwargs):
nv.validate_rolling_func('max', args, kwargs)
return super(Rolling, self).max(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['min'])
def min(self, *args, **kwargs):
nv.validate_rolling_func('min', args, kwargs)
return super(Rolling, self).min(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_rolling_func('mean', args, kwargs)
return super(Rolling, self).mean(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['median'])
def median(self, **kwargs):
return super(Rolling, self).median(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['std'])
def std(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func('std', args, kwargs)
return super(Rolling, self).std(ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['var'])
def var(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func('var', args, kwargs)
return super(Rolling, self).var(ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['skew'])
def skew(self, **kwargs):
return super(Rolling, self).skew(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['kurt'])
def kurt(self, **kwargs):
return super(Rolling, self).kurt(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['quantile'])
def quantile(self, quantile, **kwargs):
return super(Rolling, self).quantile(quantile=quantile, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['cov'])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super(Rolling, self).cov(other=other, pairwise=pairwise,
ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['corr'])
def corr(self, other=None, pairwise=None, **kwargs):
return super(Rolling, self).corr(other=other, pairwise=pairwise,
**kwargs)
class RollingGroupby(_GroupByMixin, Rolling):
"""
Provides a rolling groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Rolling
def _gotitem(self, key, ndim, subset=None):
# we are setting the index on the actual object
# here so our index is carried thru to the selected obj
# when we do the splitting for the groupby
if self.on is not None:
self._groupby.obj = self._groupby.obj.set_index(self._on)
self.on = None
return super(RollingGroupby, self)._gotitem(key, ndim, subset=subset)
def _validate_monotonic(self):
"""
validate that on is monotonic;
we don't care for groupby.rolling
because we have already validated at a higher
level
"""
pass
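# Hedged usage sketch (not part of the original source): RollingGroupby is
# normally reached via groupby(...).rolling(...), giving each group its own
# window. The frame below is illustrative.
def _example_rolling_groupby():
    import pandas as pd
    df = pd.DataFrame({'g': ['a', 'a', 'b', 'b'],
                       'x': [1.0, 2.0, 3.0, 4.0]})
    return df.groupby('g')['x'].rolling(window=2, min_periods=1).mean()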
class Expanding(_Rolling_and_Expanding):
"""
Provides expanding transformations.
.. versionadded:: 0.18.0
Parameters
----------
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
.. deprecated:: 0.18.0
Frequency to conform the data to before computing the statistic.
Specified as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
axis : int or string, default 0
Returns
-------
a Window sub-classed for the particular operation
Examples
--------
>>> df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.expanding(2).sum()
B
0 NaN
1 1.0
2 3.0
3 3.0
4 7.0
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
_attributes = ['min_periods', 'freq', 'center', 'axis']
def __init__(self, obj, min_periods=1, freq=None, center=False, axis=0,
**kwargs):
super(Expanding, self).__init__(obj=obj, min_periods=min_periods,
freq=freq, center=center, axis=axis)
@property
def _constructor(self):
return Expanding
def _get_window(self, other=None):
obj = self._selected_obj
if other is None:
return (max(len(obj), self.min_periods) if self.min_periods
else len(obj))
return (max((len(obj) + len(obj)), self.min_periods)
if self.min_periods else (len(obj) + len(obj)))
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
See also
--------
pandas.DataFrame.expanding.aggregate
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(Expanding, self).aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['count'])
def count(self, **kwargs):
return super(Expanding, self).count(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['apply'])
def apply(self, func, args=(), kwargs={}):
return super(Expanding, self).apply(func, args=args, kwargs=kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_expanding_func('sum', args, kwargs)
return super(Expanding, self).sum(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['max'])
def max(self, *args, **kwargs):
nv.validate_expanding_func('max', args, kwargs)
return super(Expanding, self).max(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['min'])
def min(self, *args, **kwargs):
nv.validate_expanding_func('min', args, kwargs)
return super(Expanding, self).min(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_expanding_func('mean', args, kwargs)
return super(Expanding, self).mean(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['median'])
def median(self, **kwargs):
return super(Expanding, self).median(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['std'])
def std(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func('std', args, kwargs)
return super(Expanding, self).std(ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['var'])
def var(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func('var', args, kwargs)
return super(Expanding, self).var(ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['skew'])
def skew(self, **kwargs):
return super(Expanding, self).skew(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['kurt'])
def kurt(self, **kwargs):
return super(Expanding, self).kurt(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['quantile'])
def quantile(self, quantile, **kwargs):
return super(Expanding, self).quantile(quantile=quantile, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['cov'])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super(Expanding, self).cov(other=other, pairwise=pairwise,
ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['corr'])
def corr(self, other=None, pairwise=None, **kwargs):
return super(Expanding, self).corr(other=other, pairwise=pairwise,
**kwargs)
class ExpandingGroupby(_GroupByMixin, Expanding):
"""
Provides an expanding groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Expanding
_bias_template = """
Parameters
----------
bias : boolean, default False
Use a standard estimation bias correction
"""
_pairwise_template = """
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used and
the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the output
will be a MultiIndex DataFrame in the case of DataFrame inputs.
In the case of missing elements, only complete pairwise observations will
be used.
bias : boolean, default False
Use a standard estimation bias correction
"""
class EWM(_Rolling):
r"""
Provides exponential weighted functions
.. versionadded:: 0.18.0
Parameters
----------
com : float, optional
Specify decay in terms of center of mass,
:math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`
span : float, optional
Specify decay in terms of span,
:math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`
halflife : float, optional
Specify decay in terms of half-life,
:math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } halflife > 0`
alpha : float, optional
Specify smoothing factor :math:`\alpha` directly,
:math:`0 < \alpha \leq 1`
.. versionadded:: 0.18.0
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : None or string alias / date offset object, default=None
.. deprecated:: 0.18.0
Frequency to conform to before computing statistic
adjust : boolean, default True
Divide by decaying adjustment factor in beginning periods to account
for imbalance in relative weightings (viewing EWMA as a moving average)
ignore_na : boolean, default False
Ignore missing values when calculating weights;
specify True to reproduce pre-0.15.0 behavior
Returns
-------
a Window sub-classed for the particular operation
Examples
--------
>>> df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.ewm(com=0.5).mean()
B
0 0.000000
1 0.750000
2 1.615385
3 1.615385
4 3.670213
Notes
-----
Exactly one of center of mass, span, half-life, and alpha must be provided.
Allowed values and relationship between the parameters are specified in the
parameter descriptions above; see the link at the end of this section for
a detailed explanation.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
When adjust is True (default), weighted averages are calculated using
weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
When adjust is False, weighted averages are calculated recursively as:
weighted_average[0] = arg[0];
weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
When ignore_na is False (default), weights are based on absolute positions.
For example, the weights of x and y used in calculating the final weighted
average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
(1-alpha)**2 and alpha (if adjust is False).
When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based
on relative positions. For example, the weights of x and y used in
calculating the final weighted average of [x, None, y] are 1-alpha and 1
(if adjust is True), and 1-alpha and alpha (if adjust is False).
More details can be found at
http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows
"""
_attributes = ['com', 'min_periods', 'freq', 'adjust', 'ignore_na', 'axis']
def __init__(self, obj, com=None, span=None, halflife=None, alpha=None,
min_periods=0, freq=None, adjust=True, ignore_na=False,
axis=0):
self.obj = obj
self.com = _get_center_of_mass(com, span, halflife, alpha)
self.min_periods = min_periods
self.freq = freq
self.adjust = adjust
self.ignore_na = ignore_na
self.axis = axis
self.on = None
@property
def _constructor(self):
return EWM
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
See also
--------
pandas.DataFrame.rolling.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(EWM, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _apply(self, func, how=None, **kwargs):
"""Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
how : string, default to None
.. deprecated:: 0.18.0
how to resample
Returns
-------
y : type of input argument
"""
blocks, obj, index = self._create_blocks(how=how)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
cfunc = getattr(_window, func, None)
if cfunc is None:
raise ValueError("we do not support this function "
"in _window.{0}".format(func))
def func(arg):
return cfunc(arg, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods))
results.append(np.apply_along_axis(func, self.axis, values))
return self._wrap_results(results, blocks, obj)
@Substitution(name='ewm')
@Appender(_doc_template)
def mean(self, *args, **kwargs):
"""exponential weighted moving average"""
nv.validate_window_func('mean', args, kwargs)
return self._apply('ewma', **kwargs)
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_bias_template)
def std(self, bias=False, *args, **kwargs):
"""exponential weighted moving stddev"""
nv.validate_window_func('std', args, kwargs)
return _zsqrt(self.var(bias=bias, **kwargs))
vol = std
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_bias_template)
def var(self, bias=False, *args, **kwargs):
"""exponential weighted moving variance"""
nv.validate_window_func('var', args, kwargs)
def f(arg):
return _window.ewmcov(arg, arg, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods),
int(bias))
return self._apply(f, **kwargs)
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_pairwise_template)
def cov(self, other=None, pairwise=None, bias=False, **kwargs):
"""exponential weighted sample covariance"""
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
def _get_cov(X, Y):
X = self._shallow_copy(X)
Y = self._shallow_copy(Y)
cov = _window.ewmcov(X._prep_values(), Y._prep_values(), self.com,
int(self.adjust), int(self.ignore_na),
int(self.min_periods), int(bias))
return X._wrap_result(cov)
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_cov, pairwise=bool(pairwise))
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_pairwise_template)
def corr(self, other=None, pairwise=None, **kwargs):
"""exponential weighted sample correlation"""
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
def _get_corr(X, Y):
X = self._shallow_copy(X)
Y = self._shallow_copy(Y)
def _cov(x, y):
return _window.ewmcov(x, y, self.com, int(self.adjust),
int(self.ignore_na),
int(self.min_periods),
1)
x_values = X._prep_values()
y_values = Y._prep_values()
with np.errstate(all='ignore'):
cov = _cov(x_values, y_values)
x_var = _cov(x_values, x_values)
y_var = _cov(y_values, y_values)
corr = cov / _zsqrt(x_var * y_var)
return X._wrap_result(corr)
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_corr, pairwise=bool(pairwise))
# Helper Funcs
def _flex_binary_moment(arg1, arg2, f, pairwise=False):
if not (isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) and
isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame))):
raise TypeError("arguments to moment function must be of type "
"np.ndarray/Series/DataFrame")
if (isinstance(arg1, (np.ndarray, ABCSeries)) and
isinstance(arg2, (np.ndarray, ABCSeries))):
X, Y = _prep_binary(arg1, arg2)
return f(X, Y)
elif isinstance(arg1, ABCDataFrame):
from pandas import DataFrame
def dataframe_from_int_dict(data, frame_template):
result = DataFrame(data, index=frame_template.index)
if len(result.columns) > 0:
result.columns = frame_template.columns[result.columns]
return result
results = {}
if isinstance(arg2, ABCDataFrame):
if pairwise is False:
if arg1 is arg2:
# special case in order to handle duplicate column names
for i, col in enumerate(arg1.columns):
results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])
return dataframe_from_int_dict(results, arg1)
else:
if not arg1.columns.is_unique:
raise ValueError("'arg1' columns are not unique")
if not arg2.columns.is_unique:
raise ValueError("'arg2' columns are not unique")
with warnings.catch_warnings(record=True):
X, Y = arg1.align(arg2, join='outer')
X = X + 0 * Y
Y = Y + 0 * X
with warnings.catch_warnings(record=True):
res_columns = arg1.columns.union(arg2.columns)
for col in res_columns:
if col in X and col in Y:
results[col] = f(X[col], Y[col])
return DataFrame(results, index=X.index,
columns=res_columns)
elif pairwise is True:
results = defaultdict(dict)
for i, k1 in enumerate(arg1.columns):
for j, k2 in enumerate(arg2.columns):
if j < i and arg2 is arg1:
# Symmetric case
results[i][j] = results[j][i]
else:
results[i][j] = f(*_prep_binary(arg1.iloc[:, i],
arg2.iloc[:, j]))
# TODO: not the most efficient (perf-wise)
# though not bad code-wise
from pandas import Panel, MultiIndex, concat
with warnings.catch_warnings(record=True):
p = Panel.from_dict(results).swapaxes('items', 'major')
if len(p.major_axis) > 0:
p.major_axis = arg1.columns[p.major_axis]
if len(p.minor_axis) > 0:
p.minor_axis = arg2.columns[p.minor_axis]
if len(p.items):
result = concat(
[p.iloc[i].T for i in range(len(p.items))],
keys=p.items)
else:
result = DataFrame(
index=MultiIndex(levels=[arg1.index, arg1.columns],
labels=[[], []]),
columns=arg2.columns,
dtype='float64')
# reset our index names to arg1 names
# reset our column names to arg2 names
# careful not to mutate the original names
result.columns = result.columns.set_names(
arg2.columns.names)
result.index = result.index.set_names(
arg1.index.names + arg1.columns.names)
return result
else:
raise ValueError("'pairwise' is not True/False")
else:
results = {}
for i, col in enumerate(arg1.columns):
results[i] = f(*_prep_binary(arg1.iloc[:, i], arg2))
return dataframe_from_int_dict(results, arg1)
else:
return _flex_binary_moment(arg2, arg1, f)
def _get_center_of_mass(com, span, halflife, alpha):
valid_count = len([x for x in [com, span, halflife, alpha]
if x is not None])
if valid_count > 1:
raise ValueError("com, span, halflife, and alpha "
"are mutually exclusive")
# Convert to center of mass; domain checks ensure 0 < alpha <= 1
if com is not None:
if com < 0:
raise ValueError("com must satisfy: com >= 0")
elif span is not None:
if span < 1:
raise ValueError("span must satisfy: span >= 1")
com = (span - 1) / 2.
elif halflife is not None:
if halflife <= 0:
raise ValueError("halflife must satisfy: halflife > 0")
decay = 1 - np.exp(np.log(0.5) / halflife)
com = 1 / decay - 1
elif alpha is not None:
if alpha <= 0 or alpha > 1:
raise ValueError("alpha must satisfy: 0 < alpha <= 1")
com = (1.0 - alpha) / alpha
else:
raise ValueError("Must pass one of com, span, halflife, or alpha")
return float(com)
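# A minimal sketch (not part of pandas) exercising the conversions above; the
# expected values follow directly from the formulas in ``_get_center_of_mass``.
def _example_center_of_mass_conversions():
    assert _get_center_of_mass(3.0, None, None, None) == 3.0  # com passes through
    assert _get_center_of_mass(None, 9, None, None) == 4.0    # (span - 1) / 2
    assert _get_center_of_mass(None, None, None, 0.5) == 1.0  # (1 - alpha) / alpha
    # halflife=1 gives decay = 1 - exp(log(0.5)) = 0.5, hence com = 1 / 0.5 - 1 = 1.0
    assert abs(_get_center_of_mass(None, None, 1, None) - 1.0) < 1e-12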
def _offset(window, center):
if not is_integer(window):
window = len(window)
offset = (window - 1) / 2. if center else 0
try:
return int(offset)
except:
return offset.astype(int)
def _require_min_periods(p):
def _check_func(minp, window):
if minp is None:
return window
else:
return max(p, minp)
return _check_func
def _use_window(minp, window):
if minp is None:
return window
else:
return minp
def _zsqrt(x):
with np.errstate(all='ignore'):
result = np.sqrt(x)
mask = x < 0
if isinstance(x, ABCDataFrame):
if mask.values.any():
result[mask] = 0
else:
if mask.any():
result[mask] = 0
return result
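# A minimal sketch (not part of pandas): negative inputs, which can arise from
# floating-point noise in variance estimates, are clipped to zero by ``_zsqrt``.
def _example_zsqrt():
    import pandas as pd
    return _zsqrt(pd.Series([4.0, -1e-12]))  # values: [2.0, 0.0]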
def _prep_binary(arg1, arg2):
if not isinstance(arg2, type(arg1)):
raise Exception('Input arrays must be of the same type!')
# mask out values, this also makes a common index...
X = arg1 + 0 * arg2
Y = arg2 + 0 * arg1
return X, Y
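# A minimal sketch (not part of pandas) of the masking trick used above: adding
# ``0 * other`` aligns both inputs on the union of their indexes and turns any
# position missing from either side into NaN, so both outputs share one index.
def _example_prep_binary():
    import pandas as pd
    s1 = pd.Series([1.0, 2.0, 3.0], index=['a', 'b', 'c'])
    s2 = pd.Series([10.0, 20.0], index=['a', 'b'])
    X, Y = _prep_binary(s1, s2)
    # X is [1.0, 2.0, NaN] and Y is [10.0, 20.0, NaN], both indexed by a, b, c
    return X, Y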
# Top-level exports
def rolling(obj, win_type=None, **kwds):
if not isinstance(obj, (ABCSeries, ABCDataFrame)):
raise TypeError('invalid type: %s' % type(obj))
if win_type is not None:
return Window(obj, win_type=win_type, **kwds)
return Rolling(obj, **kwds)
rolling.__doc__ = Window.__doc__
def expanding(obj, **kwds):
if not isinstance(obj, (ABCSeries, ABCDataFrame)):
raise TypeError('invalid type: %s' % type(obj))
return Expanding(obj, **kwds)
expanding.__doc__ = Expanding.__doc__
def ewm(obj, **kwds):
if not isinstance(obj, (ABCSeries, ABCDataFrame)):
raise TypeError('invalid type: %s' % type(obj))
return EWM(obj, **kwds)
ewm.__doc__ = EWM.__doc__
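# A minimal usage sketch (not part of pandas): the three helpers above simply
# dispatch to the corresponding classes and mirror the public Series/DataFrame
# methods of the same names.
def _example_top_level_constructors():
    import pandas as pd
    s = pd.Series([1.0, 2.0, 3.0, 4.0])
    r = rolling(s, window=2).mean()        # same as s.rolling(window=2).mean()
    e = expanding(s, min_periods=1).sum()  # same as s.expanding(min_periods=1).sum()
    w = ewm(s, span=3).mean()              # same as s.ewm(span=3).mean()
    return r, e, w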
| gpl-2.0 |
her0e1c1/pystock | stock/signals.py | 1 | 2696 | import pandas as pd
from . import line, util
def rolling_mean(series, period):
"""現在の株価(短期)と長期移動平均線(長期)のクロス"""
slow = series.rolling(window=period, center=False).mean()
return util.cross(series, slow)
def rolling_mean_ratio(series, period, ratio):
"""長期移動平均線と現在の株価の最終日の差がratio乖離したら売買シグナル"""
mean = series.rolling(window=period, center=False).mean()
r = util.increment(util.last(series), util.last(mean))
return "BUY" if r > ratio else "SELL" if r < -ratio else None
def increment_ratio(series, ratio=25):
"""前日に比べてratio乖離してたら売買シグナル(変動が大きいので戻りの可能性が高いと考える)"""
curr = util.last(series)
prev = util.last(series, offset_from_last=1)
r = util.increment(curr, prev)
return "BUY" if r < -ratio else "SELL" if r > ratio else None
def rsi(series, period, buy, sell):
"""RSIは基本的に30%以下で売られ過ぎ, 70%で買われ過ぎ"""
rsi = line.rsi(series, period)
if rsi.empty:
return None
f = float(rsi[rsi.last_valid_index()])
return "BUY" if f < buy else "SELL" if f > sell else None
def min_low(series, period, ratio):
"""指定期間中の最安値に近いたら買い. (底値が支えになって反発する可能性があると考える)"""
m = float(series.tail(period).min())
if pd.isnull(m):
return None
last = series[series.last_valid_index()]
return "BUY" if util.increment(last, m) < ratio else None
def max_high(series, period, ratio):
"""min_lowの逆version"""
m = float(series.tail(period).max())
if pd.isnull(m):
return None
last = series[series.last_valid_index()]
return "SELL" if util.increment(m, last) < ratio else None
def macd_signal(series, fast, slow, signal):
"""macd(短期)とsignal(長期)のクロス"""
f = line.macd_line(series, fast, slow, signal)
s = line.macd_signal(series, fast, slow, signal)
return util.cross(f, s)
def stochastic(series, k, d, sd):
"""
macd(短期)とsignal(長期)のクロス
一般的に次の値を利用する (k, d, sd) = (14, 3, 3)
"""
fast = line.stochastic_d(series, k=k, d=d)
slow = line.stochastic_sd(series, k=k, d=d, sd=sd)
return util.cross(fast, slow)
def bollinger_band(series, period=20, ratio=3):
"""
2sigmaを超えたら、買われすぎと判断してSELL
-2sigmaを超えたら、売られ過ぎと判断してBUY
"""
s = util.sigma(series, period)
return "BUY" if s <= -ratio else "SELL" if s >= ratio else None
| gpl-3.0 |
JPFrancoia/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 84 | 7866 | # Important note for the deprecation cleaning of 0.20 :
# All the function and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_gmm.py'
import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.utils.testing import assert_warns_message, ignore_warnings
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.mixture.dpgmm import digamma, gammaln
from sklearn.mixture.dpgmm import wishart_log_det, wishart_logz
np.seterr(all='warn')
@ignore_warnings(category=DeprecationWarning)
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
@ignore_warnings(category=DeprecationWarning)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_digamma():
assert_warns_message(DeprecationWarning, "The function digamma is"
" deprecated in 0.18 and will be removed in 0.20. "
"Use scipy.special.digamma instead.", digamma, 3)
@ignore_warnings(category=DeprecationWarning)
def test_gammaln():
assert_warns_message(DeprecationWarning, "The function gammaln"
" is deprecated in 0.18 and will be removed"
" in 0.20. Use scipy.special.gammaln instead.",
gammaln, 3)
@ignore_warnings(category=DeprecationWarning)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
result = assert_warns_message(DeprecationWarning, "The function "
"log_normalize is deprecated in 0.18 and"
" will be removed in 0.20.",
log_normalize, a)
assert np.allclose(v, result, rtol=0.01)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_log_det():
a = np.array([0.1, 0.8, 0.01, 0.09])
b = np.array([0.2, 0.7, 0.05, 0.1])
assert_warns_message(DeprecationWarning, "The function "
"wishart_log_det is deprecated in 0.18 and"
" will be removed in 0.20.",
wishart_log_det, a, b, 2, 4)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_logz():
assert_warns_message(DeprecationWarning, "The function "
"wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.", wishart_logz,
3, np.identity(3), 1, 3)
@ignore_warnings(category=DeprecationWarning)
def test_DPGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `DPGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be removed in 0.20.",
DPGMM)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_VBGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `VBGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. VBGMM is deprecated "
"in 0.18 and will be removed in 0.20.", VBGMM)
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_vbgmm_no_modify_alpha():
alpha = 2.
n_components = 3
X, y = make_blobs(random_state=1)
vbgmm = VBGMM(n_components=n_components, alpha=alpha, n_iter=1)
assert_equal(vbgmm.alpha, alpha)
assert_equal(vbgmm.fit(X).alpha_, float(alpha) / n_components)
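# A minimal sketch (not part of scikit-learn) of the migration suggested by the
# deprecation messages tested above: ``BayesianGaussianMixture`` with a
# Dirichlet-process weight prior replaces DPGMM, and a Dirichlet-distribution
# prior replaces VBGMM.
def example_bayesian_gmm_replacement():
    from sklearn.mixture import BayesianGaussianMixture
    X, _ = make_blobs(random_state=1)
    bgmm = BayesianGaussianMixture(
        n_components=10,
        weight_concentration_prior_type='dirichlet_process',
        random_state=1)
    return bgmm.fit(X).predict(X)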
| bsd-3-clause |
ofgulban/scikit-image | doc/examples/edges/plot_marching_cubes.py | 2 | 2078 | """
==============
Marching Cubes
==============
Marching cubes is an algorithm to extract a 2D surface mesh from a 3D volume.
This can be conceptualized as a 3D generalization of isolines on topographical
or weather maps. It works by iterating across the volume, looking for regions
which cross the level of interest. If such regions are found, triangulations
are generated and added to an output mesh. The final result is a set of
vertices and a set of triangular faces.
The algorithm requires a data volume and an isosurface value. For example, in
CT imaging Hounsfield units of +700 to +3000 represent bone. So, one potential
input would be a reconstructed CT set of data and the value +700, to extract
a mesh for regions of bone or bone-like density.
This implementation also works correctly on anisotropic datasets, where the
voxel spacing is not equal for every spatial dimension, through use of the
`spacing` kwarg.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from skimage import measure
from skimage.draw import ellipsoid
# Generate a level set about zero of two identical ellipsoids in 3D
ellip_base = ellipsoid(6, 10, 16, levelset=True)
ellip_double = np.concatenate((ellip_base[:-1, ...],
ellip_base[2:, ...]), axis=0)
# Use marching cubes to obtain the surface mesh of these ellipsoids
verts, faces, normals, values = measure.marching_cubes(ellip_double, 0)
# Display resulting triangular mesh using Matplotlib. This can also be done
# with mayavi or visvis (see skimage.measure.marching_cubes docstring).
fig = plt.figure(figsize=(10, 12))
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(verts[faces])
ax.add_collection3d(mesh)
ax.set_xlabel("x-axis: a = 6 per ellipsoid")
ax.set_ylabel("y-axis: b = 10")
ax.set_zlabel("z-axis: c = 16")
ax.set_xlim(0, 24) # a = 6 (times two for 2nd ellipsoid)
ax.set_ylim(0, 20) # b = 10
ax.set_zlim(0, 32) # c = 16
plt.show()
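# A short follow-up sketch of the ``spacing`` keyword mentioned in the docstring:
# for an anisotropic volume, pass the per-axis voxel spacing so the returned
# vertices are expressed in physical coordinates. The spacing values below are
# illustrative only, not a property of the example data.
verts_aniso, faces_aniso, _, _ = measure.marching_cubes(
    ellip_double, 0, spacing=(2.0, 1.0, 1.0))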
| bsd-3-clause |
postvakje/sympy | sympy/plotting/plot.py | 7 | 65097 | """Plotting module for Sympy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from sympy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module gives only the essential. For all the fancy stuff use directly
the backend. You can get the backend wrapper for every plot from the
``_backend`` attribute. Moreover the data series classes have various useful
methods like ``get_points``, ``get_segments``, ``get_meshes``, etc, that may
be useful if you wish to use another plotting library.
Especially if you need publication ready graphs and this module is not enough
for you - just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the common way to graph data in
python) just copy ``_backend.fig`` which is the figure and ``_backend.ax``
which is the axis and work on them as you would on any other matplotlib object.
Simplicity of code takes much greater importance than performance. Don't use it
if you care at all about performance. A new backend instance is initialized
every time you call ``show()`` and the old one is left to the garbage collector.
"""
from __future__ import print_function, division
import inspect
from collections import Callable
import warnings
import sys
from sympy import sympify, Expr, Tuple, Dummy, Symbol
from sympy.external import import_module
from sympy.core.compatibility import range
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import is_sequence
from .experimental_lambdify import (vectorized_lambdify, lambdify)
# N.B.
# When changing the minimum module version for matplotlib, please change
# the same in the `SymPyDocTestFinder`` in `sympy/utilities/runtests.py`
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
global _show
_show = False
##############################################################################
# The public interface
##############################################################################
def _arity(f):
"""
Python 2 and 3 compatible version that do not raise a Deprecation warning.
"""
if sys.version_info < (3,):
return len(inspect.getargspec(f)[0])
else:
param = inspect.signature(f).parameters.values()
return len([p for p in param if p.kind == p.POSITIONAL_OR_KEYWORD])
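# Illustrative only (not part of sympy): _arity counts positional parameters,
# so a two-argument color function is detected as such by the aesthetics code.
def _example_arity():
    assert _arity(lambda t: t) == 1
    assert _arity(lambda u, v: u + v) == 2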
class Plot(object):
"""The central class of the plotting module.
For interactive work the function ``plot`` is better suited.
This class permits the plotting of sympy expressions using numerous
backends (matplotlib, textplot, the old pyglet module for sympy, Google
charts api, etc).
The figure can contain an arbitrary number of plots of sympy expressions,
lists of coordinates of points, etc. Plot has a private attribute _series that
contains all data series to be plotted (expressions for lines or surfaces,
lists of points, etc (all subclasses of BaseSeries)). Those data series are
instances of classes not imported by ``from sympy import *``.
The customization of the figure is on two levels. Global options that
concern the figure as a whole (eg title, xlabel, scale, etc) and
per-data series options (eg name) and aesthetics (eg. color, point shape,
line type, etc.).
The difference between options and aesthetics is that an aesthetic can be
a function of the coordinates (or parameters in a parametric plot). The
supported values for an aesthetic are:
- None (the backend uses default values)
- a constant
- a function of one variable (the first coordinate or parameter)
- a function of two variables (the first and second coordinate or
parameters)
- a function of three variables (only in nonparametric 3D plots)
Their implementation depends on the backend so they may not work in some
backends.
If the plot is parametric and the arity of the aesthetic function permits
it the aesthetic is calculated over parameters and not over coordinates.
If the arity does not permit calculation over parameters the calculation is
done over coordinates.
Only cartesian coordinates are supported for the moment, but you can use
the parametric plots to plot in polar, spherical and cylindrical
coordinates.
The arguments for the constructor Plot must be subclasses of BaseSeries.
Any global option can be specified as a keyword argument.
The global options for a figure are:
- title : str
- xlabel : str
- ylabel : str
- legend : bool
- xscale : {'linear', 'log'}
- yscale : {'linear', 'log'}
- axis : bool
- axis_center : tuple of two floats or {'center', 'auto'}
- xlim : tuple of two floats
- ylim : tuple of two floats
- aspect_ratio : tuple of two floats or {'auto'}
- autoscale : bool
- margin : float in [0, 1]
The per data series options and aesthetics are:
There are none in the base series. See below for options for subclasses.
Some data series support additional aesthetics or options:
ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
Parametric3DLineSeries support the following:
Aesthetics:
- line_color : function which returns a float.
options:
- label : str
- steps : bool
- integers_only : bool
SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:
aesthetics:
- surface_color : function which returns a float.
"""
def __init__(self, *args, **kwargs):
super(Plot, self).__init__()
# Options for the graph as a whole.
# The possible values for each option are described in the docstring of
# Plot. They are based purely on convention, no checking is done.
self.title = None
self.xlabel = None
self.ylabel = None
self.aspect_ratio = 'auto'
self.xlim = None
self.ylim = None
self.axis_center = 'auto'
self.axis = True
self.xscale = 'linear'
self.yscale = 'linear'
self.legend = False
self.autoscale = True
self.margin = 0
# Contains the data objects to be plotted. The backend should be smart
# enough to iterate over this list.
self._series = []
self._series.extend(args)
# The backend type. On every show() a new backend instance is created
# in self._backend which is tightly coupled to the Plot instance
# (thanks to the parent attribute of the backend).
self.backend = DefaultBackend
# The keyword arguments should only contain options for the plot.
for key, val in kwargs.items():
if hasattr(self, key):
setattr(self, key, val)
def show(self):
# TODO move this to the backend (also for save)
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
series_strs = [('[%d]: ' % i) + str(s)
for i, s in enumerate(self._series)]
return 'Plot object containing:\n' + '\n'.join(series_strs)
def __getitem__(self, index):
return self._series[index]
def __setitem__(self, index, *args):
if len(args) == 1 and isinstance(args[0], BaseSeries):
            self._series[index] = args[0]
def __delitem__(self, index):
del self._series[index]
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def append(self, arg):
"""Adds an element from a plot's series to an existing plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot's first series object to the first, use the
``append`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.append(p2[0])
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
See Also
========
extend
"""
if isinstance(arg, BaseSeries):
self._series.append(arg)
else:
raise TypeError('Must specify element of plot to append.')
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def extend(self, arg):
"""Adds all series from another plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot to the first, use the ``extend`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.extend(p2)
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
"""
if isinstance(arg, Plot):
self._series.extend(arg._series)
elif is_sequence(arg):
self._series.extend(arg)
else:
raise TypeError('Expecting Plot or sequence of BaseSeries')
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries(object):
"""Base class for the data objects containing stuff to be plotted.
The backend should check if it supports the data series that it's given.
(eg TextBackend supports only LineOver1DRange).
It's the backend responsibility to know how to use the class of
data series that it's given.
Some data series classes are grouped (using a class attribute like is_2Dline)
according to the api they present (based only on convention). The backend is
not obliged to use that api (eg. The LineOver1DRange belongs to the
is_2Dline group and presents the get_points method, but the
TextBackend does not use the get_points method).
"""
# Some flags follow. The rationale for using flags instead of checking base
# classes is that setting multiple flags is simpler than multiple
# inheritance.
is_2Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dsurface = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_contour = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_implicit = False
# Some of the backends expect:
# - get_meshes returning mesh_x (1D array), mesh_y(1D array,
# mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
#Different from is_contour as the colormap in backend will be
#different
is_parametric = False
# The calculation of aesthetics expects:
# - get_parameter_points returning one or two np.arrays (1D or 2D)
# used for calculation aesthetics
def __init__(self):
super(BaseSeries, self).__init__()
@property
def is_3D(self):
flags3D = [
self.is_3Dline,
self.is_3Dsurface
]
return any(flags3D)
@property
def is_line(self):
flagslines = [
self.is_2Dline,
self.is_3Dline
]
return any(flagslines)
### 2D lines
class Line2DBaseSeries(BaseSeries):
"""A base class for 2D lines.
- adding the label, steps and only_integers options
- making is_2Dline true
- defining get_segments and get_color_array
"""
is_2Dline = True
_dim = 2
def __init__(self):
super(Line2DBaseSeries, self).__init__()
self.label = None
self.steps = False
self.only_integers = False
self.line_color = None
def get_segments(self):
np = import_module('numpy')
points = self.get_points()
if self.steps is True:
x = np.array((points[0], points[0])).T.flatten()[1:]
y = np.array((points[1], points[1])).T.flatten()[:-1]
points = (x, y)
points = np.ma.array(points).T.reshape(-1, 1, self._dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
def get_color_array(self):
np = import_module('numpy')
c = self.line_color
if hasattr(c, '__call__'):
f = np.vectorize(c)
arity = _arity(c)
if arity == 1 and self.is_parametric:
x = self.get_parameter_points()
return f(centers_of_segments(x))
else:
variables = list(map(centers_of_segments, self.get_points()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else: # only if the line is 3D (otherwise raises an error)
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class List2DSeries(Line2DBaseSeries):
"""Representation for a line consisting of list of points."""
def __init__(self, list_x, list_y):
np = import_module('numpy')
super(List2DSeries, self).__init__()
self.list_x = np.array(list_x)
self.list_y = np.array(list_y)
self.label = 'list'
def __str__(self):
return 'list plot'
def get_points(self):
return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
"""Representation for a line consisting of a SymPy expression over a range."""
def __init__(self, expr, var_start_end, **kwargs):
super(LineOver1DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.label = str(self.expr)
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'cartesian line: %s for %s over %s' % (
str(self.expr), str(self.var), str((self.start, self.end)))
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if self.only_integers or not self.adaptive:
return super(LineOver1DRangeSeries, self).get_segments()
else:
f = lambdify([self.var], self.expr)
list_segments = []
def sample(p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
np = import_module('numpy')
#Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
xnew = p[0] + random * (q[0] - p[0])
ynew = f(xnew)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif p[1] is None and q[1] is None:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray))
if any(y is not None for y in yarray):
for i in range(len(yarray) - 1):
if yarray[i] is not None or yarray[i + 1] is not None:
sample([xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]], depth + 1)
                #Sample further if one of the end points is None (i.e. a complex
#value) or the three points are not almost collinear.
elif (p[1] is None or q[1] is None or new_point[1] is None
or not flat(p, new_point, q)):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start = f(self.start)
f_end = f(self.end)
sample([self.start, f_start], [self.end, f_end], 0)
return list_segments
def get_points(self):
np = import_module('numpy')
if self.only_integers is True:
list_x = np.linspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
f = vectorized_lambdify([self.var], self.expr)
list_y = f(list_x)
return (list_x, list_y)
class Parametric2DLineSeries(Line2DBaseSeries):
"""Representation for a line consisting of two parametric sympy expressions
over a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
super(Parametric2DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'parametric cartesian line: (%s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.var),
str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
list_x = fx(param)
list_y = fy(param)
return (list_x, list_y)
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if not self.adaptive:
return super(Parametric2DLineSeries, self).get_segments()
f_x = lambdify([self.var], self.expr_x)
f_y = lambdify([self.var], self.expr_y)
list_segments = []
def sample(param_p, param_q, p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
#Randomly sample to avoid aliasing.
np = import_module('numpy')
random = 0.45 + np.random.rand() * 0.1
param_new = param_p + random * (param_q - param_p)
xnew = f_x(param_new)
ynew = f_y(param_new)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
            elif ((p[0] is None and q[0] is None) or
                    (p[1] is None and q[1] is None)):
param_array = np.linspace(param_p, param_q, 10)
x_array = list(map(f_x, param_array))
y_array = list(map(f_y, param_array))
if any(x is not None and y is not None
for x, y in zip(x_array, y_array)):
for i in range(len(y_array) - 1):
if ((x_array[i] is not None and y_array[i] is not None) or
(x_array[i + 1] is not None and y_array[i + 1] is not None)):
point_a = [x_array[i], y_array[i]]
point_b = [x_array[i + 1], y_array[i + 1]]
                            sample(param_array[i], param_array[i + 1], point_a,
                                   point_b, depth + 1)
            #Sample further if one of the end points is None (i.e. a complex
#value) or the three points are not almost collinear.
elif (p[0] is None or p[1] is None
or q[1] is None or q[0] is None
or not flat(p, new_point, q)):
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start_x = f_x(self.start)
f_start_y = f_y(self.start)
start = [f_start_x, f_start_y]
f_end_x = f_x(self.end)
f_end_y = f_y(self.end)
end = [f_end_x, f_end_y]
sample(self.start, self.end, start, end, 0)
return list_segments
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
"""A base class for 3D lines.
Most of the stuff is derived from Line2DBaseSeries."""
is_2Dline = False
is_3Dline = True
_dim = 3
def __init__(self):
super(Line3DBaseSeries, self).__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
"""Representation for a 3D line consisting of two parametric sympy
expressions and a range."""
def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
super(Parametric3DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.expr_z),
str(self.var), str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
fz = vectorized_lambdify([self.var], self.expr_z)
list_x = fx(param)
list_y = fy(param)
list_z = fz(param)
return (list_x, list_y, list_z)
### Surfaces
class SurfaceBaseSeries(BaseSeries):
"""A base class for 3D surfaces."""
is_3Dsurface = True
def __init__(self):
super(SurfaceBaseSeries, self).__init__()
self.surface_color = None
def get_color_array(self):
np = import_module('numpy')
c = self.surface_color
if isinstance(c, Callable):
f = np.vectorize(c)
arity = _arity(c)
if self.is_parametric:
variables = list(map(centers_of_faces, self.get_parameter_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables)
variables = list(map(centers_of_faces, self.get_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else:
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of a sympy expression and 2D
range."""
def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
super(SurfaceOver2DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('cartesian surface: %s for'
' %s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
class ParametricSurfaceSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of three parametric sympy
expressions and a range."""
is_parametric = True
def __init__(
self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v,
**kwargs):
super(ParametricSurfaceSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.var_u = sympify(var_start_end_u[0])
self.start_u = float(var_start_end_u[1])
self.end_u = float(var_start_end_u[2])
self.var_v = sympify(var_start_end_v[0])
self.start_v = float(var_start_end_v[1])
self.end_v = float(var_start_end_v[2])
self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('parametric cartesian surface: (%s, %s, %s) for'
' %s over %s and %s over %s') % (
str(self.expr_x),
str(self.expr_y),
str(self.expr_z),
str(self.var_u),
str((self.start_u, self.end_u)),
str(self.var_v),
str((self.start_v, self.end_v)))
def get_parameter_meshes(self):
np = import_module('numpy')
return np.meshgrid(np.linspace(self.start_u, self.end_u,
num=self.nb_of_points_u),
np.linspace(self.start_v, self.end_v,
num=self.nb_of_points_v))
def get_meshes(self):
mesh_u, mesh_v = self.get_parameter_meshes()
fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x)
fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y)
fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z)
return (fx(mesh_u, mesh_v), fy(mesh_u, mesh_v), fz(mesh_u, mesh_v))
### Contours
class ContourSeries(BaseSeries):
"""Representation for a contour plot."""
#The code is mostly repetition of SurfaceOver2DRange.
#XXX: Presently not used in any of those functions.
    #XXX: Add contour plot and use this series.
is_contour = True
def __init__(self, expr, var_start_end_x, var_start_end_y):
super(ContourSeries, self).__init__()
self.nb_of_points_x = 50
self.nb_of_points_y = 50
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_meshes
def __str__(self):
return ('contour: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
##############################################################################
# Backends
##############################################################################
class BaseBackend(object):
def __init__(self, parent):
super(BaseBackend, self).__init__()
self.parent = parent
## don't have to check for the success of importing matplotlib in each case;
## we will only be using this backend if we can successfully import matploblib
class MatplotlibBackend(BaseBackend):
def __init__(self, parent):
super(MatplotlibBackend, self).__init__(parent)
are_3D = [s.is_3D for s in self.parent._series]
self.matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
self.plt = self.matplotlib.pyplot
self.cm = self.matplotlib.cm
self.LineCollection = self.matplotlib.collections.LineCollection
if any(are_3D) and not all(are_3D):
raise ValueError('The matplotlib backend can not mix 2D and 3D.')
elif not any(are_3D):
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111)
self.ax.spines['left'].set_position('zero')
self.ax.spines['right'].set_color('none')
self.ax.spines['bottom'].set_position('zero')
self.ax.spines['top'].set_color('none')
self.ax.spines['left'].set_smart_bounds(True)
self.ax.spines['bottom'].set_smart_bounds(False)
self.ax.xaxis.set_ticks_position('bottom')
self.ax.yaxis.set_ticks_position('left')
elif all(are_3D):
## mpl_toolkits.mplot3d is necessary for
## projection='3d'
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111, projection='3d')
def process_series(self):
parent = self.parent
for s in self.parent._series:
# Create the collections
if s.is_2Dline:
collection = self.LineCollection(s.get_segments())
self.ax.add_collection(collection)
elif s.is_contour:
self.ax.contour(*s.get_meshes())
elif s.is_3Dline:
# TODO too complicated, I blame matplotlib
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
art3d = mpl_toolkits.mplot3d.art3d
collection = art3d.Line3DCollection(s.get_segments())
self.ax.add_collection(collection)
x, y, z = s.get_points()
self.ax.set_xlim((min(x), max(x)))
self.ax.set_ylim((min(y), max(y)))
self.ax.set_zlim((min(z), max(z)))
elif s.is_3Dsurface:
x, y, z = s.get_meshes()
collection = self.ax.plot_surface(x, y, z, cmap=self.cm.jet,
rstride=1, cstride=1,
linewidth=0.1)
elif s.is_implicit:
#Smart bounds have to be set to False for implicit plots.
self.ax.spines['left'].set_smart_bounds(False)
self.ax.spines['bottom'].set_smart_bounds(False)
points = s.get_raster()
if len(points) == 2:
#interval math plotting
x, y = _matplotlib_list(points[0])
self.ax.fill(x, y, facecolor=s.line_color, edgecolor='None')
else:
# use contourf or contour depending on whether it is
# an inequality or equality.
#XXX: ``contour`` plots multiple lines. Should be fixed.
ListedColormap = self.matplotlib.colors.ListedColormap
colormap = ListedColormap(["white", s.line_color])
xarray, yarray, zarray, plot_type = points
if plot_type == 'contour':
self.ax.contour(xarray, yarray, zarray,
contours=(0, 0), fill=False, cmap=colormap)
else:
self.ax.contourf(xarray, yarray, zarray, cmap=colormap)
else:
raise ValueError('The matplotlib backend supports only '
'is_2Dline, is_3Dline, is_3Dsurface and '
'is_contour objects.')
# Customise the collections with the corresponding per-series
# options.
if hasattr(s, 'label'):
collection.set_label(s.label)
if s.is_line and s.line_color:
if isinstance(s.line_color, (float, int)) or isinstance(s.line_color, Callable):
color_array = s.get_color_array()
collection.set_array(color_array)
else:
collection.set_color(s.line_color)
if s.is_3Dsurface and s.surface_color:
if self.matplotlib.__version__ < "1.2.0": # TODO in the distant future remove this check
warnings.warn('The version of matplotlib is too old to use surface coloring.')
elif isinstance(s.surface_color, (float, int)) or isinstance(s.surface_color, Callable):
color_array = s.get_color_array()
color_array = color_array.reshape(color_array.size)
collection.set_array(color_array)
else:
collection.set_color(s.surface_color)
# Set global options.
# TODO The 3D stuff
# XXX The order of those is important.
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
Axes3D = mpl_toolkits.mplot3d.Axes3D
if parent.xscale and not isinstance(self.ax, Axes3D):
self.ax.set_xscale(parent.xscale)
if parent.yscale and not isinstance(self.ax, Axes3D):
self.ax.set_yscale(parent.yscale)
if parent.xlim:
self.ax.set_xlim(parent.xlim)
else:
if all(isinstance(s, LineOver1DRangeSeries) for s in parent._series):
starts = [s.start for s in parent._series]
ends = [s.end for s in parent._series]
self.ax.set_xlim(min(starts), max(ends))
if parent.ylim:
self.ax.set_ylim(parent.ylim)
if not isinstance(self.ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check
self.ax.set_autoscale_on(parent.autoscale)
if parent.axis_center:
val = parent.axis_center
if isinstance(self.ax, Axes3D):
pass
elif val == 'center':
self.ax.spines['left'].set_position('center')
self.ax.spines['bottom'].set_position('center')
elif val == 'auto':
xl, xh = self.ax.get_xlim()
yl, yh = self.ax.get_ylim()
pos_left = ('data', 0) if xl*xh <= 0 else 'center'
pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
self.ax.spines['left'].set_position(pos_left)
self.ax.spines['bottom'].set_position(pos_bottom)
else:
self.ax.spines['left'].set_position(('data', val[0]))
self.ax.spines['bottom'].set_position(('data', val[1]))
if not parent.axis:
self.ax.set_axis_off()
if parent.legend:
if self.ax.legend():
self.ax.legend_.set_visible(parent.legend)
if parent.margin:
self.ax.set_xmargin(parent.margin)
self.ax.set_ymargin(parent.margin)
if parent.title:
self.ax.set_title(parent.title)
if parent.xlabel:
self.ax.set_xlabel(parent.xlabel, position=(1, 0))
if parent.ylabel:
self.ax.set_ylabel(parent.ylabel, position=(0, 1))
def show(self):
self.process_series()
#TODO after fixing https://github.com/ipython/ipython/issues/1255
# you can uncomment the next line and remove the pyplot.show() call
#self.fig.show()
if _show:
self.plt.show()
def save(self, path):
self.process_series()
self.fig.savefig(path)
def close(self):
self.plt.close(self.fig)
class TextBackend(BaseBackend):
def __init__(self, parent):
super(TextBackend, self).__init__(parent)
def show(self):
if len(self.parent._series) != 1:
raise ValueError(
'The TextBackend supports only one graph per Plot.')
elif not isinstance(self.parent._series[0], LineOver1DRangeSeries):
raise ValueError(
'The TextBackend supports only expressions over a 1D range')
else:
ser = self.parent._series[0]
textplot(ser.expr, ser.start, ser.end)
def close(self):
pass
class DefaultBackend(BaseBackend):
def __new__(cls, parent):
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
return MatplotlibBackend(parent)
else:
return TextBackend(parent)
plot_backends = {
'matplotlib': MatplotlibBackend,
'text': TextBackend,
'default': DefaultBackend
}
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
np = import_module('numpy')
return np.average(np.vstack((array[:-1], array[1:])), 0)
def centers_of_faces(array):
np = import_module('numpy')
return np.average(np.dstack((array[:-1, :-1],
array[1:, :-1],
array[:-1, 1: ],
array[:-1, :-1],
)), 2)
def flat(x, y, z, eps=1e-3):
"""Checks whether three points are almost collinear"""
np = import_module('numpy')
# Workaround plotting piecewise (#8577):
# workaround for `lambdify` in `.experimental_lambdify` fails
# to return numerical values in some cases. Lower-level fix
# in `lambdify` is possible.
vector_a = (x - y).astype(np.float)
vector_b = (z - y).astype(np.float)
dot_product = np.dot(vector_a, vector_b)
vector_a_norm = np.linalg.norm(vector_a)
vector_b_norm = np.linalg.norm(vector_b)
cos_theta = dot_product / (vector_a_norm * vector_b_norm)
return abs(cos_theta + 1) < eps
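# A small worked example (not part of sympy): for the collinear points
# p=(0, 0), q=(1, 1), r=(2, 2) the vectors p-q and r-q are (-1, -1) and (1, 1),
# so cos(theta) = -2 / (sqrt(2)*sqrt(2)) = -1 and ``flat`` returns True.
def _example_flat():
    np = import_module('numpy')
    return flat(np.array([0.0, 0.0]), np.array([1.0, 1.0]), np.array([2.0, 2.0]))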
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start,
intervalx.end, intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end,
intervaly.end, intervaly.start, None])
else:
#XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist
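# Editor's note (illustrative only): each (intervalx, intervaly) pair above is
# expanded to the four corners of its bounding rectangle followed by ``None``,
# which tells matplotlib's ``fill`` to start a new polygon. For a single
# hypothetical interval [0, 1] x [2, 3] this yields
#   xlist = [0, 0, 1, 1, None],  ylist = [2, 3, 3, 2, None]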
####New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot(*args, **kwargs):
"""
Plots a function of a single variable and returns an instance of
the ``Plot`` class (also, see the description of the
``show`` keyword argument below).
The plotting uses an adaptive algorithm which samples recursively to
    produce an accurate plot. The adaptive algorithm uses a random point near
the midpoint of two points that has to be further sampled. Hence the same
plots can appear slightly different.
Usage
=====
Single Plot
``plot(expr, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot(expr1, expr2, ..., range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot((expr1, range), (expr2, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of single variable
``range``: (x, 0, 5), A 3-tuple denoting the range of the free variable.
Keyword Arguments
=================
Arguments for ``plot`` function:
``show``: Boolean. The default value is set to ``True``. Set show to
``False`` and the function will not display the plot. The returned
instance of the ``Plot`` class can then be used to save or display
the plot by calling the ``save()`` and ``show()`` methods
respectively.
Arguments for ``LineOver1DRangeSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to False and
specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of value ``n``
samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The function
is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics options:
``line_color``: float. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
    If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the ``Plot`` object returned and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot. It is set to the latex representation of
the expression, if the plot has only one expression.
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
Single Plot
>>> plot(x**2, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x**2 for x over (-5.0, 5.0)
Multiple plots with single range.
>>> plot(x, x**2, x**3, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
No adaptive sampling.
>>> plot(x**2, adaptive=False, nb_of_points=400)
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
See Also
========
Plot, LineOver1DRangeSeries.
"""
args = list(map(sympify, args))
free = set()
for a in args:
if isinstance(a, Expr):
free |= a.free_symbols
if len(free) > 1:
raise ValueError(
'The same variable should be used in all '
'univariate expressions being plotted.')
x = free.pop() if free else Symbol('x')
kwargs.setdefault('xlabel', x.name)
kwargs.setdefault('ylabel', 'f(%s)' % x.name)
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 1)
series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot_parametric(*args, **kwargs):
"""
Plots a 2D parametric plot.
The plotting uses an adaptive algorithm which samples recursively to
accurately plot the plot. The adaptive algorithm uses a random point near
the midpoint of two points that has to be further sampled. Hence the same
plots can appear slightly different.
Usage
=====
Single plot.
``plot_parametric(expr_x, expr_y, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot_parametric((expr1_x, expr1_y), (expr2_x, expr2_y), range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot_parametric((expr_x, expr_y, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``range``: (u, 0, 5), A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric2DLineSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to
False and specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of
value ``n`` samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The
function is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics
----------
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same Series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center
or {'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot_parametric
>>> u = symbols('u')
Single Parametric plot
>>> plot_parametric(cos(u), sin(u), (u, -5, 5))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
Multiple parametric plot with single range.
>>> plot_parametric((cos(u), sin(u)), (u, cos(u)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0)
[1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0)
Multiple parametric plots.
>>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
... (cos(u), u, (u, -5, 5)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
[1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric2DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 2, 1)
series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_line(*args, **kwargs):
"""
Plots a 3D parametric line plot.
Usage
=====
Single plot:
``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``expr_z`` : Expression representing the function along z.
``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric3DLineSeries`` class.
``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
number of points.
Aesthetics:
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class.
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_line
>>> u = symbols('u')
Single plot.
>>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
Multiple plots.
>>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
... (sin(u), u**2, u, (u, -5, 5)))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
[1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric3DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 1)
series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d(*args, **kwargs):
"""
Plots a 3D surface plot.
Usage
=====
Single plot
``plot3d(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plot with the same range.
``plot3d(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function along x.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``SurfaceOver2DRangeSeries`` class:
    ``nb_of_points_x``: int. The x range is sampled uniformly at
    ``nb_of_points_x`` points.
    ``nb_of_points_y``: int. The y range is sampled uniformly at
    ``nb_of_points_y`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot3d
>>> x, y = symbols('x y')
Single plot
>>> plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with same range
>>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
... (x*y, (x, -3, 3), (y, -3, 3)))
Plot object containing:
[0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0)
See Also
========
Plot, SurfaceOver2DRangeSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 2)
series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_surface(*args, **kwargs):
"""
Plots a 3D parametric surface plot.
Usage
=====
Single plot.
``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)``
If the ranges is not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x``: Expression representing the function along ``x``.
``expr_y``: Expression representing the function along ``y``.
``expr_z``: Expression representing the function along ``z``.
``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
variable.
``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the v
variable.
Keyword Arguments
=================
Arguments for ``ParametricSurfaceSeries`` class:
    ``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
    ``nb_of_points_u`` points.
    ``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
    ``nb_of_points_v`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied for
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_surface
>>> u, v = symbols('u v')
Single plot.
>>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
... (u, -5, 5), (v, -5, 5))
Plot object containing:
[0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0)
See Also
========
Plot, ParametricSurfaceSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 2)
series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def check_arguments(args, expr_len, nb_of_free_symbols):
"""
Checks the arguments and converts into tuples of the
form (exprs, ranges)
Examples
========
>>> from sympy import plot, cos, sin, symbols
>>> from sympy.plotting.plot import check_arguments
>>> x = symbols('x')
>>> check_arguments([cos(x), sin(x)], 2, 1)
[(cos(x), sin(x), (x, -10, 10))]
>>> check_arguments([x, x**2], 1, 1)
[(x, (x, -10, 10)), (x**2, (x, -10, 10))]
"""
if expr_len > 1 and isinstance(args[0], Expr):
# Multiple expressions same range.
# The arguments are tuples when the expression length is
# greater than 1.
if len(args) < expr_len:
raise ValueError("len(args) should not be less than expr_len")
for i in range(len(args)):
if isinstance(args[i], Tuple):
break
else:
i = len(args) + 1
exprs = Tuple(*args[:i])
free_symbols = list(set().union(*[e.free_symbols for e in exprs]))
if len(args) == expr_len + nb_of_free_symbols:
#Ranges given
plots = [exprs + Tuple(*args[expr_len:])]
else:
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
plots = [exprs + Tuple(*ranges)]
return plots
if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
len(args[0]) == expr_len and
expr_len != 3):
# Cannot handle expressions with number of expression = 3. It is
# not possible to differentiate between expressions and ranges.
#Series of plots with same range
for i in range(len(args)):
if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
break
if not isinstance(args[i], Tuple):
args[i] = Tuple(args[i])
else:
i = len(args) + 1
exprs = args[:i]
assert all(isinstance(e, Expr) for expr in exprs for e in expr)
free_symbols = list(set().union(*[e.free_symbols for expr in exprs
for e in expr]))
if len(free_symbols) > nb_of_free_symbols:
raise ValueError("The number of free_symbols in the expression "
"is greater than %d" % nb_of_free_symbols)
if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
ranges = Tuple(*[range_expr for range_expr in args[
i:i + nb_of_free_symbols]])
plots = [expr + ranges for expr in exprs]
return plots
else:
#Use default ranges.
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
ranges = Tuple(*ranges)
plots = [expr + ranges for expr in exprs]
return plots
elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
#Multiple plots with different ranges.
for arg in args:
for i in range(expr_len):
if not isinstance(arg[i], Expr):
raise ValueError("Expected an expression, given %s" %
str(arg[i]))
for i in range(nb_of_free_symbols):
if not len(arg[i + expr_len]) == 3:
raise ValueError("The ranges should be a tuple of "
"length 3, got %s" % str(arg[i + expr_len]))
return args
| bsd-3-clause |
mxjl620/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
cosmodesi/snsurvey | src/control.py | 1 | 1120 | #!/usr/bin/env python
import numpy
import sncosmo
import scipy.optimize
import matplotlib.pyplot as plt
model=sncosmo.Model(source='salt2-extended')
def f(t ,rlim):
# print t, model.bandflux('desr',t, zp = rlim, zpsys='ab')
return model.bandflux('desr',t, zp = rlim, zpsys='ab')-1.
def controlTime(z,rlim):
model.set(z=z, t0=55000.)
model.set_source_peakabsmag(absmag=-19.3,band='bessellb',magsys='ab')
pre = scipy.optimize.fsolve(f, 55000.-15*(1+z) ,args=(rlim),xtol=1e-8)
post = scipy.optimize.fsolve(f, 55000.+20*(1+z) ,args=(rlim),xtol=1e-8)
return max(post[0]-pre[0],0)
# print scipy.optimize.fsolve(f, 55000.+40,args=(rlim),factor=1.,xtol=1e-8)
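# Editor's note (illustrative only): controlTime(z, rlim) returns the time span
# (in days) around peak (t0 = 55000) during which a SN Ia at redshift z stays
# brighter than the r-band limiting magnitude rlim; a hypothetical call would
# be controlTime(0.1, 21.0).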
def plot():
lmag = numpy.arange(19.5,21.6,0.5)
zs = numpy.arange(0.02, 0.2501,0.02)
ans = []
for lm in lmag:
ans_=[]
for z in zs:
ans_.append(controlTime(z,lm))
ans.append(ans_)
for lm, ct in zip(lmag, ans):
plt.plot(zs, ct, label = '$r_{{lim}} = {}$'.format(str(lm)))
plt.xlabel(r'$z$')
plt.ylabel(r'control time (days)')
plt.legend()
plt.show()
| bsd-3-clause |
rajat1994/scikit-learn | examples/plot_kernel_ridge_regression.py | 230 | 6222 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
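# --- Editor's note: illustrative sketch only, not part of the original example. ---
# The "closed form" fit mentioned in the docstring solves the linear system
#     (K + alpha * I) dual_coef = y
# for the dual coefficients; scikit-learn's KernelRidge does this internally.
# The helper below is a hypothetical stand-alone version assuming an RBF
# kernel (its name and defaults are the editor's, not part of scikit-learn).
def _krr_closed_form_sketch(X_train, y_train, alpha=0.1, gamma=10.0):
    from sklearn.metrics.pairwise import rbf_kernel
    K = rbf_kernel(X_train, X_train, gamma=gamma)
    # Solve the regularized kernel system for the dual coefficients.
    dual_coef = np.linalg.solve(K + alpha * np.eye(K.shape[0]), y_train)
    # Predictions for new inputs X_new would be
    # rbf_kernel(X_new, X_train, gamma=gamma).dot(dual_coef)
    return dual_coef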
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors')
plt.scatter(X[:100], y[:100], c='k', label='data')
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
Ektorus/bohrium | ve/cpu/tools/locate.py | 1 | 8762 | from __future__ import print_function
## 3D Lattice Boltzmann (BGK) model of a fluid.
## D3Q19 model. At each timestep, particle densities propagate
## outwards in the directions indicated in the figure. An
## equivalent 'equilibrium' density is found, and the densities
## relax towards that state, in a proportion governed by omega.
## Iain Haslam, March 2006.
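## Editor's note (illustrative only): the collision step below implements the
## BGK relaxation
##     F_i <- (1 - omega) * F_i + omega * FEQ_i
## with the D3Q19 equilibrium distribution
##     FEQ_i = w_i * rho * (1 + 3*(e_i.u) + 9/2*(e_i.u)**2 - 3/2*|u|**2),
## where the lattice weights w_i are t1 (rest), t2 (nearest neighbours) and
## t3 (next-nearest neighbours) defined in main().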
import util
if util.Benchmark().bohrium:
import bohrium as np
else:
import numpy as np
def main():
B = util.Benchmark()
nx = B.size[0]
ny = B.size[1]
nz = B.size[2]
ITER = B.size[3]
NO_OBST = 1
omega = 1.0
density = 1.0
deltaU = 1e-7
t1 = 1/3.0
t2 = 1/18.0
t3 = 1/36.0
B.start()
F = np.ones((19, nx, ny, nz), dtype=np.float64)
F[:] = density/19.0
FEQ = np.ones((19, nx, ny, nz), dtype=np.float64)
FEQ[:] = density/19.0
T = np.zeros((19, nx, ny, nz), dtype=np.float64)
#Create the scenery.
BOUND = np.zeros((nx, ny, nz), dtype=np.float64)
BOUNDi = np.ones((nx, ny, nz), dtype=np.float64)
"""
if not NO_OBST:
for i in xrange(nx):
for j in xrange(ny):
for k in xrange(nz):
if ((i-4)**2+(j-5)**2+(k-6)**2) < 6:
BOUND[i,j,k] += 1.0
BOUNDi[i,j,k] += 0.0
BOUND[:,0,:] += 1.0
BOUNDi[:,0,:] *= 0.0
"""
if util.Benchmark().bohrium:
np.flush()
for ts in xrange(0, ITER):
##Propagate / Streaming step
T[:] = F
#nearest-neighbours
F[1,:,:,0] = T[1,:,:,-1]
F[1,:,:,1:] = T[1,:,:,:-1]
F[2,:,:,:-1] = T[2,:,:,1:]
F[2,:,:,-1] = T[2,:,:,0]
F[3,:,0,:] = T[3,:,-1,:]
F[3,:,1:,:] = T[3,:,:-1,:]
F[4,:,:-1,:] = T[4,:,1:,:]
F[4,:,-1,:] = T[4,:,0,:]
F[5,0,:,:] = T[5,-1,:,:]
F[5,1:,:,:] = T[5,:-1,:,:]
F[6,:-1,:,:] = T[6,1:,:,:]
F[6,-1,:,:] = T[6,0,:,:]
#next-nearest neighbours
F[7,0 ,0 ,:] = T[7,-1 , -1,:]
F[7,0 ,1:,:] = T[7,-1 ,:-1,:]
F[7,1:,0 ,:] = T[7,:-1, -1,:]
F[7,1:,1:,:] = T[7,:-1,:-1,:]
F[8,0 ,:-1,:] = T[8,-1 ,1:,:]
F[8,0 , -1,:] = T[8,-1 ,0 ,:]
F[8,1:,:-1,:] = T[8,:-1,1:,:]
F[8,1:, -1,:] = T[8,:-1,0 ,:]
F[9,:-1,0 ,:] = T[9,1:, -1,:]
F[9,:-1,1:,:] = T[9,1:,:-1,:]
F[9,-1 ,0 ,:] = T[9,0 , 0,:]
F[9,-1 ,1:,:] = T[9,0 ,:-1,:]
F[10,:-1,:-1,:] = T[10,1:,1:,:]
F[10,:-1, -1,:] = T[10,1:,0 ,:]
F[10,-1 ,:-1,:] = T[10,0 ,1:,:]
F[10,-1 , -1,:] = T[10,0 ,0 ,:]
F[11,0 ,:,0 ] = T[11,0 ,:, -1]
F[11,0 ,:,1:] = T[11,0 ,:,:-1]
F[11,1:,:,0 ] = T[11,:-1,:, -1]
F[11,1:,:,1:] = T[11,:-1,:,:-1]
F[12,0 ,:,:-1] = T[12, -1,:,1:]
F[12,0 ,:, -1] = T[12, -1,:,0 ]
F[12,1:,:,:-1] = T[12,:-1,:,1:]
F[12,1:,:, -1] = T[12,:-1,:,0 ]
F[13,:-1,:,0 ] = T[13,1:,:, -1]
F[13,:-1,:,1:] = T[13,1:,:,:-1]
F[13, -1,:,0 ] = T[13,0 ,:, -1]
F[13, -1,:,1:] = T[13,0 ,:,:-1]
F[14,:-1,:,:-1] = T[14,1:,:,1:]
F[14,:-1,:, -1] = T[14,1:,:,0 ]
F[14,-1 ,:,:-1] = T[14,0 ,:,1:]
F[14,-1 ,:, -1] = T[14,0 ,:,0 ]
F[15,:,0 ,0 ] = T[15,:, -1, -1]
F[15,:,0 ,1:] = T[15,:, -1,:-1]
F[15,:,1:,0 ] = T[15,:,:-1, -1]
F[15,:,1:,1:] = T[15,:,:-1,:-1]
F[16,:,0 ,:-1] = T[16,:, -1,1:]
F[16,:,0 , -1] = T[16,:, -1,0 ]
F[16,:,1:,:-1] = T[16,:,:-1,1:]
F[16,:,1:, -1] = T[16,:,:-1,0 ]
F[17,:,:-1,0 ] = T[17,:,1:, -1]
F[17,:,:-1,1:] = T[17,:,1:,:-1]
F[17,:, -1,0 ] = T[17,:,0 , -1]
F[17,:, -1,1:] = T[17,:,0 ,:-1]
F[18,:,:-1,:-1] = T[18,:,1:,1:]
F[18,:,:-1, -1] = T[18,:,1:,0 ]
F[18,:,-1 ,:-1] = T[18,:,0 ,1:]
F[18,:,-1 , -1] = T[18,:,0 ,0 ]
#Densities bouncing back at next timestep
        BB = np.zeros(F.shape)
T[:] = F
T[1:,:,:,:] *= BOUND[np.newaxis,:,:,:]
BB[2 ,:,:,:] += T[1 ,:,:,:]
BB[1 ,:,:,:] += T[2 ,:,:,:]
BB[4 ,:,:,:] += T[3 ,:,:,:]
BB[3 ,:,:,:] += T[4 ,:,:,:]
BB[6 ,:,:,:] += T[5 ,:,:,:]
BB[5 ,:,:,:] += T[6 ,:,:,:]
BB[10,:,:,:] += T[7 ,:,:,:]
BB[9 ,:,:,:] += T[8 ,:,:,:]
BB[8 ,:,:,:] += T[9 ,:,:,:]
BB[7 ,:,:,:] += T[10,:,:,:]
BB[14,:,:,:] += T[11,:,:,:]
BB[13,:,:,:] += T[12,:,:,:]
BB[12,:,:,:] += T[13,:,:,:]
BB[11,:,:,:] += T[14,:,:,:]
BB[18,:,:,:] += T[15,:,:,:]
BB[17,:,:,:] += T[16,:,:,:]
BB[16,:,:,:] += T[17,:,:,:]
BB[15,:,:,:] += T[18,:,:,:]
# Relax calculate equilibrium state (FEQ) with equivalent speed and density to F
DENSITY = np.add.reduce(F)
#UX = F[5,:,:,:].copy()
UX = np.ones(F[5,:,:,:].shape, dtype=np.float64)
UX[:,:,:] = F[5,:,:,:]
UX += F[7,:,:,:]
UX += F[8,:,:,:]
UX += F[11,:,:,:]
UX += F[12,:,:,:]
UX -= F[6,:,:,:]
UX -= F[9,:,:,:]
UX -= F[10,:,:,:]
UX -= F[13,:,:,:]
UX -= F[14,:,:,:]
UX /=DENSITY
#UY = F[3,:,:,:].copy()
UY = np.ones(F[3,:,:,:].shape, dtype=np.float64)
UY[:,:,:] = F[3,:,:,:]
UY += F[7,:,:,:]
UY += F[9,:,:,:]
UY += F[15,:,:,:]
UY += F[16,:,:,:]
UY -= F[4,:,:,:]
UY -= F[8,:,:,:]
UY -= F[10,:,:,:]
UY -= F[17,:,:,:]
UY -= F[18,:,:,:]
UY /=DENSITY
#UZ = F[1,:,:,:].copy()
UZ = np.ones(F[1,:,:,:].shape, dtype=np.float64)
UZ[:,:,:] = F[1,:,:,:]
UZ += F[11,:,:,:]
UZ += F[13,:,:,:]
UZ += F[15,:,:,:]
UZ += F[17,:,:,:]
UZ -= F[2,:,:,:]
UZ -= F[12,:,:,:]
UZ -= F[14,:,:,:]
UZ -= F[16,:,:,:]
UZ -= F[18,:,:,:]
UZ /=DENSITY
UX[0,:,:] += deltaU #Increase inlet pressure
        #Set boundaries to zero.
UX[:,:,:] *= BOUNDi
UY[:,:,:] *= BOUNDi
UZ[:,:,:] *= BOUNDi
DENSITY[:,:,:] *= BOUNDi
U_SQU = UX**2 + UY**2 + UZ**2
# Calculate equilibrium distribution: stationary
FEQ[0,:,:,:] = (t1*DENSITY)*(1.0-3.0*U_SQU/2.0)
# nearest-neighbours
T1 = 3.0/2.0*U_SQU
tDENSITY = t2*DENSITY
FEQ[1,:,:,:]=tDENSITY*(1.0 + 3.0*UZ + 9.0/2.0*UZ**2 - T1)
FEQ[2,:,:,:]=tDENSITY*(1.0 - 3.0*UZ + 9.0/2.0*UZ**2 - T1)
FEQ[3,:,:,:]=tDENSITY*(1.0 + 3.0*UY + 9.0/2.0*UY**2 - T1)
FEQ[4,:,:,:]=tDENSITY*(1.0 - 3.0*UY + 9.0/2.0*UY**2 - T1)
FEQ[5,:,:,:]=tDENSITY*(1.0 + 3.0*UX + 9.0/2.0*UX**2 - T1)
FEQ[6,:,:,:]=tDENSITY*(1.0 - 3.0*UX + 9.0/2.0*UX**2 - T1)
# next-nearest neighbours
T1 = 3.0*U_SQU/2.0
tDENSITY = t3*DENSITY
U8 = UX+UY
FEQ[7,:,:,:] =tDENSITY*(1.0 + 3.0*U8 + 9.0/2.0*(U8)**2 - T1)
U9 = UX-UY
FEQ[8,:,:,:] =tDENSITY*(1.0 + 3.0*U9 + 9.0/2.0*(U9)**2 - T1)
U10 = -UX+UY
FEQ[9,:,:,:] =tDENSITY*(1.0 + 3.0*U10 + 9.0/2.0*(U10)**2 - T1)
U8 *= -1.0
FEQ[10,:,:,:]=tDENSITY*(1.0 + 3.0*U8 + 9.0/2.0*(U8)**2 - T1)
U12 = UX+UZ
FEQ[11,:,:,:]=tDENSITY*(1.0 + 3.0*U12 + 9.0/2.0*(U12)**2 - T1)
        U12 *= -1.0
FEQ[14,:,:,:]=tDENSITY*(1.0 + 3.0*U12 + 9.0/2.0*(U12)**2 - T1)
U13 = UX-UZ
FEQ[12,:,:,:]=tDENSITY*(1.0 + 3.0*U13 + 9.0/2.0*(U13)**2 - T1)
U13 *= -1.0
FEQ[13,:,:,:]=tDENSITY*(1.0 + 3.0*U13 + 9.0/2.0*(U13)**2 - T1)
U16 = UY+UZ
FEQ[15,:,:,:]=tDENSITY*(1.0 + 3.0*U16 + 9.0/2.0*(U16)**2 - T1)
U17 = UY-UZ
FEQ[16,:,:,:]=tDENSITY*(1.0 + 3.0*U17 + 9.0/2.0*(U17)**2 - T1)
U17 *= -1.0
FEQ[17,:,:,:]=tDENSITY*(1.0 + 3.0*U17 + 9.0/2.0*(U17)**2 - T1)
U16 *= -1.0
FEQ[18,:,:,:]=tDENSITY*(1.0 + 3.0*U16 + 9.0/2.0*(U16)**2 - T1)
F *= (1.0-omega)
F += omega * FEQ
#Densities bouncing back at next timestep
F[1:,:,:,:] *= BOUNDi[np.newaxis,:,:,:]
F[1:,:,:,:] += BB[1:,:,:,:]
del BB
del T1
del UX, UY, UZ
del U_SQU
del DENSITY, tDENSITY
del U8, U9, U10, U12, U13, U16, U17
if util.Benchmark().bohrium:
np.flush()
B.stop()
B.pprint()
if B.outputfn:
B.tofile(B.outputfn, {'res': UX})
"""
import matplotlib.pyplot as plt
UX *= -1
plt.hold(True)
plt.quiver(UY[:,:,4],UX[:,:,4], pivot='middle')
plt.imshow(BOUND[:,:,4])
plt.show()
"""
if __name__ == "__main__":
main()
| lgpl-3.0 |
dmitriz/zipline | zipline/utils/tradingcalendar.py | 6 | 11182 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytz
from datetime import datetime
from dateutil import rrule
from functools import partial
start = pd.Timestamp('1990-01-01', tz='UTC')
end_base = pd.Timestamp('today', tz='UTC')
# Give an aggressive buffer for logic that needs to use the next trading
# day or minute.
end = end_base + pd.Timedelta(days=365)
def canonicalize_datetime(dt):
# Strip out any HHMMSS or timezone info in the user's datetime, so that
# all the datetimes we return will be 00:00:00 UTC.
return datetime(dt.year, dt.month, dt.day, tzinfo=pytz.utc)
def get_non_trading_days(start, end):
non_trading_rules = []
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
weekends = rrule.rrule(
rrule.YEARLY,
byweekday=(rrule.SA, rrule.SU),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(weekends)
new_years = rrule.rrule(
rrule.MONTHLY,
byyearday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years)
new_years_sunday = rrule.rrule(
rrule.MONTHLY,
byyearday=2,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years_sunday)
mlk_day = rrule.rrule(
rrule.MONTHLY,
bymonth=1,
byweekday=(rrule.MO(+3)),
cache=True,
dtstart=datetime(1998, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(mlk_day)
presidents_day = rrule.rrule(
rrule.MONTHLY,
bymonth=2,
byweekday=(rrule.MO(3)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(presidents_day)
good_friday = rrule.rrule(
rrule.DAILY,
byeaster=-2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(good_friday)
memorial_day = rrule.rrule(
rrule.MONTHLY,
bymonth=5,
byweekday=(rrule.MO(-1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(memorial_day)
july_4th = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=4,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th)
july_4th_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=5,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th_sunday)
july_4th_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th_saturday)
labor_day = rrule.rrule(
rrule.MONTHLY,
bymonth=9,
byweekday=(rrule.MO(1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(labor_day)
thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
byweekday=(rrule.TH(4)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(thanksgiving)
christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas)
christmas_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_sunday)
# If Christmas is a Saturday then 24th, a Friday is observed.
christmas_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_saturday)
non_trading_ruleset = rrule.rruleset()
for rule in non_trading_rules:
non_trading_ruleset.rrule(rule)
non_trading_days = non_trading_ruleset.between(start, end, inc=True)
# Add September 11th closings
# http://en.wikipedia.org/wiki/Aftermath_of_the_September_11_attacks
# Due to the terrorist attacks, the stock market did not open on 9/11/2001
# It did not open again until 9/17/2001.
#
# September 2001
# Su Mo Tu We Th Fr Sa
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
for day_num in range(11, 17):
non_trading_days.append(
datetime(2001, 9, day_num, tzinfo=pytz.utc))
# Add closings due to Hurricane Sandy in 2012
# http://en.wikipedia.org/wiki/Hurricane_sandy
#
# The stock exchange was closed due to Hurricane Sandy's
# impact on New York.
# It closed on 10/29 and 10/30, reopening on 10/31
# October 2012
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
for day_num in range(29, 31):
non_trading_days.append(
datetime(2012, 10, day_num, tzinfo=pytz.utc))
# Misc closings from NYSE listing.
# http://www.nyse.com/pdfs/closings.pdf
#
# National Days of Mourning
# - President Richard Nixon
non_trading_days.append(datetime(1994, 4, 27, tzinfo=pytz.utc))
# - President Ronald W. Reagan - June 11, 2004
non_trading_days.append(datetime(2004, 6, 11, tzinfo=pytz.utc))
# - President Gerald R. Ford - Jan 2, 2007
non_trading_days.append(datetime(2007, 1, 2, tzinfo=pytz.utc))
non_trading_days.sort()
return pd.DatetimeIndex(non_trading_days)
non_trading_days = get_non_trading_days(start, end)
trading_day = pd.tseries.offsets.CDay(holidays=non_trading_days)
def get_trading_days(start, end, trading_day=trading_day):
return pd.date_range(start=start.date(),
end=end.date(),
freq=trading_day).tz_localize('UTC')
trading_days = get_trading_days(start, end)
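# Editor's note (illustrative only): ``trading_day`` is a CustomBusinessDay
# offset, so it can be used directly for calendar arithmetic, e.g. a
# hypothetical ``some_timestamp + trading_day`` advances to the next NYSE
# session, skipping weekends and the holidays listed above; ``trading_days``
# is the corresponding tz-aware DatetimeIndex of sessions.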
def get_early_closes(start, end):
# 1:00 PM close rules based on
# http://quant.stackexchange.com/questions/4083/nyse-early-close-rules-july-4th-and-dec-25th # noqa
# and verified against http://www.nyse.com/pdfs/closings.pdf
# These rules are valid starting in 1993
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
start = max(start, datetime(1993, 1, 1, tzinfo=pytz.utc))
end = max(end, datetime(1993, 1, 1, tzinfo=pytz.utc))
# Not included here are early closes prior to 1993
# or unplanned early closes
early_close_rules = []
day_after_thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
# 4th Friday isn't correct if month starts on Friday, so restrict to
# day range:
byweekday=(rrule.FR),
bymonthday=range(23, 30),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_after_thanksgiving)
christmas_eve = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=(rrule.MO, rrule.TU, rrule.WE, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(christmas_eve)
friday_after_christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# valid 1993-2007
until=min(end, datetime(2007, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(friday_after_christmas)
day_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=(rrule.MO, rrule.TU, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_before_independence_day)
day_after_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=5,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# starting in 2013: wednesday before independence day
until=min(end, datetime(2012, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(day_after_independence_day)
wednesday_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.WE,
cache=True,
# starting in 2013
dtstart=max(start, datetime(2013, 1, 1, tzinfo=pytz.utc)),
until=max(end, datetime(2013, 1, 1, tzinfo=pytz.utc))
)
early_close_rules.append(wednesday_before_independence_day)
early_close_ruleset = rrule.rruleset()
for rule in early_close_rules:
early_close_ruleset.rrule(rule)
early_closes = early_close_ruleset.between(start, end, inc=True)
# Misc early closings from NYSE listing.
# http://www.nyse.com/pdfs/closings.pdf
#
# New Year's Eve
nye_1999 = datetime(1999, 12, 31, tzinfo=pytz.utc)
if start <= nye_1999 and nye_1999 <= end:
early_closes.append(nye_1999)
early_closes.sort()
return pd.DatetimeIndex(early_closes)
early_closes = get_early_closes(start, end)
def get_open_and_close(day, early_closes):
market_open = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=9,
minute=31),
tz='US/Eastern').tz_convert('UTC')
# 1 PM if early close, 4 PM otherwise
close_hour = 13 if day in early_closes else 16
market_close = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=close_hour),
tz='US/Eastern').tz_convert('UTC')
return market_open, market_close
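# Editor's note (illustrative only): for a regular session the pair returned
# by get_open_and_close corresponds to 9:31 and 16:00 US/Eastern converted to
# UTC; for the early closes computed earlier the close is 13:00 US/Eastern.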
def get_open_and_closes(trading_days, early_closes, get_open_and_close):
open_and_closes = pd.DataFrame(index=trading_days,
columns=('market_open', 'market_close'))
get_o_and_c = partial(get_open_and_close, early_closes=early_closes)
open_and_closes['market_open'], open_and_closes['market_close'] = \
zip(*open_and_closes.index.map(get_o_and_c))
return open_and_closes
open_and_closes = get_open_and_closes(trading_days, early_closes,
get_open_and_close)
| apache-2.0 |
andrewnc/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
wzbozon/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have the following installed:
  * scikit-learn
This script runs two benchmarks.
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
macks22/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
justincassidy/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
Silmathoron/nest-simulator | pynest/examples/spatial/grid_iaf_irr.py | 20 | 1453 | # -*- coding: utf-8 -*-
#
# grid_iaf_irr.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Create 12 freely placed iaf_psc_alpha neurons
-----------------------------------------------
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
nest.ResetKernel()
pos = nest.spatial.free([nest.random.uniform(-0.75, 0.75), nest.random.uniform(-0.5, 0.5)], extent=[2., 1.5])
l1 = nest.Create('iaf_psc_alpha', 12, positions=pos)
nest.PrintNodes()
nest.PlotLayer(l1, nodesize=50)
# beautify
plt.axis([-1.0, 1.0, -0.75, 0.75])
plt.axes().set_aspect('equal', 'box')
plt.axes().set_xticks((-0.75, -0.25, 0.25, 0.75))
plt.axes().set_yticks((-0.5, 0, 0.5))
plt.grid(True)
plt.xlabel('Extent: 2.0')
plt.ylabel('Extent: 1.5')
plt.show()
# plt.savefig('grid_iaf_irr.png')
| gpl-2.0 |
eickenberg/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 19 | 2844 | """
Testing for mean shift clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
"""Test estimate_bandwidth"""
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
""" Test MeanShift algorithm """
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_meanshift_predict():
"""Test MeanShift.predict"""
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_unfitted():
"""Non-regression: before fit, there should be not fitted attributes."""
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
"""
Test the bin seeding technique which can be used in the mean shift
algorithm
"""
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.5, 1.5], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
test_bins = get_bin_seeds(X, 0.01, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(test_result) == 6)
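# The same strategy is exposed on the estimator itself via the ``bin_seeding``
# flag; a minimal sketch (parameter values picked for this toy data set, not
# taken from the tests above):
#
#   ms = MeanShift(bandwidth=1.2, bin_seeding=True, min_bin_freq=1)
#   labels = ms.fit_predict(X)
#
# Seeding from binned points replaces the default "seed at every sample"
# behaviour and typically reduces the number of kernel iterations.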
| bsd-3-clause |
aev3/trading-with-python | historicDataDownloader/historicDataDownloader.py | 77 | 4526 | '''
Created on 4 aug. 2012
Copyright: Jev Kuznetsov
License: BSD
a module for downloading historical data from IB
'''
import ib
import pandas
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from time import sleep
import tradingWithPython.lib.logger as logger
from pandas import DataFrame, Index
import datetime as dt
from timeKeeper import TimeKeeper
import time
timeFormat = "%Y%m%d %H:%M:%S"
class DataHandler(object):
''' handles incoming messages '''
def __init__(self,tws):
self._log = logger.getLogger('DH')
tws.register(self.msgHandler,message.HistoricalData)
self.reset()
def reset(self):
self._log.debug('Resetting data')
self.dataReady = False
self._timestamp = []
self._data = {'open':[],'high':[],'low':[],'close':[],'volume':[],'count':[],'WAP':[]}
def msgHandler(self,msg):
#print '[msg]', msg
if msg.date[:8] == 'finished':
            self._log.debug('Data received')
self.dataReady = True
return
self._timestamp.append(dt.datetime.strptime(msg.date,timeFormat))
for k in self._data.keys():
self._data[k].append(getattr(msg, k))
@property
def data(self):
''' return downloaded data as a DataFrame '''
df = DataFrame(data=self._data,index=Index(self._timestamp))
return df
class Downloader(object):
def __init__(self,debug=False):
self._log = logger.getLogger('DLD')
        self._log.debug('Initializing data downloader. Pandas version={0}, ibpy version:{1}'.format(pandas.__version__,ib.version))
self.tws = ibConnection()
self._dataHandler = DataHandler(self.tws)
if debug:
self.tws.registerAll(self._debugHandler)
self.tws.unregister(self._debugHandler,message.HistoricalData)
self._log.debug('Connecting to tws')
self.tws.connect()
self._timeKeeper = TimeKeeper() # keep track of past requests
self._reqId = 1 # current request id
def _debugHandler(self,msg):
print '[debug]', msg
def requestData(self,contract,endDateTime,durationStr='1800 S',barSizeSetting='1 secs',whatToShow='TRADES',useRTH=1,formatDate=1):
self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol,endDateTime))
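        # Pacing guard: IB rejects bursts of historical data requests (roughly
        # 60 per rolling ten-minute window), so stall here until the request
        # log kept by TimeKeeper drops back below that threshold.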
while self._timeKeeper.nrRequests(timeSpan=600) > 59:
print 'Too many requests done. Waiting... '
time.sleep(1)
self._timeKeeper.addRequest()
self._dataHandler.reset()
self.tws.reqHistoricalData(self._reqId,contract,endDateTime,durationStr,barSizeSetting,whatToShow,useRTH,formatDate)
self._reqId+=1
#wait for data
startTime = time.time()
timeout = 3
while not self._dataHandler.dataReady and (time.time()-startTime < timeout):
sleep(2)
if not self._dataHandler.dataReady:
self._log.error('Data timeout')
print self._dataHandler.data
return self._dataHandler.data
def getIntradayData(self,contract, dateTuple ):
        ''' get full-day data at a 1-second interval
        dateTuple: a tuple of (yyyy,mm,dd)
'''
openTime = dt.datetime(*dateTuple)+dt.timedelta(hours=16)
closeTime = dt.datetime(*dateTuple)+dt.timedelta(hours=22)
timeRange = pandas.date_range(openTime,closeTime,freq='30min')
datasets = []
for t in timeRange:
datasets.append(self.requestData(contract,t.strftime(timeFormat)))
return pandas.concat(datasets)
def disconnect(self):
self.tws.disconnect()
if __name__=='__main__':
dl = Downloader(debug=True)
c = Contract()
c.m_symbol = 'SPY'
c.m_secType = 'STK'
c.m_exchange = 'SMART'
c.m_currency = 'USD'
df = dl.getIntradayData(c, (2012,8,6))
df.to_csv('test.csv')
# df = dl.requestData(c, '20120803 22:00:00')
# df.to_csv('test1.csv')
# df = dl.requestData(c, '20120803 21:30:00')
# df.to_csv('test2.csv')
dl.disconnect()
print 'Done.' | bsd-3-clause |
michaelyin/code-for-blog | 2008/wx_mpl_bars.py | 12 | 7994 | """
This demo demonstrates how to embed a matplotlib (mpl) plot
into a wxPython GUI application, including:
* Using the navigation toolbar
* Adding data to the plot
* Dynamically modifying the plot's properties
* Processing mpl events
* Saving the plot to a file from a menu
The main goal is to serve as a basis for developing rich wx GUI
applications featuring mpl plots (using the mpl OO API).
Eli Bendersky (eliben@gmail.com)
License: this code is in the public domain
Last modified: 30.07.2008
"""
import os
import pprint
import random
import wx
# The recommended way to use wx with mpl is with the WXAgg
# backend.
#
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
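# The embedding pattern used below reduces to three steps; a minimal sketch,
# assuming `parent` is any wx.Window that will own the plot:
#
#   fig = Figure((5.0, 4.0), dpi=100)
#   canvas = FigCanvas(parent, -1, fig)     # the mpl drawing surface is a wx widget
#   toolbar = NavigationToolbar(canvas)     # optional pan/zoom/save controls
#
# after which `canvas` and `toolbar` are added to a sizer like any other widget.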
class BarsFrame(wx.Frame):
""" The main frame of the application
"""
title = 'Demo: wxPython with matplotlib'
def __init__(self):
wx.Frame.__init__(self, None, -1, self.title)
self.data = [5, 6, 9, 14]
self.create_menu()
self.create_status_bar()
self.create_main_panel()
self.textbox.SetValue(' '.join(map(str, self.data)))
self.draw_figure()
def create_menu(self):
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_expt = menu_file.Append(-1, "&Save plot\tCtrl-S", "Save plot to file")
self.Bind(wx.EVT_MENU, self.on_save_plot, m_expt)
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, "E&xit\tCtrl-X", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
menu_help = wx.Menu()
m_about = menu_help.Append(-1, "&About\tF1", "About the demo")
self.Bind(wx.EVT_MENU, self.on_about, m_about)
self.menubar.Append(menu_file, "&File")
self.menubar.Append(menu_help, "&Help")
self.SetMenuBar(self.menubar)
def create_main_panel(self):
""" Creates the main panel with all the controls on it:
* mpl canvas
* mpl navigation toolbar
* Control panel for interaction
"""
self.panel = wx.Panel(self)
# Create the mpl Figure and FigCanvas objects.
# 5x4 inches, 100 dots-per-inch
#
self.dpi = 100
self.fig = Figure((5.0, 4.0), dpi=self.dpi)
self.canvas = FigCanvas(self.panel, -1, self.fig)
# Since we have only one plot, we can use add_axes
# instead of add_subplot, but then the subplot
# configuration tool in the navigation toolbar wouldn't
# work.
#
self.axes = self.fig.add_subplot(111)
# Bind the 'pick' event for clicking on one of the bars
#
self.canvas.mpl_connect('pick_event', self.on_pick)
self.textbox = wx.TextCtrl(
self.panel,
size=(200,-1),
style=wx.TE_PROCESS_ENTER)
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.textbox)
self.drawbutton = wx.Button(self.panel, -1, "Draw!")
self.Bind(wx.EVT_BUTTON, self.on_draw_button, self.drawbutton)
self.cb_grid = wx.CheckBox(self.panel, -1,
"Show Grid",
style=wx.ALIGN_RIGHT)
self.Bind(wx.EVT_CHECKBOX, self.on_cb_grid, self.cb_grid)
self.slider_label = wx.StaticText(self.panel, -1,
"Bar width (%): ")
self.slider_width = wx.Slider(self.panel, -1,
value=20,
minValue=1,
maxValue=100,
style=wx.SL_AUTOTICKS | wx.SL_LABELS)
self.slider_width.SetTickFreq(10, 1)
self.Bind(wx.EVT_COMMAND_SCROLL_THUMBTRACK, self.on_slider_width, self.slider_width)
# Create the navigation toolbar, tied to the canvas
#
self.toolbar = NavigationToolbar(self.canvas)
#
# Layout with box sizers
#
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.toolbar, 0, wx.EXPAND)
self.vbox.AddSpacer(10)
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
flags = wx.ALIGN_LEFT | wx.ALL | wx.ALIGN_CENTER_VERTICAL
self.hbox.Add(self.textbox, 0, border=3, flag=flags)
self.hbox.Add(self.drawbutton, 0, border=3, flag=flags)
self.hbox.Add(self.cb_grid, 0, border=3, flag=flags)
self.hbox.AddSpacer(30)
self.hbox.Add(self.slider_label, 0, flag=flags)
self.hbox.Add(self.slider_width, 0, border=3, flag=flags)
self.vbox.Add(self.hbox, 0, flag = wx.ALIGN_LEFT | wx.TOP)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
def create_status_bar(self):
self.statusbar = self.CreateStatusBar()
def draw_figure(self):
""" Redraws the figure
"""
str = self.textbox.GetValue()
self.data = map(int, str.split())
x = range(len(self.data))
# clear the axes and redraw the plot anew
#
self.axes.clear()
self.axes.grid(self.cb_grid.IsChecked())
self.axes.bar(
left=x,
height=self.data,
width=self.slider_width.GetValue() / 100.0,
align='center',
alpha=0.44,
picker=5)
self.canvas.draw()
def on_cb_grid(self, event):
self.draw_figure()
def on_slider_width(self, event):
self.draw_figure()
def on_draw_button(self, event):
self.draw_figure()
def on_pick(self, event):
# The event received here is of the type
# matplotlib.backend_bases.PickEvent
#
# It carries lots of information, of which we're using
# only a small amount here.
#
box_points = event.artist.get_bbox().get_points()
msg = "You've clicked on a bar with coords:\n %s" % box_points
dlg = wx.MessageDialog(
self,
msg,
"Click!",
wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def on_text_enter(self, event):
self.draw_figure()
def on_save_plot(self, event):
file_choices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(
self,
message="Save plot as...",
defaultDir=os.getcwd(),
defaultFile="plot.png",
wildcard=file_choices,
style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
self.flash_status_message("Saved to %s" % path)
def on_exit(self, event):
self.Destroy()
def on_about(self, event):
msg = """ A demo using wxPython with matplotlib:
* Use the matplotlib navigation bar
* Add values to the text box and press Enter (or click "Draw!")
* Show or hide the grid
* Drag the slider to modify the width of the bars
* Save the plot to a file using the File menu
* Click on a bar to receive an informative message
"""
dlg = wx.MessageDialog(self, msg, "About", wx.OK)
dlg.ShowModal()
dlg.Destroy()
def flash_status_message(self, msg, flash_len_ms=1500):
self.statusbar.SetStatusText(msg)
self.timeroff = wx.Timer(self)
self.Bind(
wx.EVT_TIMER,
self.on_flash_status_off,
self.timeroff)
self.timeroff.Start(flash_len_ms, oneShot=True)
def on_flash_status_off(self, event):
self.statusbar.SetStatusText('')
if __name__ == '__main__':
app = wx.PySimpleApp()
app.frame = BarsFrame()
app.frame.Show()
app.MainLoop()
| unlicense |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/core/ops.py | 7 | 54105 | """
Arithmetic operations for PandasObjects
This is not a public API.
"""
# necessary to enforce truediv in Python 2.X
from __future__ import division
import operator
import warnings
import numpy as np
import pandas as pd
import datetime
from pandas import compat, lib, tslib
import pandas.index as _index
from pandas.util.decorators import Appender
import pandas.computation.expressions as expressions
from pandas.lib import isscalar
from pandas.tslib import iNaT
from pandas.compat import bind_method
import pandas.core.missing as missing
import pandas.algos as _algos
from pandas.core.common import (_values_from_object, _maybe_match_name,
PerformanceWarning)
from pandas.types.missing import notnull, isnull
from pandas.types.common import (needs_i8_conversion,
is_datetimelike_v_numeric,
is_integer_dtype, is_categorical_dtype,
is_object_dtype, is_timedelta64_dtype,
is_datetime64_dtype, is_datetime64tz_dtype,
is_bool_dtype, is_datetimetz,
is_list_like,
_ensure_object)
from pandas.types.cast import _maybe_upcast_putmask, _find_common_type
from pandas.types.generic import ABCSeries, ABCIndex, ABCPeriodIndex
# -----------------------------------------------------------------------------
# Functions that add arithmetic methods to objects, given arithmetic factory
# methods
def _create_methods(arith_method, comp_method, bool_method,
use_numexpr, special=False, default_axis='columns',
have_divmod=False):
# creates actual methods based upon arithmetic, comp and bool method
# constructors.
# NOTE: Only frame cares about default_axis, specifically: special methods
# have default axis None, whereas flex methods have default axis 'columns'
# if we're not using numexpr, then don't pass a str_rep
if use_numexpr:
op = lambda x: x
else:
op = lambda x: None
if special:
def names(x):
if x[-1] == "_":
return "__%s_" % x
else:
return "__%s__" % x
else:
names = lambda x: x
    # In frame, all special methods have default_axis=None, flex methods have
# default_axis set to the default (columns)
# yapf: disable
new_methods = dict(
add=arith_method(operator.add, names('add'), op('+'),
default_axis=default_axis),
radd=arith_method(lambda x, y: y + x, names('radd'), op('+'),
default_axis=default_axis),
sub=arith_method(operator.sub, names('sub'), op('-'),
default_axis=default_axis),
mul=arith_method(operator.mul, names('mul'), op('*'),
default_axis=default_axis),
truediv=arith_method(operator.truediv, names('truediv'), op('/'),
truediv=True, fill_zeros=np.inf,
default_axis=default_axis),
floordiv=arith_method(operator.floordiv, names('floordiv'), op('//'),
default_axis=default_axis, fill_zeros=np.inf),
# Causes a floating point exception in the tests when numexpr enabled,
# so for now no speedup
mod=arith_method(operator.mod, names('mod'), None,
default_axis=default_axis, fill_zeros=np.nan),
pow=arith_method(operator.pow, names('pow'), op('**'),
default_axis=default_axis),
# not entirely sure why this is necessary, but previously was included
# so it's here to maintain compatibility
rmul=arith_method(operator.mul, names('rmul'), op('*'),
default_axis=default_axis, reversed=True),
rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-'),
default_axis=default_axis, reversed=True),
rtruediv=arith_method(lambda x, y: operator.truediv(y, x),
names('rtruediv'), op('/'), truediv=True,
fill_zeros=np.inf, default_axis=default_axis,
reversed=True),
rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x),
names('rfloordiv'), op('//'),
default_axis=default_axis, fill_zeros=np.inf,
reversed=True),
rpow=arith_method(lambda x, y: y**x, names('rpow'), op('**'),
default_axis=default_axis, reversed=True),
rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'),
default_axis=default_axis, fill_zeros=np.nan,
reversed=True),)
# yapf: enable
new_methods['div'] = new_methods['truediv']
new_methods['rdiv'] = new_methods['rtruediv']
# Comp methods never had a default axis set
if comp_method:
new_methods.update(dict(
eq=comp_method(operator.eq, names('eq'), op('==')),
ne=comp_method(operator.ne, names('ne'), op('!='), masker=True),
lt=comp_method(operator.lt, names('lt'), op('<')),
gt=comp_method(operator.gt, names('gt'), op('>')),
le=comp_method(operator.le, names('le'), op('<=')),
ge=comp_method(operator.ge, names('ge'), op('>=')), ))
if bool_method:
new_methods.update(
dict(and_=bool_method(operator.and_, names('and_'), op('&')),
or_=bool_method(operator.or_, names('or_'), op('|')),
# For some reason ``^`` wasn't used in original.
xor=bool_method(operator.xor, names('xor'), op('^')),
rand_=bool_method(lambda x, y: operator.and_(y, x),
names('rand_'), op('&')),
ror_=bool_method(lambda x, y: operator.or_(y, x),
names('ror_'), op('|')),
rxor=bool_method(lambda x, y: operator.xor(y, x),
names('rxor'), op('^'))))
if have_divmod:
# divmod doesn't have an op that is supported by numexpr
new_methods['divmod'] = arith_method(
divmod,
names('divmod'),
None,
default_axis=default_axis,
construct_result=_construct_divmod_result,
)
new_methods = dict((names(k), v) for k, v in new_methods.items())
return new_methods
def add_methods(cls, new_methods, force, select, exclude):
if select and exclude:
raise TypeError("May only pass either select or exclude")
methods = new_methods
if select:
select = set(select)
methods = {}
for key, method in new_methods.items():
if key in select:
methods[key] = method
if exclude:
for k in exclude:
new_methods.pop(k, None)
for name, method in new_methods.items():
if force or name not in cls.__dict__:
bind_method(cls, name, method)
# ----------------------------------------------------------------------
# Arithmetic
def add_special_arithmetic_methods(cls, arith_method=None,
comp_method=None, bool_method=None,
use_numexpr=True, force=False, select=None,
exclude=None, have_divmod=False):
"""
Adds the full suite of special arithmetic methods (``__add__``,
``__sub__``, etc.) to the class.
Parameters
----------
arith_method : function (optional)
factory for special arithmetic methods, with op string:
f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)
comp_method : function, optional,
factory for rich comparison - signature: f(op, name, str_rep)
use_numexpr : bool, default True
whether to accelerate with numexpr, defaults to True
force : bool, default False
        if False, checks whether function is defined **on ``cls.__dict__``**
        before defining; if True, always defines functions on the class base
select : iterable of strings (optional)
if passed, only sets functions with names in select
exclude : iterable of strings (optional)
if passed, will not set functions with names in exclude
have_divmod : bool, (optional)
should a divmod method be added? this method is special because it
returns a tuple of cls instead of a single element of type cls
"""
# in frame, special methods have default_axis = None, comp methods use
# 'columns'
new_methods = _create_methods(arith_method, comp_method,
bool_method, use_numexpr, default_axis=None,
special=True, have_divmod=have_divmod)
# inplace operators (I feel like these should get passed an `inplace=True`
    # or just be removed)
def _wrap_inplace_method(method):
"""
return an inplace wrapper for this method
"""
def f(self, other):
result = method(self, other)
# this makes sure that we are aligned like the input
# we are updating inplace so we want to ignore is_copy
self._update_inplace(result.reindex_like(self, copy=False)._data,
verify_is_copy=False)
return self
return f
new_methods.update(
dict(__iadd__=_wrap_inplace_method(new_methods["__add__"]),
__isub__=_wrap_inplace_method(new_methods["__sub__"]),
__imul__=_wrap_inplace_method(new_methods["__mul__"]),
__itruediv__=_wrap_inplace_method(new_methods["__truediv__"]),
__ipow__=_wrap_inplace_method(new_methods["__pow__"]), ))
if not compat.PY3:
new_methods["__idiv__"] = new_methods["__div__"]
add_methods(cls, new_methods=new_methods, force=force, select=select,
exclude=exclude)
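# Typical call site (a sketch of how pandas wires this up from series.py /
# frame.py): the per-class factory bundles defined near the bottom of this
# module are handed straight to this function, e.g. something like
#
#   add_special_arithmetic_methods(Series, **series_special_funcs)
#
# which is what ultimately gives Series its __add__, __eq__, __and__, ...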
def add_flex_arithmetic_methods(cls, flex_arith_method,
flex_comp_method=None, flex_bool_method=None,
use_numexpr=True, force=False, select=None,
exclude=None):
"""
Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)
to the class.
Parameters
----------
flex_arith_method : function
factory for special arithmetic methods, with op string:
f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)
flex_comp_method : function, optional,
factory for rich comparison - signature: f(op, name, str_rep)
use_numexpr : bool, default True
whether to accelerate with numexpr, defaults to True
force : bool, default False
        if False, checks whether function is defined **on ``cls.__dict__``**
        before defining; if True, always defines functions on the class base
select : iterable of strings (optional)
if passed, only sets functions with names in select
exclude : iterable of strings (optional)
if passed, will not set functions with names in exclude
"""
# in frame, default axis is 'columns', doesn't matter for series and panel
new_methods = _create_methods(flex_arith_method,
flex_comp_method, flex_bool_method,
use_numexpr, default_axis='columns',
special=False)
new_methods.update(dict(multiply=new_methods['mul'],
subtract=new_methods['sub'],
divide=new_methods['div']))
# opt out of bool flex methods for now
for k in ('ror_', 'rxor', 'rand_'):
if k in new_methods:
new_methods.pop(k)
add_methods(cls, new_methods=new_methods, force=force, select=select,
exclude=exclude)
class _Op(object):
"""
Wrapper around Series arithmetic operations.
Generally, you should use classmethod ``_Op.get_op`` as an entry point.
This validates and coerces lhs and rhs depending on its dtype and
based on op. See _TimeOp also.
Parameters
----------
left : Series
lhs of op
right : object
rhs of op
name : str
name of op
na_op : callable
a function which wraps op
"""
fill_value = np.nan
wrap_results = staticmethod(lambda x: x)
dtype = None
def __init__(self, left, right, name, na_op):
self.left = left
self.right = right
self.name = name
self.na_op = na_op
self.lvalues = left
self.rvalues = right
@classmethod
def get_op(cls, left, right, name, na_op):
"""
Get op dispatcher, returns _Op or _TimeOp.
If ``left`` and ``right`` are appropriate for datetime arithmetic with
operation ``name``, processes them and returns a ``_TimeOp`` object
        that stores all the required values. Otherwise, it will return
        a plain ``_Op``, indicating that the operation is performed via
        the normal numpy path.
"""
is_timedelta_lhs = is_timedelta64_dtype(left)
is_datetime_lhs = (is_datetime64_dtype(left) or
is_datetime64tz_dtype(left))
if not (is_datetime_lhs or is_timedelta_lhs):
return _Op(left, right, name, na_op)
else:
return _TimeOp(left, right, name, na_op)
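# Dispatch sketch: numeric or object operands fall through to the plain _Op
# pass-through above, whereas a datetime64/timedelta64 left-hand side, e.g.
#
#   _Op.get_op(pd.Series(pd.date_range('2016-01-01', periods=2)),
#              pd.Timedelta('1 day'), '__add__', na_op)
#
# returns a _TimeOp, which validates the pairing and pre-converts both sides
# (see _convert_for_datetime below). Here `na_op` stands for the closure built
# in _arith_method_SERIES.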
class _TimeOp(_Op):
"""
Wrapper around Series datetime/time/timedelta arithmetic operations.
Generally, you should use classmethod ``_Op.get_op`` as an entry point.
"""
fill_value = iNaT
def __init__(self, left, right, name, na_op):
super(_TimeOp, self).__init__(left, right, name, na_op)
lvalues = self._convert_to_array(left, name=name)
rvalues = self._convert_to_array(right, name=name, other=lvalues)
# left
self.is_offset_lhs = self._is_offset(left)
self.is_timedelta_lhs = is_timedelta64_dtype(lvalues)
self.is_datetime64_lhs = is_datetime64_dtype(lvalues)
self.is_datetime64tz_lhs = is_datetime64tz_dtype(lvalues)
self.is_datetime_lhs = (self.is_datetime64_lhs or
self.is_datetime64tz_lhs)
self.is_integer_lhs = left.dtype.kind in ['i', 'u']
self.is_floating_lhs = left.dtype.kind == 'f'
# right
self.is_offset_rhs = self._is_offset(right)
self.is_datetime64_rhs = is_datetime64_dtype(rvalues)
self.is_datetime64tz_rhs = is_datetime64tz_dtype(rvalues)
self.is_datetime_rhs = (self.is_datetime64_rhs or
self.is_datetime64tz_rhs)
self.is_timedelta_rhs = is_timedelta64_dtype(rvalues)
self.is_integer_rhs = rvalues.dtype.kind in ('i', 'u')
self.is_floating_rhs = rvalues.dtype.kind == 'f'
self._validate(lvalues, rvalues, name)
self.lvalues, self.rvalues = self._convert_for_datetime(lvalues,
rvalues)
def _validate(self, lvalues, rvalues, name):
# timedelta and integer mul/div
if ((self.is_timedelta_lhs and
(self.is_integer_rhs or self.is_floating_rhs)) or
(self.is_timedelta_rhs and
(self.is_integer_lhs or self.is_floating_lhs))):
if name not in ('__div__', '__truediv__', '__mul__', '__rmul__'):
raise TypeError("can only operate on a timedelta and an "
"integer or a float for division and "
"multiplication, but the operator [%s] was"
"passed" % name)
# 2 timedeltas
elif ((self.is_timedelta_lhs and
(self.is_timedelta_rhs or self.is_offset_rhs)) or
(self.is_timedelta_rhs and
(self.is_timedelta_lhs or self.is_offset_lhs))):
if name not in ('__div__', '__rdiv__', '__truediv__',
'__rtruediv__', '__add__', '__radd__', '__sub__',
'__rsub__'):
raise TypeError("can only operate on a timedeltas for "
"addition, subtraction, and division, but the"
" operator [%s] was passed" % name)
# datetime and timedelta/DateOffset
elif (self.is_datetime_lhs and
(self.is_timedelta_rhs or self.is_offset_rhs)):
if name not in ('__add__', '__radd__', '__sub__'):
raise TypeError("can only operate on a datetime with a rhs of "
"a timedelta/DateOffset for addition and "
"subtraction, but the operator [%s] was "
"passed" % name)
elif (self.is_datetime_rhs and
(self.is_timedelta_lhs or self.is_offset_lhs)):
if name not in ('__add__', '__radd__', '__rsub__'):
raise TypeError("can only operate on a timedelta/DateOffset "
"with a rhs of a datetime for addition, "
"but the operator [%s] was passed" % name)
# 2 datetimes
elif self.is_datetime_lhs and self.is_datetime_rhs:
if name not in ('__sub__', '__rsub__'):
raise TypeError("can only operate on a datetimes for"
" subtraction, but the operator [%s] was"
" passed" % name)
# if tz's must be equal (same or None)
if getattr(lvalues, 'tz', None) != getattr(rvalues, 'tz', None):
raise ValueError("Incompatbile tz's on datetime subtraction "
"ops")
elif ((self.is_timedelta_lhs or self.is_offset_lhs) and
self.is_datetime_rhs):
if name not in ('__add__', '__radd__'):
raise TypeError("can only operate on a timedelta/DateOffset "
"and a datetime for addition, but the "
"operator [%s] was passed" % name)
else:
raise TypeError('cannot operate on a series without a rhs '
'of a series/ndarray of type datetime64[ns] '
'or a timedelta')
def _convert_to_array(self, values, name=None, other=None):
"""converts values to ndarray"""
from pandas.tseries.timedeltas import to_timedelta
ovalues = values
supplied_dtype = None
if not is_list_like(values):
values = np.array([values])
# if this is a Series that contains relevant dtype info, then use this
# instead of the inferred type; this avoids coercing Series([NaT],
# dtype='datetime64[ns]') to Series([NaT], dtype='timedelta64[ns]')
elif (isinstance(values, pd.Series) and
(is_timedelta64_dtype(values) or is_datetime64_dtype(values))):
supplied_dtype = values.dtype
inferred_type = supplied_dtype or lib.infer_dtype(values)
if (inferred_type in ('datetime64', 'datetime', 'date', 'time') or
is_datetimetz(inferred_type)):
            # if we have an `other` that is a timedelta, but use pd.NaT here,
            # we are in the wrong path
if (supplied_dtype is None and other is not None and
(other.dtype in ('timedelta64[ns]', 'datetime64[ns]')) and
isnull(values).all()):
values = np.empty(values.shape, dtype='timedelta64[ns]')
values[:] = iNaT
# a datelike
elif isinstance(values, pd.DatetimeIndex):
values = values.to_series()
# datetime with tz
elif (isinstance(ovalues, datetime.datetime) and
hasattr(ovalues, 'tzinfo')):
values = pd.DatetimeIndex(values)
# datetime array with tz
elif is_datetimetz(values):
if isinstance(values, ABCSeries):
values = values._values
elif not (isinstance(values, (np.ndarray, ABCSeries)) and
is_datetime64_dtype(values)):
values = tslib.array_to_datetime(values)
elif inferred_type in ('timedelta', 'timedelta64'):
            # have a timedelta, convert to ns here
values = to_timedelta(values, errors='coerce', box=False)
elif inferred_type == 'integer':
# py3 compat where dtype is 'm' but is an integer
if values.dtype.kind == 'm':
values = values.astype('timedelta64[ns]')
elif isinstance(values, pd.PeriodIndex):
values = values.to_timestamp().to_series()
elif name not in ('__truediv__', '__div__', '__mul__', '__rmul__'):
raise TypeError("incompatible type for a datetime/timedelta "
"operation [{0}]".format(name))
elif inferred_type == 'floating':
if (isnull(values).all() and
name in ('__add__', '__radd__', '__sub__', '__rsub__')):
values = np.empty(values.shape, dtype=other.dtype)
values[:] = iNaT
return values
elif self._is_offset(values):
return values
else:
raise TypeError("incompatible type [{0}] for a datetime/timedelta"
" operation".format(np.array(values).dtype))
return values
def _convert_for_datetime(self, lvalues, rvalues):
from pandas.tseries.timedeltas import to_timedelta
mask = isnull(lvalues) | isnull(rvalues)
# datetimes require views
if self.is_datetime_lhs or self.is_datetime_rhs:
# datetime subtraction means timedelta
if self.is_datetime_lhs and self.is_datetime_rhs:
if self.name in ('__sub__', '__rsub__'):
self.dtype = 'timedelta64[ns]'
else:
self.dtype = 'datetime64[ns]'
elif self.is_datetime64tz_lhs:
self.dtype = lvalues.dtype
elif self.is_datetime64tz_rhs:
self.dtype = rvalues.dtype
else:
self.dtype = 'datetime64[ns]'
# if adding single offset try vectorized path
# in DatetimeIndex; otherwise elementwise apply
def _offset(lvalues, rvalues):
if len(lvalues) == 1:
rvalues = pd.DatetimeIndex(rvalues)
lvalues = lvalues[0]
else:
warnings.warn("Adding/subtracting array of DateOffsets to "
"Series not vectorized", PerformanceWarning)
rvalues = rvalues.astype('O')
# pass thru on the na_op
self.na_op = lambda x, y: getattr(x, self.name)(y)
return lvalues, rvalues
if self.is_offset_lhs:
lvalues, rvalues = _offset(lvalues, rvalues)
elif self.is_offset_rhs:
rvalues, lvalues = _offset(rvalues, lvalues)
else:
# with tz, convert to UTC
if self.is_datetime64tz_lhs:
lvalues = lvalues.tz_localize(None)
if self.is_datetime64tz_rhs:
rvalues = rvalues.tz_localize(None)
lvalues = lvalues.view(np.int64)
rvalues = rvalues.view(np.int64)
# otherwise it's a timedelta
else:
self.dtype = 'timedelta64[ns]'
# convert Tick DateOffset to underlying delta
if self.is_offset_lhs:
lvalues = to_timedelta(lvalues, box=False)
if self.is_offset_rhs:
rvalues = to_timedelta(rvalues, box=False)
lvalues = lvalues.astype(np.int64)
if not self.is_floating_rhs:
rvalues = rvalues.astype(np.int64)
# time delta division -> unit less
# integer gets converted to timedelta in np < 1.6
if ((self.is_timedelta_lhs and self.is_timedelta_rhs) and
not self.is_integer_rhs and not self.is_integer_lhs and
self.name in ('__div__', '__truediv__')):
self.dtype = 'float64'
self.fill_value = np.nan
lvalues = lvalues.astype(np.float64)
rvalues = rvalues.astype(np.float64)
# if we need to mask the results
if mask.any():
def f(x):
# datetime64[ns]/timedelta64[ns] masking
try:
x = np.array(x, dtype=self.dtype)
except TypeError:
x = np.array(x, dtype='datetime64[ns]')
np.putmask(x, mask, self.fill_value)
return x
self.wrap_results = f
return lvalues, rvalues
def _is_offset(self, arr_or_obj):
""" check if obj or all elements of list-like is DateOffset """
if isinstance(arr_or_obj, pd.DateOffset):
return True
elif is_list_like(arr_or_obj) and len(arr_or_obj):
return all(isinstance(x, pd.DateOffset) for x in arr_or_obj)
return False
def _align_method_SERIES(left, right, align_asobject=False):
""" align lhs and rhs Series """
# ToDo: Different from _align_method_FRAME, list, tuple and ndarray
# are not coerced here
# because Series has inconsistencies described in #13637
if isinstance(right, ABCSeries):
# avoid repeated alignment
if not left.index.equals(right.index):
if align_asobject:
# to keep original value's dtype for bool ops
left = left.astype(object)
right = right.astype(object)
left, right = left.align(right, copy=False)
return left, right
def _construct_result(left, result, index, name, dtype):
return left._constructor(result, index=index, name=name, dtype=dtype)
def _construct_divmod_result(left, result, index, name, dtype):
"""divmod returns a tuple of like indexed series instead of a single series.
"""
constructor = left._constructor
return (
constructor(result[0], index=index, name=name, dtype=dtype),
constructor(result[1], index=index, name=name, dtype=dtype),
)
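# With have_divmod=True (only Series opts in, via series_special_funcs below),
# this constructor is what lets the builtin divmod return a pair of Series:
#
#   q, r = divmod(pd.Series([7, 8, 9]), 4)   # quotient and remainder, same index
#
# instead of a single combined result.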
def _arith_method_SERIES(op, name, str_rep, fill_zeros=None, default_axis=None,
construct_result=_construct_result, **eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True, **eval_kwargs)
except TypeError:
if isinstance(y, (np.ndarray, ABCSeries, pd.Index)):
dtype = _find_common_type([x.dtype, y.dtype])
result = np.empty(x.size, dtype=dtype)
mask = notnull(x) & notnull(y)
result[mask] = op(x[mask], _values_from_object(y[mask]))
elif isinstance(x, np.ndarray):
result = np.empty(len(x), dtype=x.dtype)
mask = notnull(x)
result[mask] = op(x[mask], y)
else:
raise TypeError("{typ} cannot perform the operation "
"{op}".format(typ=type(x).__name__,
op=str_rep))
result, changed = _maybe_upcast_putmask(result, ~mask, np.nan)
result = missing.fill_zeros(result, x, y, name, fill_zeros)
return result
def safe_na_op(lvalues, rvalues):
try:
with np.errstate(all='ignore'):
return na_op(lvalues, rvalues)
except Exception:
if isinstance(rvalues, ABCSeries):
if is_object_dtype(rvalues):
# if dtype is object, try elementwise op
return _algos.arrmap_object(rvalues,
lambda x: op(lvalues, x))
else:
if is_object_dtype(lvalues):
return _algos.arrmap_object(lvalues,
lambda x: op(x, rvalues))
raise
def wrapper(left, right, name=name, na_op=na_op):
if isinstance(right, pd.DataFrame):
return NotImplemented
left, right = _align_method_SERIES(left, right)
converted = _Op.get_op(left, right, name, na_op)
left, right = converted.left, converted.right
lvalues, rvalues = converted.lvalues, converted.rvalues
dtype = converted.dtype
wrap_results = converted.wrap_results
na_op = converted.na_op
if isinstance(rvalues, ABCSeries):
name = _maybe_match_name(left, rvalues)
lvalues = getattr(lvalues, 'values', lvalues)
rvalues = getattr(rvalues, 'values', rvalues)
# _Op aligns left and right
else:
name = left.name
if (hasattr(lvalues, 'values') and
not isinstance(lvalues, pd.DatetimeIndex)):
lvalues = lvalues.values
result = wrap_results(safe_na_op(lvalues, rvalues))
return construct_result(
left,
result,
index=left.index,
name=name,
dtype=dtype,
)
return wrapper
def _comp_method_OBJECT_ARRAY(op, x, y):
if isinstance(y, list):
y = lib.list_to_object_array(y)
if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)):
if not is_object_dtype(y.dtype):
y = y.astype(np.object_)
if isinstance(y, (ABCSeries, ABCIndex)):
y = y.values
result = lib.vec_compare(x, y, op)
else:
result = lib.scalar_compare(x, y, op)
return result
def _comp_method_SERIES(op, name, str_rep, masker=False):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
# dispatch to the categorical if we have a categorical
# in either operand
if is_categorical_dtype(x):
return op(x, y)
elif is_categorical_dtype(y) and not isscalar(y):
return op(y, x)
if is_object_dtype(x.dtype):
result = _comp_method_OBJECT_ARRAY(op, x, y)
else:
# we want to compare like types
# we only want to convert to integer like if
# we are not NotImplemented, otherwise
# we would allow datetime64 (but viewed as i8) against
# integer comparisons
if is_datetimelike_v_numeric(x, y):
raise TypeError("invalid type comparison")
# numpy does not like comparisons vs None
if isscalar(y) and isnull(y):
if name == '__ne__':
return np.ones(len(x), dtype=bool)
else:
return np.zeros(len(x), dtype=bool)
# we have a datetime/timedelta and may need to convert
mask = None
if (needs_i8_conversion(x) or
(not isscalar(y) and needs_i8_conversion(y))):
if isscalar(y):
mask = isnull(x)
y = _index.convert_scalar(x, _values_from_object(y))
else:
mask = isnull(x) | isnull(y)
y = y.view('i8')
x = x.view('i8')
try:
with np.errstate(all='ignore'):
result = getattr(x, name)(y)
if result is NotImplemented:
raise TypeError("invalid type comparison")
except AttributeError:
result = op(x, y)
if mask is not None and mask.any():
result[mask] = masker
return result
def wrapper(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
self._get_axis_number(axis)
if isinstance(other, ABCSeries):
name = _maybe_match_name(self, other)
if not self._indexed_same(other):
msg = 'Can only compare identically-labeled Series objects'
raise ValueError(msg)
return self._constructor(na_op(self.values, other.values),
index=self.index, name=name)
elif isinstance(other, pd.DataFrame): # pragma: no cover
return NotImplemented
elif isinstance(other, (np.ndarray, pd.Index)):
# do not check length of zerodim array
# as it will broadcast
if (not lib.isscalar(lib.item_from_zerodim(other)) and
len(self) != len(other)):
raise ValueError('Lengths must match to compare')
if isinstance(other, ABCPeriodIndex):
# temp workaround until fixing GH 13637
# tested in test_nat_comparisons
# (pandas.tests.series.test_operators.TestSeriesOperators)
return self._constructor(na_op(self.values,
other.asobject.values),
index=self.index)
return self._constructor(na_op(self.values, np.asarray(other)),
index=self.index).__finalize__(self)
elif isinstance(other, pd.Categorical):
if not is_categorical_dtype(self):
msg = ("Cannot compare a Categorical for op {op} with Series "
"of dtype {typ}.\nIf you want to compare values, use "
"'series <op> np.asarray(other)'.")
raise TypeError(msg.format(op=op, typ=self.dtype))
if is_categorical_dtype(self):
# cats are a special case as get_values() would return an ndarray,
# which would then not take categories ordering into account
# we can go directly to op, as the na_op would just test again and
# dispatch to it.
with np.errstate(all='ignore'):
res = op(self.values, other)
else:
values = self.get_values()
if isinstance(other, (list, np.ndarray)):
other = np.asarray(other)
with np.errstate(all='ignore'):
res = na_op(values, other)
if isscalar(res):
raise TypeError('Could not compare %s type with Series' %
type(other))
# always return a full value series here
res = _values_from_object(res)
res = pd.Series(res, index=self.index, name=self.name, dtype='bool')
return res
return wrapper
def _bool_method_SERIES(op, name, str_rep):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
if isinstance(y, list):
y = lib.list_to_object_array(y)
if isinstance(y, (np.ndarray, ABCSeries)):
if (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)):
result = op(x, y) # when would this be hit?
else:
x = _ensure_object(x)
y = _ensure_object(y)
result = lib.vec_binop(x, y, op)
else:
try:
# let null fall thru
if not isnull(y):
y = bool(y)
result = lib.scalar_binop(x, y, op)
except:
raise TypeError("cannot compare a dtyped [{0}] array with "
"a scalar of type [{1}]".format(
x.dtype, type(y).__name__))
return result
def wrapper(self, other):
is_self_int_dtype = is_integer_dtype(self.dtype)
fill_int = lambda x: x.fillna(0)
fill_bool = lambda x: x.fillna(False).astype(bool)
self, other = _align_method_SERIES(self, other, align_asobject=True)
if isinstance(other, ABCSeries):
name = _maybe_match_name(self, other)
is_other_int_dtype = is_integer_dtype(other.dtype)
other = fill_int(other) if is_other_int_dtype else fill_bool(other)
filler = (fill_int if is_self_int_dtype and is_other_int_dtype
else fill_bool)
return filler(self._constructor(na_op(self.values, other.values),
index=self.index, name=name))
elif isinstance(other, pd.DataFrame):
return NotImplemented
else:
# scalars, list, tuple, np.array
filler = (fill_int if is_self_int_dtype and
is_integer_dtype(np.asarray(other)) else fill_bool)
return filler(self._constructor(
na_op(self.values, other),
index=self.index)).__finalize__(self)
return wrapper
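# Filling rules above in practice: when both operands are integer dtype the
# result keeps bitwise integer semantics, otherwise NaNs are filled with False
# and the result is cast to bool, e.g.
#
#   pd.Series([1, 2]) & pd.Series([3, 2])                  # -> [1, 2] (int64)
#   pd.Series([True, False]) | pd.Series([False, False])   # -> [True, False] (bool)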
_op_descriptions = {'add': {'op': '+',
'desc': 'Addition',
'reversed': False,
'reverse': 'radd'},
'sub': {'op': '-',
'desc': 'Subtraction',
'reversed': False,
'reverse': 'rsub'},
'mul': {'op': '*',
'desc': 'Multiplication',
'reversed': False,
'reverse': 'rmul'},
'mod': {'op': '%',
'desc': 'Modulo',
'reversed': False,
'reverse': 'rmod'},
'pow': {'op': '**',
'desc': 'Exponential power',
'reversed': False,
'reverse': 'rpow'},
'truediv': {'op': '/',
'desc': 'Floating division',
'reversed': False,
'reverse': 'rtruediv'},
'floordiv': {'op': '//',
'desc': 'Integer division',
'reversed': False,
'reverse': 'rfloordiv'},
'divmod': {'op': 'divmod',
'desc': 'Integer division and modulo',
'reversed': False,
'reverse': None},
'eq': {'op': '==',
'desc': 'Equal to',
'reversed': False,
'reverse': None},
'ne': {'op': '!=',
'desc': 'Not equal to',
'reversed': False,
'reverse': None},
'lt': {'op': '<',
'desc': 'Less than',
'reversed': False,
'reverse': None},
'le': {'op': '<=',
'desc': 'Less than or equal to',
'reversed': False,
'reverse': None},
'gt': {'op': '>',
'desc': 'Greater than',
'reversed': False,
'reverse': None},
'ge': {'op': '>=',
'desc': 'Greater than or equal to',
'reversed': False,
'reverse': None}}
_op_names = list(_op_descriptions.keys())
for k in _op_names:
reverse_op = _op_descriptions[k]['reverse']
_op_descriptions[reverse_op] = _op_descriptions[k].copy()
_op_descriptions[reverse_op]['reversed'] = True
_op_descriptions[reverse_op]['reverse'] = k
_flex_doc_SERIES = """
%s of series and other, element-wise (binary operator `%s`).
Equivalent to ``%s``, but with support to substitute a fill_value for
missing data in one of the inputs.
Parameters
----------
other: Series or scalar value
fill_value : None or float value, default None (NaN)
Fill missing (NaN) values with this value. If both Series are
missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Returns
-------
result : Series
See also
--------
Series.%s
"""
def _flex_method_SERIES(op, name, str_rep, default_axis=None, fill_zeros=None,
**eval_kwargs):
op_name = name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' series'
else:
equiv = 'series ' + op_desc['op'] + ' other'
doc = _flex_doc_SERIES % (op_desc['desc'], op_name, equiv,
op_desc['reverse'])
@Appender(doc)
def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
# validate axis
if axis is not None:
self._get_axis_number(axis)
if isinstance(other, ABCSeries):
return self._binop(other, op, level=level, fill_value=fill_value)
elif isinstance(other, (np.ndarray, list, tuple)):
if len(other) != len(self):
raise ValueError('Lengths must be equal')
return self._binop(self._constructor(other, self.index), op,
level=level, fill_value=fill_value)
else:
if fill_value is not None:
self = self.fillna(fill_value)
return self._constructor(op(self, other),
self.index).__finalize__(self)
flex_wrapper.__name__ = name
return flex_wrapper
series_flex_funcs = dict(flex_arith_method=_flex_method_SERIES,
flex_comp_method=_flex_method_SERIES)
series_special_funcs = dict(arith_method=_arith_method_SERIES,
comp_method=_comp_method_SERIES,
bool_method=_bool_method_SERIES,
have_divmod=True)
_arith_doc_FRAME = """
Binary operator %s with support to substitute a fill_value for missing data in
one of the inputs
Parameters
----------
other : Series, DataFrame, or constant
axis : {0, 1, 'index', 'columns'}
For Series input, axis to match Series index on
fill_value : None or float value, default None
Fill missing (NaN) values with this value. If both DataFrame locations are
missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Notes
-----
Mismatched indices will be unioned together
Returns
-------
result : DataFrame
"""
_flex_doc_FRAME = """
%s of dataframe and other, element-wise (binary operator `%s`).
Equivalent to ``%s``, but with support to substitute a fill_value for
missing data in one of the inputs.
Parameters
----------
other : Series, DataFrame, or constant
axis : {0, 1, 'index', 'columns'}
For Series input, axis to match Series index on
fill_value : None or float value, default None
Fill missing (NaN) values with this value. If both DataFrame
locations are missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Notes
-----
Mismatched indices will be unioned together
Returns
-------
result : DataFrame
See also
--------
DataFrame.%s
"""
def _align_method_FRAME(left, right, axis):
""" convert rhs to meet lhs dims if input is list, tuple or np.ndarray """
def to_series(right):
msg = 'Unable to coerce to Series, length must be {0}: given {1}'
if axis is not None and left._get_axis_name(axis) == 'index':
if len(left.index) != len(right):
raise ValueError(msg.format(len(left.index), len(right)))
right = left._constructor_sliced(right, index=left.index)
else:
if len(left.columns) != len(right):
raise ValueError(msg.format(len(left.columns), len(right)))
right = left._constructor_sliced(right, index=left.columns)
return right
if isinstance(right, (list, tuple)):
right = to_series(right)
elif isinstance(right, np.ndarray) and right.ndim: # skips np scalar
if right.ndim == 1:
right = to_series(right)
elif right.ndim == 2:
if left.shape != right.shape:
msg = ("Unable to coerce to DataFrame, "
"shape must be {0}: given {1}")
raise ValueError(msg.format(left.shape, right.shape))
right = left._constructor(right, index=left.index,
columns=left.columns)
else:
msg = 'Unable to coerce to Series/DataFrame, dim must be <= 2: {0}'
raise ValueError(msg.format(right.shape, ))
return right
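# Coercion sketch for the helper above, given a DataFrame `df`:
#
#   df + [10, 20]                     # list length must equal len(df.columns)
#   df.add([1, 2, 3], axis='index')   # list length must equal len(df.index)
#
# while a 2-D ndarray must match df.shape exactly before it is wrapped in a
# DataFrame with df's index and columns.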
def _arith_method_FRAME(op, name, str_rep=None, default_axis='columns',
fill_zeros=None, **eval_kwargs):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True, **eval_kwargs)
except TypeError:
xrav = x.ravel()
if isinstance(y, (np.ndarray, ABCSeries)):
dtype = np.find_common_type([x.dtype, y.dtype], [])
result = np.empty(x.size, dtype=dtype)
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
xrav = xrav[mask]
# we may need to manually
# broadcast a 1 element array
if yrav.shape != mask.shape:
yrav = np.empty(mask.shape, dtype=yrav.dtype)
yrav.fill(yrav.item())
yrav = yrav[mask]
if np.prod(xrav.shape) and np.prod(yrav.shape):
with np.errstate(all='ignore'):
result[mask] = op(xrav, yrav)
elif hasattr(x, 'size'):
result = np.empty(x.size, dtype=x.dtype)
mask = notnull(xrav)
xrav = xrav[mask]
if np.prod(xrav.shape):
with np.errstate(all='ignore'):
result[mask] = op(xrav, y)
else:
raise TypeError("cannot perform operation {op} between "
"objects of type {x} and {y}".format(
op=name, x=type(x), y=type(y)))
result, changed = _maybe_upcast_putmask(result, ~mask, np.nan)
result = result.reshape(x.shape)
result = missing.fill_zeros(result, x, y, name, fill_zeros)
return result
if name in _op_descriptions:
op_name = name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' dataframe'
else:
equiv = 'dataframe ' + op_desc['op'] + ' other'
doc = _flex_doc_FRAME % (op_desc['desc'], op_name, equiv,
op_desc['reverse'])
else:
doc = _arith_doc_FRAME % name
@Appender(doc)
def f(self, other, axis=default_axis, level=None, fill_value=None):
other = _align_method_FRAME(self, other, axis)
if isinstance(other, pd.DataFrame): # Another DataFrame
return self._combine_frame(other, na_op, fill_value, level)
elif isinstance(other, ABCSeries):
return self._combine_series(other, na_op, fill_value, axis, level)
else:
if fill_value is not None:
self = self.fillna(fill_value)
return self._combine_const(other, na_op)
f.__name__ = name
return f
# Masker unused for now
def _flex_comp_method_FRAME(op, name, str_rep=None, default_axis='columns',
masker=False):
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
xrav = x.ravel()
result = np.empty(x.size, dtype=x.dtype)
if isinstance(y, (np.ndarray, ABCSeries)):
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
result[mask] = op(np.array(list(xrav[mask])),
np.array(list(yrav[mask])))
else:
mask = notnull(xrav)
result[mask] = op(np.array(list(xrav[mask])), y)
if op == operator.ne: # pragma: no cover
np.putmask(result, ~mask, True)
else:
np.putmask(result, ~mask, False)
result = result.reshape(x.shape)
return result
@Appender('Wrapper for flexible comparison methods %s' % name)
def f(self, other, axis=default_axis, level=None):
other = _align_method_FRAME(self, other, axis)
if isinstance(other, pd.DataFrame): # Another DataFrame
return self._flex_compare_frame(other, na_op, str_rep, level)
elif isinstance(other, ABCSeries):
return self._combine_series(other, na_op, None, axis, level)
else:
return self._combine_const(other, na_op)
f.__name__ = name
return f
def _comp_method_FRAME(func, name, str_rep, masker=False):
@Appender('Wrapper for comparison method %s' % name)
def f(self, other):
if isinstance(other, pd.DataFrame): # Another DataFrame
return self._compare_frame(other, func, str_rep)
elif isinstance(other, ABCSeries):
return self._combine_series_infer(other, func)
else:
            # straight boolean comparisons we want to allow all columns
# (regardless of dtype to pass thru) See #4537 for discussion.
res = self._combine_const(other, func, raise_on_error=False)
return res.fillna(True).astype(bool)
f.__name__ = name
return f
frame_flex_funcs = dict(flex_arith_method=_arith_method_FRAME,
flex_comp_method=_flex_comp_method_FRAME)
frame_special_funcs = dict(arith_method=_arith_method_FRAME,
comp_method=_comp_method_FRAME,
bool_method=_arith_method_FRAME)
def _arith_method_PANEL(op, name, str_rep=None, fill_zeros=None,
default_axis=None, **eval_kwargs):
# copied from Series na_op above, but without unnecessary branch for
# non-scalar
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True, **eval_kwargs)
except TypeError:
# TODO: might need to find_common_type here?
result = np.empty(len(x), dtype=x.dtype)
mask = notnull(x)
result[mask] = op(x[mask], y)
result, changed = _maybe_upcast_putmask(result, ~mask, np.nan)
result = missing.fill_zeros(result, x, y, name, fill_zeros)
return result
# work only for scalars
def f(self, other):
if not isscalar(other):
raise ValueError('Simple arithmetic with %s can only be '
'done with scalar values' %
self._constructor.__name__)
return self._combine(other, op)
f.__name__ = name
return f
def _comp_method_PANEL(op, name, str_rep=None, masker=False):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True)
except TypeError:
xrav = x.ravel()
result = np.empty(x.size, dtype=bool)
if isinstance(y, np.ndarray):
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
result[mask] = op(np.array(list(xrav[mask])),
np.array(list(yrav[mask])))
else:
mask = notnull(xrav)
result[mask] = op(np.array(list(xrav[mask])), y)
if op == operator.ne: # pragma: no cover
np.putmask(result, ~mask, True)
else:
np.putmask(result, ~mask, False)
result = result.reshape(x.shape)
return result
@Appender('Wrapper for comparison method %s' % name)
def f(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, self._constructor):
return self._compare_constructor(other, na_op)
elif isinstance(other, (self._constructor_sliced, pd.DataFrame,
ABCSeries)):
raise Exception("input needs alignment for this object [%s]" %
self._constructor)
else:
return self._combine_const(other, na_op)
f.__name__ = name
return f
panel_special_funcs = dict(arith_method=_arith_method_PANEL,
comp_method=_comp_method_PANEL,
bool_method=_arith_method_PANEL)
| gpl-3.0 |
darcy0511/Dato-Core | src/unity/python/graphlab/test/test_io.py | 13 | 15881 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
import commands
import json
import logging
import os
import re
import tempfile
import unittest
import pandas
import graphlab
import graphlab.connect.main as glconnect
import graphlab.sys_util as _sys_util
from graphlab.test.util import create_server, start_test_tcp_server
from pandas.util.testing import assert_frame_equal
def _test_save_load_object_helper(testcase, obj, url):
"""
Helper function to test save and load a server side object to a given url.
"""
def cleanup(url):
"""
Remove the saved file from temp directory.
"""
protocol = None
path = None
splits = url.split("://")
if len(splits) > 1:
protocol = splits[0]
path = splits[1]
else:
path = url
        if not protocol or protocol == "local" or protocol == "remote":
tempdir = tempfile.gettempdir()
pattern = path + ".*"
for f in os.listdir(tempdir):
if re.search(pattern, f):
os.remove(os.path.join(tempdir, f))
if isinstance(obj, graphlab.SGraph):
obj.save(url + ".graph")
newobj = graphlab.load_graph(url + ".graph")
testcase.assertItemsEqual(obj.get_fields(), newobj.get_fields())
testcase.assertDictEqual(obj.summary(), newobj.summary())
elif isinstance(obj, graphlab.Model):
obj.save(url + ".model")
newobj = graphlab.load_model(url + ".model")
testcase.assertItemsEqual(obj.list_fields(), newobj.list_fields())
testcase.assertEqual(type(obj), type(newobj))
elif isinstance(obj, graphlab.SFrame):
obj.save(url + ".frame_idx")
newobj = graphlab.load_sframe(url + ".frame_idx")
testcase.assertEqual(obj.shape, newobj.shape)
testcase.assertEqual(obj.column_names(), newobj.column_names())
testcase.assertEqual(obj.column_types(), newobj.column_types())
assert_frame_equal(obj.head(obj.num_rows()).to_dataframe(),
newobj.head(newobj.num_rows()).to_dataframe())
else:
raise TypeError
cleanup(url)
def create_test_objects():
vertices = pandas.DataFrame({'vid': ['1', '2', '3'],
'color': ['g', 'r', 'b'],
'vec': [[.1, .1, .1], [.1, .1, .1], [.1, .1, .1]]})
edges = pandas.DataFrame({'src_id': ['1', '2', '3'],
'dst_id': ['2', '3', '4'],
'weight': [0., 0.1, 1.]})
graph = graphlab.SGraph().add_vertices(vertices, 'vid').add_edges(edges, 'src_id', 'dst_id')
sframe = graphlab.SFrame(edges)
model = graphlab.pagerank.create(graph)
return (graph, sframe, model)
class LocalFSConnectorTests(unittest.TestCase):
@classmethod
def setUpClass(self):
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe, self.model) = create_test_objects()
def _test_read_write_helper(self, url, content):
url = graphlab.util._make_internal_url(url)
glconnect.get_unity().__write__(url, content)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content)
if os.path.exists(url):
os.remove(url)
def test_object_save_load(self):
for prefix in ['', 'local://', 'remote://']:
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.model, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
def test_basic(self):
self._test_read_write_helper(self.tempfile, 'hello world')
self._test_read_write_helper("local://" + self.tempfile + ".csv", 'hello,world,woof')
self._test_read_write_helper("remote://" + self.tempfile + ".csv", 'hello,world,woof')
def test_gzip(self):
self._test_read_write_helper(self.tempfile + ".gz", 'hello world')
self._test_read_write_helper(self.tempfile + ".csv.gz", 'hello world')
self._test_read_write_helper("local://" + self.tempfile + ".csv.gz", 'hello world')
self._test_read_write_helper("remote://" + self.tempfile + ".csv.gz", 'hello world')
def test_exception(self):
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("/root/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("/root/tmp", '.....'))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("/root/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("/root/tmp", '.....'))
self.assertRaises(IOError, lambda: self.graph.save("/root/tmp.graph"))
self.assertRaises(IOError, lambda: self.sframe.save("/root/tmp.frame_idx"))
self.assertRaises(IOError, lambda: self.model.save("/root/tmp.model"))
self.assertRaises(IOError, lambda: graphlab.load_graph("/root/tmp.graph"))
self.assertRaises(IOError, lambda: graphlab.load_sframe("/root/tmp.frame_idx"))
self.assertRaises(IOError, lambda: graphlab.load_model("/root/tmp.model"))
class RemoteFSConnectorTests(unittest.TestCase):
@classmethod
def setUpClass(self):
glconnect.stop()
auth_token = 'graphlab_awesome'
self.server = start_test_tcp_server(auth_token=auth_token)
glconnect.launch(self.server.get_server_addr(), auth_token=auth_token)
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe, self.model) = create_test_objects()
@classmethod
def tearDownClass(self):
glconnect.stop()
self.server.stop()
def _test_read_write_helper(self, url, content):
url = graphlab.util._make_internal_url(url)
glconnect.get_unity().__write__(url, content)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content)
def test_basic(self):
self._test_read_write_helper("remote://" + self.tempfile, 'hello,world,woof')
def test_gzip(self):
self._test_read_write_helper("remote://" + self.tempfile + ".csv.gz", 'hello,world,woof')
def test_object_save_load(self):
prefix = "remote://"
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.model, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
def test_exception(self):
self.assertRaises(ValueError, lambda: self._test_read_write_helper(self.tempfile, 'hello world'))
self.assertRaises(ValueError, lambda: self._test_read_write_helper("local://" + self.tempfile + ".csv.gz", 'hello,world,woof'))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("remote:///root/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("remote:///root/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("remote:///root/tmp", '.....'))
self.assertRaises(IOError, lambda: self.graph.save("remote:///root/tmp.graph"))
self.assertRaises(IOError, lambda: self.sframe.save("remote:///root/tmp.frame_idx"))
self.assertRaises(IOError, lambda: self.model.save("remote:///root/tmp.model"))
self.assertRaises(IOError, lambda: graphlab.load_graph("remote:///root/tmp.graph"))
self.assertRaises(IOError, lambda: graphlab.load_sframe("remote:///root/tmp.frame_idx"))
self.assertRaises(IOError, lambda: graphlab.load_model("remote:///root/tmp.model"))
class HttpConnectorTests(unittest.TestCase):
@classmethod
def setUpClass(self):
self.url = "http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz"
def _test_read_helper(self, url, content_expected):
url = graphlab.util._make_internal_url(url)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content_expected)
def test_read(self):
expected = "\n".join([str(unichr(i + ord('a'))) for i in range(26)])
expected = expected + "\n"
self._test_read_helper(self.url, expected)
def test_exception(self):
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__(self.url, '.....'))
@unittest.skip("Disabling HDFS Connector Tests")
class HDFSConnectorTests(unittest.TestCase):
    # This test requires hadoop to be installed and available in $PATH.
# If not, the tests will be skipped.
@classmethod
def setUpClass(self):
self.has_hdfs = len(_sys_util.get_hadoop_class_path()) > 0
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe, self.model) = create_test_objects()
def _test_read_write_helper(self, url, content_expected):
url = graphlab.util._make_internal_url(url)
glconnect.get_unity().__write__(url, content_expected)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content_expected)
# clean up the file we wrote
status, output = commands.getstatusoutput('hadoop fs -test -e ' + url)
        if status == 0:
commands.getstatusoutput('hadoop fs -rm ' + url)
def test_basic(self):
if self.has_hdfs:
self._test_read_write_helper("hdfs://" + self.tempfile, 'hello,world,woof')
else:
            logging.getLogger(__name__).info("No hdfs available. Test passed.")
def test_gzip(self):
if self.has_hdfs:
self._test_read_write_helper("hdfs://" + self.tempfile + ".gz", 'hello,world,woof')
self._test_read_write_helper("hdfs://" + self.tempfile + ".csv.gz", 'hello,world,woof')
else:
            logging.getLogger(__name__).info("No hdfs available. Test passed.")
def test_object_save_load(self):
if self.has_hdfs:
prefix = "hdfs://"
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.model, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
else:
            logging.getLogger(__name__).info("No hdfs available. Test passed.")
def test_exception(self):
bad_url = "hdfs:///root/"
if self.has_hdfs:
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("hdfs:///"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("hdfs:///tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("hdfs://" + self.tempfile))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__(bad_url + "/tmp", "somerandomcontent"))
self.assertRaises(IOError, lambda: self.graph.save(bad_url + "x.graph"))
self.assertRaises(IOError, lambda: self.sframe.save(bad_url + "x.frame_idx"))
self.assertRaises(IOError, lambda: self.model.save(bad_url + "x.model"))
self.assertRaises(IOError, lambda: graphlab.load_graph(bad_url + "mygraph"))
self.assertRaises(IOError, lambda: graphlab.load_sframe(bad_url + "x.frame_idx"))
self.assertRaises(IOError, lambda: graphlab.load_model(bad_url + "x.model"))
else:
            logging.getLogger(__name__).info("No hdfs available. Test passed.")
@unittest.skip("Disabling S3 Connector Tests")
class S3ConnectorTests(unittest.TestCase):
# This test requires aws cli to be installed. If not, the tests will be skipped.
@classmethod
def setUpClass(self):
status, output = commands.getstatusoutput('aws s3api list-buckets')
        self.has_s3 = (status == 0)
self.standard_bucket = None
self.regional_bucket = None
        # Use aws cli s3api to find a bucket with "gl-testdata" in the name, and use it as our test bucket.
        # Temp files will be read from / written to the test bucket's /tmp folder and cleared on exit.
if self.has_s3:
try:
json_output = json.loads(output)
bucket_list = [b['Name'] for b in json_output['Buckets']]
assert 'gl-testdata' in bucket_list
assert 'gl-testdata-oregon' in bucket_list
self.standard_bucket = 'gl-testdata'
self.regional_bucket = 'gl-testdata-oregon'
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe, self.model) = create_test_objects()
except:
                logging.getLogger(__name__).warning("Failed to parse the s3api output as json. Please check your awscli version.")
self.has_s3 = False
def _test_read_write_helper(self, url, content_expected):
s3url = graphlab.util._make_internal_url(url)
glconnect.get_unity().__write__(s3url, content_expected)
content_read = glconnect.get_unity().__read__(s3url)
self.assertEquals(content_read, content_expected)
(status, output) = commands.getstatusoutput('aws s3 rm --region us-west-2 ' + url)
        if status != 0:
logging.getLogger(__name__).warning("Cannot remove file: " + url)
def test_basic(self):
if self.has_s3:
for bucket in [self.standard_bucket, self.regional_bucket]:
self._test_read_write_helper("s3://" + bucket + self.tempfile, 'hello,world,woof')
else:
            logging.getLogger(__name__).info("No s3 bucket available. Test passed.")
def test_gzip(self):
if self.has_s3:
self._test_read_write_helper("s3://" + self.standard_bucket + self.tempfile + ".gz", 'hello,world,woof')
else:
            logging.getLogger(__name__).info("No s3 bucket available. Test passed.")
def test_object_save_load(self):
if self.has_s3:
prefix = "s3://" + self.standard_bucket
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.model, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
else:
            logging.getLogger(__name__).info("No s3 bucket available. Test passed.")
def test_exception(self):
if self.has_s3:
bad_bucket = "i_am_a_bad_bucket"
prefix = "s3://" + bad_bucket
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("s3:///"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("s3://" + self.standard_bucket + "/somerandomfile"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("s3://" + "/somerandomfile"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("s3://" + "/somerandomfile", "somerandomcontent"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("s3://" + self.standard_bucket + "I'amABadUrl/", "somerandomcontent"))
self.assertRaises(IOError, lambda: self.graph.save(prefix + "/x.graph"))
self.assertRaises(IOError, lambda: self.sframe.save(prefix + "/x.frame_idx"))
self.assertRaises(IOError, lambda: self.model.save(prefix + "/x.model"))
self.assertRaises(IOError, lambda: graphlab.load_graph(prefix + "/x.graph"))
self.assertRaises(IOError, lambda: graphlab.load_sframe(prefix + "/x.frame_idx"))
self.assertRaises(IOError, lambda: graphlab.load_model(prefix + "/x.model"))
else:
            logging.getLogger(__name__).info("No s3 bucket available. Test passed.")
| agpl-3.0 |
rhattersley/iris | lib/iris/tests/unit/plot/__init__.py | 9 | 4522 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the :mod:`iris.plot` module."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from iris.plot import _broadcast_2d as broadcast
from iris.coords import AuxCoord
from iris.tests.stock import simple_2d, lat_lon_cube
@tests.skip_plot
class TestGraphicStringCoord(tests.GraphicsTest):
def setUp(self):
super(TestGraphicStringCoord, self).setUp()
self.cube = simple_2d(with_bounds=True)
self.cube.add_aux_coord(AuxCoord(list('abcd'),
long_name='str_coord'), 1)
self.lat_lon_cube = lat_lon_cube()
def tick_loc_and_label(self, axis_name, axes=None):
# Intentional lazy import so that subclasses can have an opportunity
# to change the backend.
import matplotlib.pyplot as plt
# Draw the plot to 'fix' the ticks.
if axes:
axes.figure.canvas.draw()
else:
axes = plt.gca()
plt.draw()
axis = getattr(axes, axis_name)
locations = axis.get_majorticklocs()
labels = [tick.get_text() for tick in axis.get_ticklabels()]
return list(zip(locations, labels))
def assertBoundsTickLabels(self, axis, axes=None):
actual = self.tick_loc_and_label(axis, axes)
expected = [(-1.0, ''), (0.0, 'a'), (1.0, 'b'),
(2.0, 'c'), (3.0, 'd'), (4.0, '')]
self.assertEqual(expected, actual)
def assertPointsTickLabels(self, axis, axes=None):
actual = self.tick_loc_and_label(axis, axes)
expected = [(0.0, 'a'), (1.0, 'b'), (2.0, 'c'), (3.0, 'd')]
self.assertEqual(expected, actual)
@tests.skip_plot
class MixinCoords(object):
"""
Mixin class of common plotting tests providing 2-dimensional
permutations of coordinates and anonymous dimensions.
"""
def _check(self, u, v, data=None):
self.assertEqual(self.mpl_patch.call_count, 1)
if data is not None:
(actual_u, actual_v, actual_data), _ = self.mpl_patch.call_args
self.assertArrayEqual(actual_data, data)
else:
(actual_u, actual_v), _ = self.mpl_patch.call_args
self.assertArrayEqual(actual_u, u)
self.assertArrayEqual(actual_v, v)
def test_foo_bar(self):
self.draw_func(self.cube, coords=('foo', 'bar'))
u, v = broadcast(self.foo, self.bar)
self._check(u, v, self.data)
def test_bar_foo(self):
self.draw_func(self.cube, coords=('bar', 'foo'))
u, v = broadcast(self.bar, self.foo)
self._check(u, v, self.dataT)
def test_foo_0(self):
self.draw_func(self.cube, coords=('foo', 0))
u, v = broadcast(self.foo, self.bar_index)
self._check(u, v, self.data)
def test_1_bar(self):
self.draw_func(self.cube, coords=(1, 'bar'))
u, v = broadcast(self.foo_index, self.bar)
self._check(u, v, self.data)
def test_1_0(self):
self.draw_func(self.cube, coords=(1, 0))
u, v = broadcast(self.foo_index, self.bar_index)
self._check(u, v, self.data)
def test_0_foo(self):
self.draw_func(self.cube, coords=(0, 'foo'))
u, v = broadcast(self.bar_index, self.foo)
self._check(u, v, self.dataT)
def test_bar_1(self):
self.draw_func(self.cube, coords=('bar', 1))
u, v = broadcast(self.bar, self.foo_index)
self._check(u, v, self.dataT)
def test_0_1(self):
self.draw_func(self.cube, coords=(0, 1))
u, v = broadcast(self.bar_index, self.foo_index)
self._check(u, v, self.dataT)
| lgpl-3.0 |
ctogle/dilapidator | test/geometry/quat_tests.py | 1 | 6809 | from dilap.geometry.quat import quat
from dilap.geometry.vec3 import vec3
import dilap.geometry.tools as dpr
import matplotlib.pyplot as plt
import unittest,numpy,math
import pdb
#python3 -m unittest discover -v ./ "*tests.py"
class test_quat(unittest.TestCase):
def test_av(self):
a = 3*dpr.PI4
u1,u2,u3 = vec3(1,0,0),vec3(0,-1,0),vec3(0,0,1)
q1,q2 = quat(0,0,0,0).av(a,u1),quat(0,0,0,0).av(a,u2)
q3,q4 = quat(0,0,0,0).av(-a,u3),quat(0,0,0,0).av(-a,u2)
self.assertTrue(q1.w > 0.1)
self.assertTrue(q1.x > 0.1)
self.assertTrue(dpr.isnear(q1.y,0))
self.assertTrue(dpr.isnear(q1.z,0))
self.assertTrue(q2.w > 0.1)
self.assertTrue(dpr.isnear(q2.x,0))
self.assertTrue(q2.y < -0.1)
self.assertTrue(dpr.isnear(q2.z,0))
self.assertTrue(q3.w > 0.1)
self.assertTrue(dpr.isnear(q3.x,0))
self.assertTrue(dpr.isnear(q3.y,0))
self.assertTrue(q3.z < -0.1)
self.assertFalse(q2 == q4.cp().flp())
self.assertTrue(q2 == q4.cnj())
def test_uu(self):
u1,u2,u3 = vec3(1,0,0),vec3(0,-1,0),vec3(0,0,1)
q1,q2 = quat(0,0,0,0).uu(u1,u2),quat(0,0,0,0).uu(u1,u3)
q3,q4 = quat(0,0,0,0).uu(u2,u3),quat(0,0,0,0).uu(u3,u2)
self.assertTrue(q1.w > 0.1)
self.assertTrue(dpr.isnear(q1.x,0))
self.assertTrue(dpr.isnear(q1.y,0))
self.assertTrue(q1.z < -0.1)
self.assertTrue(q2.w > 0.1)
self.assertTrue(dpr.isnear(q2.x,0))
self.assertTrue(q2.y < -0.1)
self.assertTrue(dpr.isnear(q2.z,0))
self.assertTrue(q3 == q4.cnj())
def test_toxy(self):
q1 = quat(0,0,0,0).toxy(vec3(0,0,-1))
#print('toxy\v\t',q1)
self.assertEqual(q1.w,0)
self.assertEqual(q1.x,1)
def test_cp(self):
q1 = quat(1,2,3,4)
self.assertTrue(q1 is q1)
self.assertFalse(q1 is q1.cp())
self.assertTrue(q1 == q1.cp())
#def test_cpf(self):
def test_isnear(self):
q1,q2 = quat(1,1,1,0),quat(1,1,1,0.1)
q3,q4 = quat(1,1,1,1),quat(1,1.000001,1,1)
self.assertEqual(q1.isnear(q1),1)
self.assertEqual(q3.isnear(q3),1)
self.assertEqual(q1.isnear(q2),0)
self.assertEqual(q2.isnear(q1),0)
self.assertEqual(q1.isnear(q3),0)
self.assertEqual(q2.isnear(q3),0)
self.assertEqual(q2.isnear(q4),0)
self.assertEqual(q3.isnear(q4),1)
def test_mag2(self):
q1,q2,q3 = quat(1,0,0,0),quat(1,1,1,0),quat(0,2,5,11)
self.assertEqual(dpr.isnear(q1.mag2(),1),1)
self.assertEqual(dpr.isnear(q2.mag2(),3),1)
self.assertEqual(dpr.isnear(q3.mag2(),150),1)
def test_mag(self):
q1,q2,q3 = quat(1,0,0,0),quat(1,1,1,0),quat(0,2,5,11)
self.assertEqual(dpr.isnear(q1.mag(),1),1)
self.assertEqual(dpr.isnear(q2.mag(),math.sqrt(3)),1)
self.assertEqual(dpr.isnear(q3.mag(),math.sqrt(150)),1)
def test_nrm(self):
q1,q2,q3 = quat(1,0,0,0),quat(1,1,1,0),quat(0,2,5,11)
self.assertEqual(dpr.isnear(q1.cp().nrm().mag(),1),1)
self.assertEqual(dpr.isnear(q2.cp().nrm().mag(),1),1)
self.assertEqual(dpr.isnear(q3.cp().nrm().mag(),1),1)
self.assertTrue(q1.cp().nrm().mag() == q1.mag())
self.assertTrue(q1.nrm() is q1)
self.assertFalse(q2.cp().nrm().mag() == q2.mag())
self.assertTrue(q2.nrm() is q2)
self.assertFalse(q3.cp().nrm().mag() == q3.mag())
self.assertTrue(q3.nrm() is q3)
def test_flp(self):
q1,q2 = quat(1,0,0,0),quat(1,1,1,0)
q3,q4 = quat(0,2,5,11),quat(-1,1,1,0)
self.assertFalse(q1.cp().flp() == q1)
self.assertFalse(q2.cp().flp() == q2)
self.assertTrue(q3.cp().flp() == q3)
self.assertFalse(q4.cp().flp() == q4)
self.assertTrue(q2.cp().flp() == q4)
self.assertTrue(q1.flp() is q1)
self.assertTrue(q2.flp() is q2)
self.assertTrue(q3.flp() is q3)
self.assertTrue(q4.flp() is q4)
def test_uscl(self):
q1,q2 = quat(1,0,0,0),quat(1,1,1,0)
q3,q4 = quat(0,2,5,11),quat(0,1,2.5,5.5)
self.assertTrue(q1.cp().uscl(1) == q1)
self.assertFalse(q1.cp().uscl(3) == q1)
self.assertTrue(q2.cp().uscl(1) == q2)
self.assertFalse(q2.cp().uscl(3) == q2)
self.assertTrue(q3.cp().uscl(0.5) == q4)
self.assertTrue(q1.uscl(1) is q1)
def test_cnj(self):
q1,q2 = quat(1,0,0,0),quat(1,1,1,0)
q3,q4 = quat(-1,2,5,11),quat(1,-2,-5,-11)
self.assertTrue(q1.cp().cnj() == q1)
self.assertTrue(q1.cnj() is q1)
self.assertFalse(q2.cp().cnj() == q2)
self.assertFalse(q3.cnj() == q4)
def test_inv(self):
a1,v1 = dpr.PI4,vec3(0,0,1)
a2,v2 = dpr.threePI4,vec3(0,0,1)
q1,q2 = quat(1,0,0,0).av(a1,v1),quat(1,1,1,0).av(a2,v2)
self.assertEqual(q1.cp().cnj(),q1.inv())
self.assertEqual(q2.cp().cnj(),q2.inv())
self.assertFalse(q1.inv() is q1)
def test_add(self):
q1,q2 = quat(0.5,0.3,-2.2,3),quat(1,1.1,2,-0.5)
q3 = quat(1.5,1.4,-0.2,2.5)
self.assertEqual(q1.add(q2),q3)
self.assertFalse(q1.add(q2) is q1)
def test_sub(self):
q1,q2 = quat(0.5,0.3,-2.2,3),quat(1,1.1,2,-0.5)
q3 = quat(-0.5,-0.8,-4.2,3.5)
self.assertEqual(q1.sub(q2),q3)
self.assertFalse(q1.sub(q2) is q1)
def test_mul(self):
a1,v1 = dpr.PI4,vec3(0,0,1)
a2,v2 = dpr.threePI4,vec3(0,0,1)
q1,q2 = quat(1,0,0,0).av(a1,v1),quat(1,1,1,0).av(a2,v1)
q3 = quat(0,1,0,0).av(a1+a2,v2)
self.assertTrue(q1.mul(q2) == q3)
self.assertFalse(q1.mul(q2) is q1)
def test_rot(self):
a1,v1 = dpr.PI4,vec3(0,0,1)
a2,v2 = dpr.PI2,vec3(0,0,1)
q1,q2 = quat(1,0,0,0).av(a1,v1),quat(1,1,1,0).av(a1,v1)
q3 = quat(0,1,0,0).av(a2,v2)
self.assertTrue(q1.rot(q2) == q3)
self.assertTrue(q1.rot(q2) is q1)
#def test_rotps(self):
def test_dot(self):
a1,v1 = dpr.PI4,vec3(0,0,1)
a2,v2 = dpr.PI2,vec3(0,1,0)
q1,q2 = quat(1,0,0,0).av(a1,v1),quat(1,1,1,0).av(a1,v1)
q3 = quat(0,1,0,0).av(a2,v2)
q4 = quat(0,1,0,0).av(0,v1)
self.assertTrue(dpr.isnear(q1.dot(q2),q1.mag2()))
self.assertFalse(dpr.isnear(q1.dot(q3),0))
self.assertTrue(dpr.isnear(q3.dot(q4),q3.w))
def test_slerp(self):
a1,v1 = dpr.PI4,vec3(0,0,1)
a2,v2 = dpr.PI,vec3(0,0,1)
q1,q2 = quat(1,0,0,0).av(0,v1),quat(1,1,1,0).av(a1,v1)
q3 = quat(0,1,0,0).av(a2,v2)
self.assertEqual(q1.slerp(q3,0.25),q2)
self.assertFalse(q1.slerp(q3,0.25) is q1)
if __name__ == '__main__':
unittest.main()
| mit |
Fireblend/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
    # the component-wise standard deviation of the first 50 features is
    # roughly 3 times that of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
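    # h is the expected per-sample log-likelihood under the true generating
    # distribution, an isotropic p-dimensional Gaussian with standard
    # deviation 0.1 (i.e. the negative of its differential entropy), so the
    # PCA score should come out close to it.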
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
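    # The data consist of isotropic unit-variance noise plus a single strong
    # rank-one direction (rng.randn(n, 1) * [3, 4, 5]), so the held-out
    # likelihood should peak at n_components=1.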
assert_true(ll.argmax() == 1)
| bsd-3-clause |
deehzee/cs231n | assignment2/cs231n/classifiers/neural_net.py | 2 | 13071 | import numpy as np
import matplotlib.pyplot as plt
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network. The net has an input
    dimension of D, a hidden layer dimension of H, and performs
classification over C classes. We train the network with a softmax
loss function and L2 regularization on the weight matrices. The
network uses a ReLU nonlinearity after the first fully connected
layer.
In other words, the network has the following architecture:
input - fully connected layer - ReLU - fully connected layer -
- softmax
The outputs of the second fully-connected layer are the scores for
each class.
"""
def __init__(self, input_size, hidden_size,
output_size, std=1e-4):
"""
Initialize the model. Weights are initialized to small random
values and biases are initialized to zero. Weights and biases
are stored in the variable self.params, which is a dictionary
with the following keys:
W1: First layer weights; has shape (D, H)
b1: First layer biases; has shape (H,)
W2: Second layer weights; has shape (H, C)
b2: Second layer biases; has shape (C,)
Inputs:
- input_size: The dimension D of the input data.
- hidden_size: The number of neurons H in the hidden layer.
- output_size: The number of classes C.
"""
self.params = {}
self.params['W1'] = std * np.random.randn(
input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = std * np.random.randn(
hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
def loss(self, X, y=None, reg=0.0):
"""
Compute the loss and gradients for a two layer fully connected
neural network.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training
sample.
- y: Vector of training labels. y[i] is the label for X[i],
and each y[i] is an integer in the range 0 <= y[i] < C. This
parameter is optional; if it is not passed then we only
return scores, and if it is passed then we instead return
the loss and gradients.
- reg: Regularization strength.
Returns:
If y is None, return a matrix scores of shape (N, C) where
scores[i, c] is the score for class c on input X[i].
If y is not None, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this
batch of training samples.
- grads: Dictionary mapping parameter names to gradients of
those parameters with respect to the loss function; has the
same keys as self.params.
"""
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
N, D = X.shape
H, C = W2.shape
# Compute the forward pass
scores = None
##############################################################
# TODO: Perform the forward pass, computing the class scores #
# for the input. Store the result in the scores variable, #
# which should be an array of shape (N, C). #
##############################################################
# X = Input (N x D) [input]
# X1 = X.W1 + b1 (N x H) [FC]
# X2 = ReLU(X1) (N x H) [ReLU]
# X3 = X2.W2 + b2 (N x C) [FC]
# X4 = softmax(X3) (N x C) [softmax]
X1 = X.dot(W1) + b1 # output of layer1 (FC)
X2 = np.maximum(0, X1) # output of layer2 (ReLU)
X3 = X2.dot(W2) + b2 # output of layer3 (FC)
scores = X3
##############################################################
# END OF YOUR CODE #
##############################################################
# If the targets are not given then jump out, we're done
if y is None:
return scores
# Compute the loss
loss = None
##############################################################
# TODO: Finish the forward pass, and compute the loss. This #
# should include both the data loss and L2 regularization #
# for W1 and W2. Store the result in the variable loss, #
# which should be a scalar. Use the Softmax classifier loss. #
# So that your results match ours, multiply the #
# regularization loss by 0.5 #
##############################################################
cs = -np.max(scores, axis=1, keepdims=True)
# ^^ Needed for numerical stability
exps = np.exp(scores + cs)
expsum = np.sum(exps, axis=1, keepdims=True)
probs = exps / expsum
X4 = probs
losses = -np.log(probs[np.arange(N), y])
loss = np.sum(losses)
# Normalize loss
loss /= N
# Add regularization loss
loss += 0.5 * reg * (np.sum(W1 * W1) + np.sum(W2 * W2))
##############################################################
# END OF YOUR CODE #
##############################################################
# Backward pass: compute gradients
grads = {}
##############################################################
# TODO: Compute the backward pass, computing the derivatives #
# of the weights and biases. Store the results in the grads #
# dictionary. For example, grads['W1'] should store the #
# gradient on W1, and be a matrix of same size #
##############################################################
# X = Input (N x D) [input]
# X1 = X.W1 + b1 (N x H) [FC]
# X2 = ReLU(X1) (N x H) [ReLU]
# X3 = X2.W2 + b2 (N x C) [FC]
# X4 = softmax(X3) (N x C) [softmax]
# dX3 := dL/dX3 (N x C)
# dX3[i,j] = -(j==y[i]) + X4[i,j]
indicator = np.zeros_like(X4)
indicator[np.arange(N), y] += 1
dX3 = -indicator + X4
dX3 /= N
# dW2 := dL/dW2 (H x C)
# dW2 = X2^t . dX3
dW2 = np.dot(X2.T, dX3)
# db2 := dL/db2 (1 x C)
# db2 = [1, ..., 1] . dX3
db2 = np.sum(dX3, axis=0)
# dX2 := dL/dX2 (N x H)
# dL/dX2 = dL/dX3 . W2^t
dX2 = np.dot(dX3, W2.T)
# dX1 := dL/dX1 (N x H)
# dX1 = (X1 > 0) * dX2
dX1 = (X1 > 0) * dX2
# dW1 := dL/dW1 (D x H)
# dW1 = X^t . dX1
dW1 = np.dot(X.T, dX1)
# db1 := dL/db1 (1 x H)
# db1 = [1, ..., 1] . dX1
db1 = np.sum(dX1, axis=0)
# Add regulariation to gradients
dW1 += reg * W1
dW2 += reg * W2
# Put the results in the return dict
grads = {
'W1': dW1,
'b1': db1,
'W2': dW2,
'b2': db2,
}
##############################################################
# END OF YOUR CODE #
##############################################################
return loss, grads
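    # A sketch of a centered-difference check for the analytic gradients above
    # (illustrative only; `net`, `X`, `y` and the parameter name `param` are
    # assumed to exist, with `param` naming a weight matrix such as 'W1'):
    #
    #   h = 1e-5
    #   W = net.params[param]
    #   ix = (0, 0)                      # any entry of the matrix
    #   old = W[ix]
    #   W[ix] = old + h; f_plus = net.loss(X, y, reg=0.05)[0]
    #   W[ix] = old - h; f_minus = net.loss(X, y, reg=0.05)[0]
    #   W[ix] = old                      # restore the parameter
    #   numeric = (f_plus - f_minus) / (2 * h)
    #   analytic = net.loss(X, y, reg=0.05)[1][param][ix]
    #   # numeric and analytic should agree closely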
def train(self, X, y, X_val, y_val,
learning_rate=1e-3, learning_rate_decay=0.95,
reg=1e-5, num_iters=100,
batch_size=200, verbose=False):
"""
Train this neural network using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) giving training data.
        - y: A numpy array of shape (N,) giving training labels;
y[i] = c means that X[i] has label c, where 0 <= c < C.
- X_val: A numpy array of shape (N_val, D) giving validation
data.
- y_val: A numpy array of shape (N_val,) giving validation
labels.
- learning_rate: Scalar giving learning rate for optimization.
- learning_rate_decay: Scalar giving factor used to decay the
learning rate after each epoch.
- reg: Scalar giving regularization strength.
- num_iters: Number of steps to take when optimizing.
- batch_size: Number of training examples to use per step.
- verbose: boolean; if true print progress during
optimization.
"""
num_train = X.shape[0]
iterations_per_epoch = max(num_train // batch_size, 1)
# Use SGD to optimize the parameters in self.model
loss_history = []
train_acc_history = []
val_acc_history = []
        for it in range(num_iters):
X_batch = None
y_batch = None
##########################################################
# TODO: Create a random minibatch of training data and #
# labels, storing them in X_batch and y_batch #
# respectively. #
##########################################################
batch_idxs = np.random.choice(num_train, batch_size)
X_batch = X[batch_idxs]
y_batch = y[batch_idxs]
##########################################################
# END OF YOUR CODE #
##########################################################
# Compute loss and gradients using the current minibatch
loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
loss_history.append(loss)
##########################################################
# TODO: Use the gradients in the grads dictionary to #
# update the parameters of the network (stored in the #
# dictionary self.params) using stochastic gradient #
# descent. You'll need to use the gradients stored in #
# the grads dictionary defined above. #
##########################################################
self.params['W1'] -= learning_rate * grads['W1']
self.params['b1'] -= learning_rate * grads['b1']
self.params['W2'] -= learning_rate * grads['W2']
self.params['b2'] -= learning_rate * grads['b2']
##########################################################
# END OF YOUR CODE #
##########################################################
if verbose and it % 100 == 0:
print('iteration {}/{}: loss {:f}'.format(
it, num_iters, loss))
# Every epoch, check train and val accuracy and decay
# learning rate.
if it % iterations_per_epoch == 0:
# Check accuracy
train_acc = (self.predict(X_batch) == y_batch).mean()
val_acc = (self.predict(X_val) == y_val).mean()
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
# Decay learning rate
learning_rate *= learning_rate_decay
return {
'loss_history': loss_history,
'train_acc_history': train_acc_history,
'val_acc_history': val_acc_history,
}
def predict(self, X):
"""
Use the trained weights of this two-layer network to predict
labels for data points. For each data point we predict scores
for each of the C classes, and assign each data point to the
class with the highest score.
Inputs:
- X: A numpy array of shape (N, D) giving N D-dimensional data
points to classify.
Returns:
- y_pred: A numpy array of shape (N,) giving predicted labels
for each of the elements of X. For all i, y_pred[i] = c
means that X[i] is predicted to have class c, where
0 <= c < C.
"""
y_pred = None
##############################################################
# TODO: Implement this function; it should be VERY simple! #
##############################################################
scores = self.loss(X)
y_pred = np.argmax(scores, axis=1)
##############################################################
# END OF YOUR CODE #
##############################################################
return y_pred
| mit |
rvraghav93/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 95 | 6971 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
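# The key pattern demonstrated below (roughly): fit the base classifier on the
# training split only, then wrap it with cv="prefit" so that the sigmoid
# calibration itself is fitted on the held-out validation split, i.e.
#   sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
#   sig_clf.fit(X_valid, y_valid)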
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
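# Apply each per-class sigmoid calibrator to its own column of uncalibrated
# probabilities, then renormalize each row so the calibrated probabilities sum
# to one again (mirroring what CalibratedClassifierCV does internally for
# multiclass problems).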
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
xiaoxiamii/scikit-learn | examples/preprocessing/plot_function_transformer.py | 161 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
plt.show()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
s=50,
)
plt.show()
| bsd-3-clause |
PepSalehi/scipy_2015_sklearn_tutorial | notebooks/figures/plot_digits_datasets.py | 19 | 2750 | # Taken from example in scikit-learn examples
# Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
def digits_plot():
digits = datasets.load_digits(n_class=6)
n_digits = 500
X = digits.data[:n_digits]
y = digits.target[:n_digits]
n_samples, n_features = X.shape
n_neighbors = 30
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(X.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 1e5:
# don't show points that are too close
# set a high threshold to basically turn this off
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
n_img_per_row = 10
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
print("Computing PCA projection")
pca = decomposition.PCA(n_components=2).fit(X)
X_pca = pca.transform(X)
plot_embedding(X_pca, "Principal Components projection of the digits")
plt.figure()
plt.matshow(pca.components_[0, :].reshape(8, 8), cmap="gray")
plt.axis('off')
plt.figure()
plt.matshow(pca.components_[1, :].reshape(8, 8), cmap="gray")
plt.axis('off')
plt.show()
| cc0-1.0 |
pkruskal/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimensions (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus, even with no noise, clustering
using this distance will not separate out waveforms 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. Relative to the dense
differences between the waveform classes, this sparse noise is penalized
much less by the l1 norm (i.e. "cityblock" distance) than by the l2 norm
("euclidean" distance). This can be seen on the inter-class distance
matrices (each normalized by its largest entry): the values on the diagonal,
which characterize the spread of each class, are much bigger for the
Euclidean distance than for the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
so the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
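# Rough arithmetic behind the docstring's l1-vs-l2 point (illustrative only):
# a dense class-to-class difference over n = 2000 time points with typical
# magnitude a has l1 norm ~ 2000*a and l2 norm ~ sqrt(2000)*a ~ 45*a, while
# sparse noise hitting ~6% of the points (~120 of them) with magnitude b has
# l1 norm ~ 120*b and l2 norm ~ 11*b.  The noise-to-signal ratio is therefore
# about 0.06*b/a under l1 but 0.24*b/a under l2, which is why the cityblock
# metric separates the classes more cleanly here.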
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
themrmax/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 28 | 18031 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true, assert_greater
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
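    # Each row is rescaled by the absolute value of its sum; the rows keep
    # zero and negative entries on purpose, so the helper has to cope with
    # degenerate "probabilities" (the assertions below check that the output
    # stays finite and preserves the per-row ordering).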
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_oneclass_adaboost_proba():
# Test predict_proba robustness for one class label input.
# In response to issue #7501
# https://github.com/scikit-learn/scikit-learn/issues/7501
y_t = np.ones(len(X))
clf = AdaBoostClassifier().fit(X, y_t)
assert_array_equal(clf.predict_proba(X), np.ones((len(X), 1)))
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Check we used multiple estimators
assert_greater(len(clf.estimators_), 1)
# Check for distinct random states (see issue #7408)
assert_equal(len(set(est.random_state for est in clf.estimators_)),
len(clf.estimators_))
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
reg = AdaBoostRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
assert score > 0.85
# Check we used multiple estimators
assert_true(len(reg.estimators_) > 1)
# Check for distinct random states (see issue #7408)
assert_equal(len(set(est.random_state for est in reg.estimators_)),
len(reg.estimators_))
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
    AdaBoostRegressor should work without sample_weights in the base estimator.
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
| bsd-3-clause |
stemblab/intuitive-cs | py/recon.py | 2 | 4493 | #!puzlet
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import proj3d
# Plot a vector as a line segment with a ball at one end.
def plot_vec(start,end,label,color):
ax.plot([start[0],end[0]],[start[1],end[1]],[start[2],end[2]],
label=label,color=color,linewidth=2)
ax.scatter(end[0],end[1],end[2],marker='o',color=color)
def set_axes():
ax.set_autoscale_on(False)
ax.set_xlabel(r'$x_0$',fontsize=20)
ax.set_ylabel(r'$x_1$',fontsize=20)
ax.set_zlabel(r'$x_2$',fontsize=20)
ax.set_xlim(-0.8, 1.2)
ax.set_ylim(-0.2, 1.2)
ax.set_zlim(0, 1.4)
ax.set_xticks([-0.5,0,0.5,1])
ax.set_yticks([0,0.5,1])
ax.set_zticks([0,0.5,1])
fig = plt.figure()
ax = fig.gca(projection='3d')
set_axes()
origin=[0,0,0]
plot_vec(origin,[1,0,0],label='constant',color='blue')
plot_vec(origin,[0,1,0],label='line',color='green')
plot_vec(origin,[0,0,1],label='parabola',color='red')
ax.legend()
fig.savefig("recon1_1.svg", transparent=True, bbox_inches='tight', pad_inches=0.15)
ax.text(1.3,0,0.9, r'$Ax=b$', backgroundcolor='#fcffc9',
ha='center', va='bottom', size=18, zorder=10)
# Ax=b
A = np.array([[1, -1, 1], [1, 2, 4]])
b = np.array([1, 4])
# planes
x0 = np.arange(-1, 1, 0.1)
x1 = np.arange(-1, 1, 0.1)
xx0, yy0 = np.meshgrid(x0, x1)
ax.plot_surface(xx0, yy0, 1-xx0+yy0,
rstride=2, cstride=2, alpha=0.1, color='y')
ax.plot_surface(xx0, yy0, 1-xx0/4.-yy0/2.,
rstride=2, cstride=2, alpha=0.1,color='m')
# solution to Ax=b
pinvA = np.linalg.pinv(A)
U = np.array([0,0,1]) # np.dot(pinvA,b) # One solution
w = np.array([1.5, 0, 0]) # Arbitrary vector to get another solution
N = np.dot((np.eye(3) - np.dot(pinvA,A)),w)
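# Illustrative sanity check (added): U is a particular solution of Ax=b and N
# lies in the null space of A, so every point U + h*N on the line plotted
# below is also a solution.
assert np.allclose(np.dot(A, U), b)
assert np.allclose(np.dot(A, N), 0)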
ax.plot(*zip(U+N*2/3.,U-N*4/3.),color='k',linewidth=3,
label=r'$x$ (not sparse)')
ax.legend()
fig.savefig("recon1_2.svg", transparent=True, bbox_inches='tight', pad_inches=0.15)
ax.text(0,0,1.2, r'1-sparse $x$', backgroundcolor='#fcffc9',
ha='center', va='bottom', size=16, zorder=10)
ax.scatter(0,0,1,marker='*',s=400,color='r',label=r'$x$ (1-sparse)')
ax.legend()
fig.savefig("recon1_3.svg", transparent=True, bbox_inches='tight', pad_inches=0.15)
def plot_norms(l, h, U):
Nl = len(l)
f, axarr = plt.subplots(Nl, 1)
def plot(l, n):
ax.set_title('$l_%s$-norm' % str(n))
ax.scatter(np.array(h), np.array(l))
# Show U value for minimum of norm
#m = np.argmin(l)
#l_min = l[m]
#h_min = h[m]
#U_min = U[m]
#ax_lim = ax.axis()
#offset = -0.05*(ax_lim[3] - ax_lim[2])
#v = lambda d: '{0:.2f}'.format(U_min[d])
#label = "U=[%s, %s, %s]" % (v(0), v(1), v(2))
#ax.text(h_min, l_min+offset, label, va='top')
for n in range(Nl):
ax = axarr[n]
plot(l[n], n)
plt.tight_layout()
Np = 201 # number of points to plot (Must be odd to include 0!)
h = np.array(np.linspace(-1, 1, Np)) # null vector multiplier
Y=U.reshape(3,1)+np.dot(N.reshape(3,1),h.reshape(1,Np))
# 3xNp array of x candidates
l=np.zeros((3,Np))
for n in range(3):
l[n,:] = np.apply_along_axis(np.linalg.norm, 0, Y, n)
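# For a vector argument, np.linalg.norm with ord = 0, 1, 2 returns the number
# of non-zero entries, the sum of absolute values, and the Euclidean length,
# so row n of l holds the l_n "norm" of every candidate solution.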
# See: http://matplotlib.org/examples/axes_grid/demo_parasite_axes2.html
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
plt.clf()
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
par2 = host.twinx()
offset = 60
new_fixed_axis = par2.get_grid_helper().new_fixed_axis
par2.axis["right"] = new_fixed_axis(loc="right",
axes=par2,
offset=(offset, 0))
par2.axis["right"].toggle(all=True)
host.set_xlim(-1, 1)
host.set_ylim(1, 3.5)
host.set_xlabel("Distance from Sparse Solution ($d$)")
host.set_ylabel(r"$\||x\||_0$")
par1.set_ylabel(r"$\||x\||_1$")
par2.set_ylabel(r"$\||x\||_2$")
p1, = host.plot(h, l[0], label=r"$\||x\||_0$")
p2, = par1.plot(h, l[1], label=r"$\||x\||_1$")
p3, = par2.plot(h, l[2], label=r"$\||x\||_2$")
par1.set_ylim(1, 3)
par2.set_ylim(0, 2)
host.legend()
host.axis["left"].label.set_color(p1.get_color())
host.axis["left"].label.set_fontsize(20)
par1.axis["right"].label.set_color(p2.get_color())
par1.axis["right"].label.set_fontsize(20)
par2.axis["right"].label.set_color(p3.get_color())
par2.axis["right"].label.set_fontsize(20)
fig.savefig("norms.svg", transparent=True, bbox_inches='tight', pad_inches=0.15)
| mit |
Crompulence/cpl-library | examples/interactive_plot_example/python/CFD_recv_and_plot_grid_interactive.py | 1 | 3724 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from mpi4py import MPI
from cplpy import CPL
from draw_grid import draw_grid
#initialise MPI and CPL
comm = MPI.COMM_WORLD
CPL = CPL()
CFD_COMM = CPL.init(CPL.CFD_REALM)
nprocs_realm = CFD_COMM.Get_size()
# Parameters of the cpu topology (cartesian grid)
npxyz = np.array([1, 1, 1], order='F', dtype=np.int32)
NProcs = np.product(npxyz)
xyzL = np.array([10.0, 10.0, 10.0], order='F', dtype=np.float64)
xyz_orig = np.array([0.0, 0.0, 0.0], order='F', dtype=np.float64)
ncxyz = np.array([16, 6, 16], order='F', dtype=np.int32)
if (nprocs_realm != NProcs):
print("Non-coherent number of processes in CFD ", nprocs_realm,
" no equal to ", npxyz[0], " X ", npxyz[1], " X ", npxyz[2])
MPI.Abort(errorcode=1)
#Setup coupled simulation
cart_comm = CFD_COMM.Create_cart([npxyz[0], npxyz[1], npxyz[2]])
CPL.setup_cfd(cart_comm, xyzL, xyz_orig, ncxyz)
#Plot output
fig, ax = plt.subplots(1,1)
plt.subplots_adjust(bottom=0.25)
axslider = plt.axes([0.25, 0.1, 0.65, 0.03])
freq = 1.
sfreq = Slider(axslider, 'Freq', 0.1, 2.0, valinit=freq)
def update(val):
    global freq
    freq = sfreq.val
    print("CHANGED", freq)
sfreq.on_changed(update)
plt.ion()
plt.show()
# === Plot both grids ===
dx = CPL.get("xl_cfd")/float(CPL.get("ncx"))
dy = CPL.get("yl_cfd")/float(CPL.get("ncy"))
dz = CPL.get("zl_cfd")/float(CPL.get("ncz"))
ioverlap = (CPL.get("icmax_olap")-CPL.get("icmin_olap")+1)
joverlap = (CPL.get("jcmax_olap")-CPL.get("jcmin_olap")+1)
koverlap = (CPL.get("kcmax_olap")-CPL.get("kcmin_olap")+1)
xoverlap = ioverlap*dx
yoverlap = joverlap*dy
zoverlap = koverlap*dz
for time in range(100000):
# recv data to plot
olap_limits = CPL.get_olap_limits()
portion = CPL.my_proc_portion(olap_limits)
[ncxl, ncyl, nczl] = CPL.get_no_cells(portion)
recv_array = np.zeros((1, ncxl, ncyl, nczl), order='F', dtype=np.float64)
recv_array, ierr = CPL.recv(recv_array, olap_limits)
#Plot CFD and coupler Grid
draw_grid(ax,
nx=CPL.get("ncx"),
ny=CPL.get("ncy"),
nz=CPL.get("ncz"),
px=CPL.get("npx_cfd"),
py=CPL.get("npy_cfd"),
pz=CPL.get("npz_cfd"),
xmin=CPL.get("x_orig_cfd"),
ymin=CPL.get("y_orig_cfd"),
zmin=CPL.get("z_orig_cfd"),
xmax=(CPL.get("icmax_olap")+1)*dx,
ymax=CPL.get("yl_cfd"),
zmax=(CPL.get("kcmax_olap")+1)*dz,
lc = 'r',
label='CFD')
#Plot MD domain
draw_grid(ax, nx=1, ny=1, nz=1,
px=CPL.get("npx_md"),
py=CPL.get("npy_md"),
pz=CPL.get("npz_md"),
xmin=CPL.get("x_orig_md"),
ymin=-CPL.get("yl_md")+yoverlap,
zmin=CPL.get("z_orig_md"),
xmax=(CPL.get("icmax_olap")+1)*dx,
ymax=yoverlap,
zmax=(CPL.get("kcmax_olap")+1)*dz,
label='MD')
#Plot x component on grid
x = np.linspace(CPL.get("x_orig_cfd")+.5*dx,xoverlap-.5*dx,ioverlap)
z = np.linspace(CPL.get("z_orig_cfd")+.5*dz,zoverlap-.5*dz,koverlap)
for j in range(joverlap):
ax.plot(x, 0.5*dy*(recv_array[0,:,j,0]+1.+2*j), 's-')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
print(time, freq)
plt.pause(0.1)
ax.cla()
# send data to update
olap_limits = CPL.get_olap_limits()
portion = CPL.my_proc_portion(olap_limits)
[ncxl, ncyl, nczl] = CPL.get_no_cells(portion)
send_array = freq*np.ones((1, ncxl, ncyl, nczl), order='F', dtype=np.float64)
CPL.send(send_array, olap_limits)
CPL.finalize()
MPI.Finalize()
| gpl-3.0 |
mfjb/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 130 | 6059 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
W, H = nmf._initialize_nmf(data, 10, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF(random_state=42)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
| bsd-3-clause |
meprogrammerguy/pyMadness | scrape_stats.py | 1 | 2098 | #!/usr/bin/env python3
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
import html5lib
import pdb
from collections import OrderedDict
import json
import csv
import contextlib
url = "https://kenpom.com/index.php"
#url = "https://kenpom.com/index.php?y=2017" #past year testing override
print ("Scrape Statistics Tool")
print ("**************************")
print ("data is from {0}".format(url))
print ("**************************")
with contextlib.closing(urlopen(url)) as page:
soup = BeautifulSoup(page, "html5lib")
ratings_table=soup.find('table', id='ratings-table')
IDX=[]
A=[]
B=[]
C=[]
D=[]
E=[]
F=[]
G=[]
H=[]
I=[]
J=[]
K=[]
L=[]
M=[]
index=0
for row in ratings_table.findAll("tr"):
col=row.findAll('td')
if len(col)>0:
index+=1
IDX.append(index)
A.append(col[0].find(text=True))
B.append(col[1].find(text=True))
C.append(col[2].find(text=True))
D.append(col[3].find(text=True))
E.append(col[4].find(text=True))
F.append(col[5].find(text=True))
G.append(col[7].find(text=True))
H.append(col[9].find(text=True))
I.append(col[11].find(text=True))
J.append(col[13].find(text=True))
K.append(col[15].find(text=True))
L.append(col[17].find(text=True))
M.append(col[19].find(text=True))
df=pd.DataFrame(IDX,columns=['Index'])
df['Rank']=A
df['Team']=B
df['Conf']=C
df['W-L']=D
df['AdjEM']=E
df['AdjO']=F
df['AdjD']=G
df['AdjT']=H
df['Luck']=I
df['AdjEMSOS']=J
df['OppOSOS']=K
df['OppDSOS']=L
df['AdjEMNCSOS']=M
with open('stats.json', 'w') as f:
f.write(df.to_json(orient='index'))
with open("stats.json") as stats_json:
dict_stats = json.load(stats_json, object_pairs_hook=OrderedDict)
stats_sheet = open('stats.csv', 'w', newline='')
csvwriter = csv.writer(stats_sheet)
count = 0
for row in dict_stats.values():
#pdb.set_trace()
if (count == 0):
header = row.keys()
csvwriter.writerow(header)
count += 1
csvwriter.writerow(row.values())
stats_sheet.close()
print ("done.")
| mit |
berkeley-stat159/project-alpha | code/utils/scripts/glm_script.py | 1 | 3957 | """ Script for GLM functions.
Run with:
python glm_script.py
"""
# Loading modules.
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
import os
import sys
# Relative paths to subject 1 data.
project_path = "../../../"
pathtodata = project_path + "data/ds009/sub001/"
condition_location = pathtodata+"model/model001/onsets/task001_run001/"
location_of_images = project_path+"images/"
location_of_functions = project_path+"code/utils/functions/"
sys.path.append(location_of_functions)
# Load events2neural from the stimuli module.
from stimuli import events2neural
from event_related_fMRI_functions import hrf_single, convolution_specialized
# Load our GLM functions.
from glm import glm, glm_diagnostics, glm_multiple
# Load the image data for subject 1.
img = nib.load(pathtodata+"BOLD/task001_run001/bold.nii.gz")
data = img.get_data()
data = data[...,6:] # Knock off the first 6 observations.
cond1=np.loadtxt(condition_location+"cond001.txt")
cond2=np.loadtxt(condition_location+"cond002.txt")
cond3=np.loadtxt(condition_location+"cond003.txt")
#######################
# a. (my) convolution #
#######################
all_stimuli=np.array(sorted(list(cond2[:,0])+list(cond3[:,0])+list(cond1[:,0]))) # could also just x_s_array
my_hrf = convolution_specialized(all_stimuli,np.ones(len(all_stimuli)),hrf_single,np.linspace(0,239*2-2,239))
##################
# b. np.convolve #
##################
# initial needed values
TR = 2
tr_times = np.arange(0, 30, TR)
hrf_at_trs = np.array([hrf_single(x) for x in tr_times])
n_vols=data.shape[-1]
# creating the .txt file for the events2neural function
cond_all=np.row_stack((cond1,cond2,cond3))
cond_all=sorted(cond_all,key= lambda x:x[0])
np.savetxt(condition_location+"cond_all.txt",cond_all)
neural_prediction = events2neural(condition_location+"cond_all.txt",TR,n_vols)
convolved = np.convolve(neural_prediction, hrf_at_trs) # hrf_at_trs sample data
N = len(neural_prediction) # N == n_vols == 173
M = len(hrf_at_trs) # M == 12
np_hrf=convolved[:N]
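# Added note: np.convolve returns N + M - 1 samples, so the slice above keeps
# only the first N values, which line up with the acquired volumes.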
#############################
#############################
# Analysis and diagonistics #
#############################
#############################
#######################
# a. (my) convolution #
#######################
# Now get the estimated coefficients and design matrix for doing
# regression on the convolved time course.
B_my, X_my = glm(data, my_hrf)
# Some diagnostics.
MRSS_my, fitted_my, residuals_my = glm_diagnostics(B_my, X_my, data)
# Print out the mean MRSS.
print("MRSS using 'my' convolution function: "+str(np.mean(MRSS_my)))
# Plot the time course for a single voxel with the fitted values.
# Looks pretty bad.
plt.plot(data[41, 47, 2]) #change from cherry-picking
plt.plot(fitted_my[41, 47, 2])
plt.savefig(location_of_images+"glm_plot_my.png")
plt.close()
##################
# b. np.convolve #
##################
# Now get the estimated coefficients and design matrix for doing
# regression on the convolved time course.
B_np, X_np = glm(data, np_hrf)
# Some diagnostics.
MRSS_np, fitted_np, residuals_np = glm_diagnostics(B_np, X_np, data)
# Print out the mean MRSS.
print("MRSS using np convolution function: "+str(np.mean(MRSS_np)))
# Plot the time course for a single voxel with the fitted values.
# Looks pretty bad.
plt.plot(data[41, 47, 2])
plt.plot(fitted_np[41, 47, 2])
plt.savefig(location_of_images+"glm_plot_np.png")
plt.close()
X_my3=np.ones((data.shape[-1],4))
for i in range(3):
X_my3[:,i+1]=my_hrf**(i+1)
B_my3, X_my3 = glm_multiple(data, X_my3)
MRSS_my3, fitted_my3, residuals_my3 = glm_diagnostics(B_my3, X_my3, data)
print("MRSS using 'my' convolution function, 3rd degree polynomial: "+str(np.mean(MRSS_my3))+ ", but the chart looks better")
plt.plot(data[41, 47, 2])
plt.plot(fitted_my3[41, 47, 2])
plt.savefig(location_of_images+"glm_plot_my3.png")
plt.close()
| bsd-3-clause |
jundongl/scikit-feature | skfeature/function/sparse_learning_based/NDFS.py | 3 | 4908 | import numpy as np
import sys
import math
import sklearn.cluster
from skfeature.utility.construct_W import construct_W
def ndfs(X, **kwargs):
"""
This function implement unsupervised feature selection using nonnegative spectral analysis, i.e.,
min_{F,W} Tr(F^T L F) + alpha*(||XW-F||_F^2 + beta*||W||_{2,1}) + gamma/2 * ||F^T F - I||_F^2
s.t. F >= 0
Input
-----
X: {numpy array}, shape (n_samples, n_features)
input data
kwargs: {dictionary}
W: {sparse matrix}, shape {n_samples, n_samples}
affinity matrix
alpha: {float}
Parameter alpha in objective function
beta: {float}
Parameter beta in objective function
gamma: {float}
a very large number used to force F^T F = I
F0: {numpy array}, shape (n_samples, n_clusters)
initialization of the pseudo label matirx F, if not provided
n_clusters: {int}
number of clusters
verbose: {boolean}
True if user want to print out the objective function value in each iteration, false if not
Output
------
W: {numpy array}, shape(n_features, n_clusters)
feature weight matrix
Reference:
Li, Zechao, et al. "Unsupervised Feature Selection Using Nonnegative Spectral Analysis." AAAI. 2012.
"""
# default gamma is 10e8
if 'gamma' not in kwargs:
gamma = 10e8
else:
gamma = kwargs['gamma']
# use the default affinity matrix
if 'W' not in kwargs:
W = construct_W(X)
else:
W = kwargs['W']
if 'alpha' not in kwargs:
alpha = 1
else:
alpha = kwargs['alpha']
if 'beta' not in kwargs:
beta = 1
else:
beta = kwargs['beta']
if 'F0' not in kwargs:
if 'n_clusters' not in kwargs:
            sys.stderr.write("either F0 or n_clusters should be provided\n")
else:
# initialize F
n_clusters = kwargs['n_clusters']
F = kmeans_initialization(X, n_clusters)
    else:
        F = kwargs['F0']
        # n_clusters is needed later for the orthogonality penalty term;
        # infer it from the provided F0
        n_clusters = F.shape[1]
if 'verbose' not in kwargs:
verbose = False
else:
verbose = kwargs['verbose']
n_samples, n_features = X.shape
# initialize D as identity matrix
D = np.identity(n_features)
I = np.identity(n_samples)
# build laplacian matrix
L = np.array(W.sum(1))[:, 0] - W
max_iter = 1000
obj = np.zeros(max_iter)
for iter_step in range(max_iter):
# update W
T = np.linalg.inv(np.dot(X.transpose(), X) + beta * D + 1e-6*np.eye(n_features))
W = np.dot(np.dot(T, X.transpose()), F)
# update D
temp = np.sqrt((W*W).sum(1))
temp[temp < 1e-16] = 1e-16
temp = 0.5 / temp
D = np.diag(temp)
# update M
M = L + alpha * (I - np.dot(np.dot(X, T), X.transpose()))
M = (M + M.transpose())/2
# update F
denominator = np.dot(M, F) + gamma*np.dot(np.dot(F, F.transpose()), F)
temp = np.divide(gamma*F, denominator)
F = F*np.array(temp)
temp = np.diag(np.sqrt(np.diag(1 / (np.dot(F.transpose(), F) + 1e-16))))
F = np.dot(F, temp)
# calculate objective function
obj[iter_step] = np.trace(np.dot(np.dot(F.transpose(), M), F)) + gamma/4*np.linalg.norm(np.dot(F.transpose(), F)-np.identity(n_clusters), 'fro')
if verbose:
print('obj at iter {0}: {1}'.format(iter_step+1, obj[iter_step]))
if iter_step >= 1 and math.fabs(obj[iter_step] - obj[iter_step-1]) < 1e-3:
break
return W
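# Illustrative usage sketch (added; not part of the original module — the data
# and the import path below are placeholders/assumptions):
#   import numpy as np
#   from skfeature.function.sparse_learning_based.NDFS import ndfs
#   X = np.random.rand(50, 20)                      # hypothetical data matrix
#   W = ndfs(X, n_clusters=5)                       # feature weight matrix
#   ranking = np.argsort(-np.sqrt((W * W).sum(1)))  # rank features by row norm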
def kmeans_initialization(X, n_clusters):
"""
This function uses kmeans to initialize the pseudo label
Input
-----
X: {numpy array}, shape (n_samples, n_features)
input data
n_clusters: {int}
number of clusters
Output
------
Y: {numpy array}, shape (n_samples, n_clusters)
pseudo label matrix
"""
n_samples, n_features = X.shape
kmeans = sklearn.cluster.KMeans(n_clusters=n_clusters, init='k-means++', n_init=10, max_iter=300,
tol=0.0001, precompute_distances=True, verbose=0,
random_state=None, copy_x=True, n_jobs=1)
kmeans.fit(X)
labels = kmeans.labels_
Y = np.zeros((n_samples, n_clusters))
for row in range(0, n_samples):
Y[row, labels[row]] = 1
T = np.dot(Y.transpose(), Y)
F = np.dot(Y, np.sqrt(np.linalg.inv(T)))
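    # Y has one-hot rows, so T = Y^T Y is diagonal with the cluster sizes and
    # F = Y T^{-1/2} is the scaled indicator matrix satisfying F^T F = I; the
    # small constant added below keeps the entries strictly positive for the
    # multiplicative updates.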
F = F + 0.02*np.ones((n_samples, n_clusters))
return F
def calculate_obj(X, W, F, L, alpha, beta):
"""
This function calculates the objective function of NDFS
"""
# Tr(F^T L F)
T1 = np.trace(np.dot(np.dot(F.transpose(), L), F))
T2 = np.linalg.norm(np.dot(X, W) - F, 'fro')
T3 = (np.sqrt((W*W).sum(1))).sum()
obj = T1 + alpha*(T2 + beta*T3)
return obj | gpl-2.0 |
SISC2014/JobAnalysis | MongoRetrieval/src/EfficiencyHistogram.py | 1 | 6076 | '''
Created on Jun 19, 2014
@author: Erik Halperin
List of Keys
_id
JobStartDate
Requirements
TransferInput
TotalSuspensions
LastJobStatus
BufferBlockSize
OrigMaxHosts
RequestMemory
WantRemoteSyscalls
LastHoldReasonCode
ExitStatus
Args
JobFinishedHookDone
JobCurrentStartDate
CompletionDate
JobLeaseDuration
Err
RemoteWallClockTime
JobUniverse
RequestCpus
RemoveReason
StreamErr
Rank
WantRemoteIO
LocalSysCpu
UsedOCWrapper
CumulativeSlotTime
TransferIn
MachineAttrCpus0
CondorPlatform
CurrentTime
ExitReason
StreamOut
WantCheckpoint
GlobalJobId
TransferInputSizeMB
JobStatus
LastPublicClaimId
MemoryUsage
NumSystemHolds
TransferOutput
PeriodicRemove
NumShadowStarts
LastHoldReasonSubCode
LastSuspensionTime
ShouldTransferFiles
QDate
RemoteSysCpu
ImageSize_RAW
LastRemoteHost
CondorVersion
DiskUsage_RAW
PeriodicRelease
NumCkpts_RAW
JobCurrentStartExecutingDate
ProjectName
CoreSize
RemoteUserCpu
BytesSent
Owner
BytesRecvd
ExitCode
NumJobStarts
ExecutableSize_RAW
Notification
ExecutableSize
Environment
StartdPrincipal
RootDir
MinHosts
CumulativeSuspensionTime
JOBGLIDEIN_ResourceName
ProcId
MATCH_EXP_JOBGLIDEIN_ResourceName
OnExitRemove
User
UserLog
CommittedSuspensionTime
NumRestarts
JobCoreDumped
Cmd
NumJobMatches
DiskUsage
LastRemotePool
CommittedSlotTime
ResidentSetSize
WhenToTransferOutput
ExitBySignal
Out
RequestDisk
ImageSize
NumCkpts
LastJobLeaseRenewal
MachineAttrSlotWeight0
ResidentSetSize_RAW
JobPrio
JobRunCount
PeriodicHold
ClusterId
NiceUser
MyType
LocalUserCpu
BufferSize
LastHoldReason
CurrentHosts
LeaveJobInQueue
OnExitHold
EnteredCurrentStatus
MaxHosts
CommittedTime
LastMatchTime
In
JobNotification
'''
import re
import matplotlib.pyplot as plt
from pymongo import MongoClient
#takes a list of dictionaries and returns a list of floats
def parseList(l):
l = map(str, l)
newlist = []
for k in l:
newlist.append(re.sub('[RemoteWallClockTimeUsrpu_id\"\'{}: ]', '', k))
newlist = map(float, newlist)
return list(newlist)
#returns a list of dictionaries
#item is from list of keys, username: "example@login01.osgconnect.net", cluster: "123456", site: "phys.ucconn.edu",
#coll: MongoDB collection
#username/cluster/site may be None, in which case they will not be used
#item should be _id
def dbFindItemFromUser(item, username, cluster, site, coll):
mylist = []
rgx = "$regex"
if(username != None):
username = '\"' + username + '\"'
dicU = {'User': username }
else:
dicU = {}
if(cluster != None):
dicC = { 'ClusterId': cluster }
else:
dicC = {}
if(site != None):
dicS = { 'LastRemoteHost': { rgx: site } }
else:
dicS = {}
dicU.update(dicC)
dicU.update(dicS)
pr = { item: 1, '_id': 0 }
for condor_history in coll.find(dicU, pr):
mylist.append(condor_history)
return mylist
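# Illustrative example (added): with item="RemoteUserCpu",
# username="bamitchell@login01.osgconnect.net", cluster=None and
# site="uc.mwt2.org", the call above issues a query equivalent to
#   coll.find({'User': '"bamitchell@login01.osgconnect.net"',
#              'LastRemoteHost': {'$regex': 'uc.mwt2.org'}},
#             {'RemoteUserCpu': 1, '_id': 0})
# (note the literal quotes around the username, presumably matching how the
# field is stored in the collection).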
#returns a list of dictionaries
#username and coll are same as above
def dbFindIdFromUser(username, coll):
mylist = []
username = '\"' + username + '\"'
cr = { 'User': username }
pr = { '_id': 1 }
for condor_history in coll.find(cr, pr):
mylist.append(condor_history)
return mylist
#creates a scatterplot of two items
def plotScatter(item1, item2, username, cluster, coll, xlab, ylab, title):
    lst1 = parseList(dbFindItemFromUser(item1, username, cluster, None, coll))
    lst2 = parseList(dbFindItemFromUser(item2, username, cluster, None, coll))
plt.plot(lst1, lst2, 'bo')
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(title)
plt.show()
#creates a histogram of a list
#l: list to plot, bs: number of bins
def plotHist(l, bs, xlab, ylab, title):
plt.hist(l, bins=bs)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.show()
def getEfficiency(username, cluster, site, coll):
ruc = parseList(dbFindItemFromUser("RemoteUserCpu", username, cluster, site, coll))
rwct = parseList(dbFindItemFromUser("RemoteWallClockTime", username, cluster, site, coll))
efflist = []
totcount = 0
goodcount = 0 #certain efficiency values are >1 due to a condor error. these values are discarded
zerocount = 0 #testing possible condor bug where RemoteUserCpu is 0 but RemoteWallClockTime is quite large
for x,y in zip(ruc, rwct):
if(y == 0):
totcount += 1
elif(x/y > 1):
totcount += 1
else:
if(x == 0):
zerocount +=1
efflist.append(x/y)
totcount += 1
goodcount +=1
return [efflist, goodcount, totcount]
#Given at least one input for username/cluster/site, creates a histogram of the RemoteUserCpu/RemoteWallClockTime for the results
def efficiencyHistogram(username, cluster, site, coll, bins, xlab, ylab, title):
retlist = getEfficiency(username, cluster, site, coll) #0: efflist, 1: goodcount, 2: totcount
print("Jobs Plotted:", retlist[1], "/", retlist[2])
plotHist(retlist[0], bins, xlab, ylab, title)
def fourEffHists(lst1, lst2, lst3, lst4, lab1, lab2, lab3, lab4, bs, xlab, ylab, title):
plt.hist(lst1, bins=bs, histtype='stepfilled', label=lab1)
plt.hist(lst2, bins=bs, histtype='stepfilled', label=lab2)
plt.hist(lst3, bins=bs, histtype='stepfilled', label=lab3)
plt.hist(lst4, bins=bs, histtype='stepfilled', label=lab4)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.legend()
plt.show()
def mainEH(host, port):
client = MongoClient(host, port)
db = client.condor_history
coll = db.history_records
#sites: uc.mwt2.org, phys.uconn.edu, hpc.smu.edu, usatlas.bnl.gov
#names (@login01.osgconnect.net): lfzhao, sthapa, echism, wcatino, bamitchell
str_name = "bamitchell@login01.osgconnect.net"
efficiencyHistogram(str_name, None, None, coll, 75, "UserCPU/WallClockTime", "Frequency", "Efficiencies for " + str_name)
mainEH('mc.mwt2.org', 27017) | mit |
pmediano/ComputationalNeurodynamics | Fall2016/Exercise_1/Solutions/IzNeuronRK4.py | 1 | 1897 | """
Computational Neurodynamics
Exercise 1
Simulates Izhikevich's neuron model using the Runge-Kutta 4 method.
Parameters for regular spiking, fast spiking and bursting
neurons extracted from:
http://www.izhikevich.org/publications/spikes.htm
(C) Murray Shanahan et al, 2016
"""
import numpy as np
import matplotlib.pyplot as plt
# Create time points
Tmin = 0
Tmax = 200 # Simulation time
dt = 0.01 # Step size
T = np.arange(Tmin, Tmax+dt, dt)
# Base current
I = 10
## Parameters of Izhikevich's model (regular spiking)
a = 0.02
b = 0.2
c = -65
d = 8
## Parameters of Izhikevich's model (fast spiking)
# a = 0.02
# b = 0.25
# c = -65
# d = 2
## Parameters of Izhikevich's model (bursting)
# a = 0.02
# b = 0.2
# c = -50
# d = 2
## Make a state vector that has a (v, u) pair for each timestep
s = np.zeros((len(T), 2))
## Initial values
s[0, 0] = -65
s[0, 1] = -1
# Note that s1[0] is v and s1[1] is u. This is the Izhikevich equation in vector form.
def s_dt(s1, I):
v_dt = 0.04*(s1[0]**2) + 5*s1[0] + 140 - s1[1] + I
u_dt = a*(b*s1[0] - s1[1])
return np.array([v_dt, u_dt])
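# Added note: the loop below applies the classical fourth-order Runge-Kutta step
#   s(t+dt) = s(t) + (dt/6) * (k1 + 2*k2 + 2*k3 + k4),
# where k1..k4 are slope estimates at the start, the midpoint (twice) and the
# end of the interval.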
## SIMULATE
for t in range(len(T)-1):
# Calculate the four constants of Runge-Kutta method
k_1 = s_dt(s[t], I)
k_2 = s_dt(s[t] + 0.5*dt*k_1, I)
k_3 = s_dt(s[t] + 0.5*dt*k_2, I)
k_4 = s_dt(s[t] + dt*k_3, I)
s[t+1] = s[t] + (1.0/6)*dt*(k_1 + 2*k_2 + 2*k_3 + k_4)
# Reset the neuron if it has spiked
if s[t+1, 0] >= 30:
s[t, 0] = 30 # Add a Dirac pulse for visualisation
s[t+1, 0] = c # Reset to resting potential
s[t+1, 1] += d # Update recovery variable
v = s[:, 0]
u = s[:, 1]
## Plot the membrane potential
plt.subplot(211)
plt.plot(T, v)
plt.xlabel('Time (ms)')
plt.ylabel('Membrane potential v (mV)')
plt.title('Izhikevich Neuron')
# Plot the reset variable
plt.subplot(212)
plt.plot(T, u)
plt.xlabel('Time (ms)')
plt.ylabel('Reset variable u')
plt.show()
| gpl-3.0 |
ningchi/scikit-learn | sklearn/mixture/tests/test_gmm.py | 7 | 17493 | import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
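    # For a diagonal covariance the joint log-density factorises over features,
    #   log N(x | m, diag(cv)) = sum_d log N(x_d | m_d, cv_d),
    # which is exactly the per-feature sum of log densities computed below.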
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
# Test that multiple inits does not much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to trigger a ``LinAlgError``
    exception when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
    # we build a dataset with two 2d components. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
gems-uff/noworkflow | capture/noworkflow/resources/demo/annual_precipitation/step4/precipitation.py | 4 | 2619 | #!/usr/bin/python2
import csv
import numpy as np
import matplotlib.pyplot as plt
import time
from itertools import chain
from collections import defaultdict
def read(filename):
result = defaultdict(list)
with open(filename, "r") as c:
reader = csv.reader(c, delimiter=";")
for row in reader:
month = int(row[1].split("/")[1])
precipitation = float(row[3])
result[month].append(precipitation)
return result
def write(filename, data, year):
with open(filename, "w") as c:
writer = csv.writer(c, delimiter=";")
for month in sorted(data.keys()):
for day, value in enumerate(data[month]):
writer.writerow([
83743, "{:02}/{:02}/{}".format(day + 1, month, year),
1200, value])
def remove_outliers(data, thresh=2.5):
full_data = np.asarray(tuple(chain.from_iterable(data[i]
for i in sorted(data.keys()))))
non_zeros = full_data != 0
median = np.median(full_data[non_zeros])
result = {}
for month in data:
values = np.asarray(data[month])[:, None]
diff = np.sum((values - median)**2, axis=-1)
diff = np.sqrt(diff)
med_abs_deviation = np.median(diff)
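        # 0.6745 is approximately the 0.75 quantile of the standard normal, so
        # 0.6745 * diff / MAD behaves like a z-score for Gaussian data
        # (the usual "modified z-score" outlier rule).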
modified_z_score = 0.6745 * diff / med_abs_deviation
outliers = modified_z_score > thresh
non_outliers = modified_z_score <= thresh
new_data = np.zeros(len(values))
new_data[non_outliers] = np.transpose(values[non_outliers])[0]
new_data[outliers] = median
result[month] = new_data.tolist()
return result
def prepare(series, months, names, div=.1, colors=["b", "g", "r"]):
fig, ax = plt.subplots()
ax.set_ylabel("Precipitation (mm)")
ax.set_xlabel("Month")
ax.set_title("Precipitation by Month")
ax.set_xticks(months + .5)
ax.set_xticklabels(list(map(str, months)))
ax.set_ylim([0, 400])
half_div = div / 2.0
width = (1.0 - div) / len(series)
bars = []
for i, data in enumerate(series):
bars.append(ax.bar(months + half_div + i * width, data, width,
color=colors[i]))
ax.legend(bars, names)
def create_bargraph(output, months, years, *prec):
prepare(prec, months, years)
plt.savefig(output)
def sum_by_month(data, months):
time.sleep(1)
return [sum(data[i]) for i in months]
__VERSION__ = "1.1.0"
if __name__ == "__main__":
import sys
filename = sys.argv[1]
month = sys.argv[2]
data = read(filename)
print(";".join(map(str, data[int(month)])))
| mit |
yanchen036/tensorflow | tensorflow/contrib/learn/python/learn/estimators/multioutput_test.py | 136 | 1696 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-output tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
from tensorflow.python.platform import test
class MultiOutputTest(test.TestCase):
"""Multi-output tests."""
def testMultiRegression(self):
random.seed(42)
rng = np.random.RandomState(1)
x = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(x).ravel(), np.pi * np.cos(x).ravel()]).T
regressor = learn.LinearRegressor(
feature_columns=learn.infer_real_valued_columns_from_input(x),
label_dimension=2)
regressor.fit(x, y, steps=100)
score = mean_squared_error(np.array(list(regressor.predict_scores(x))), y)
self.assertLess(score, 10, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
sk413025/tilitools | latentsvdd.py | 1 | 3222 | from cvxopt import matrix,spmatrix,sparse,uniform,normal,setseed
from cvxopt.blas import dot,dotu
from cvxopt.solvers import qp
from cvxopt.lapack import syev
import numpy as np
import math as math
from kernel import Kernel
from svdd import SVDD
from ocsvm import OCSVM
import pylab as pl
import matplotlib.pyplot as plt
class LatentSVDD:
""" Latent variable support vector data description.
Written by Nico Goernitz, TU Berlin, 2014
For more information see:
'Learning and Evaluation with non-i.i.d Label Noise'
Goernitz et al., AISTATS & JMLR W&CP, 2014
"""
    PRECISION = 10**-3  # important: affects the threshold, support vectors and speed!
C = 1.0 # (scalar) the regularization constant > 0
sobj = [] # structured object contains various functions
# i.e. get_num_dims(), get_num_samples(), get_sample(i), argmin(sol,i)
sol = [] # (vector) solution vector (after training, of course)
def __init__(self, sobj, C=1.0):
self.C = C
self.sobj = sobj
def train_dc(self, max_iter=50):
""" Solve the LatentSVDD optimization problem with a
sequential convex programming/DC-programming
approach:
Iteratively, find the most likely configuration of
the latent variables and then, optimize for the
model parameter using fixed latent states.
"""
N = self.sobj.get_num_samples()
DIMS = self.sobj.get_num_dims()
# intermediate solutions
# latent variables
latent = [0]*N
sol = 10.0*normal(DIMS,1)
psi = matrix(0.0, (DIMS,N)) # (dim x exm)
old_psi = matrix(0.0, (DIMS,N)) # (dim x exm)
threshold = 0
obj = -1
iter = 0
# terminate if objective function value doesn't change much
while iter<max_iter and (iter<2 or sum(sum(abs(np.array(psi-old_psi))))>=0.001):
print('Starting iteration {0}.'.format(iter))
print(sum(sum(abs(np.array(psi-old_psi)))))
iter += 1
old_psi = matrix(psi)
# 1. linearize
# for the current solution compute the
# most likely latent variable configuration
for i in range(N):
# min_z ||sol - Psi(x,z)||^2 = ||sol||^2 + min_z -2<sol,Psi(x,z)> + ||Psi(x,z)||^2
# Hence => ||sol||^2 - max_z 2<sol,Psi(x,z)> - ||Psi(x,z)||^2
(foo, latent[i], psi[:,i]) = self.sobj.argmax(sol, i, opt_type='quadratic')
# 2. solve the intermediate convex optimization problem
kernel = Kernel.get_kernel(psi,psi)
svdd = SVDD(kernel, self.C)
svdd.train_dual()
threshold = svdd.get_threshold()
inds = svdd.get_support_dual()
alphas = svdd.get_support_dual_values()
sol = psi[:,inds]*alphas
self.sol = sol
self.latent = latent
return (sol, latent, threshold)
def apply(self, pred_sobj):
""" Application of the LatentSVDD:
anomaly_score = min_z ||c*-\Psi(x,z)||^2
latent_state = argmin_z ||c*-\Psi(x,z)||^2
"""
N = pred_sobj.get_num_samples()
norm2 = self.sol.trans()*self.sol
vals = matrix(0.0, (1,N))
lats = matrix(0.0, (1,N))
for i in range(N):
# min_z ||sol - Psi(x,z)||^2 = ||sol||^2 + min_z -2<sol,Psi(x,z)> + ||Psi(x,z)||^2
# Hence => ||sol||^2 - max_z 2<sol,Psi(x,z)> - ||Psi(x,z)||^2
(max_obj, lats[i], foo) = pred_sobj.argmax(self.sol, i, opt_type='quadratic')
vals[i] = norm2 - max_obj
return (vals, lats)
| mit |
xfaxca/pymlkit | pymlkit/models/regressors.py | 1 | 4199 | """
Module for custom regression model classes.
"""
import numpy as np

from sklearn.base import BaseEstimator, RegressorMixin
"""
Rolling todo:
1. For AvgReg: Modify how parameters are used. Put them all into a dict. Also change X_train, y_train to just X,y
"""
class AveragingRegressor(BaseEstimator, RegressorMixin):
"""
    Summary: A meta-regressor that averages the predictions of its constituent regressors. Analogous to
    a majority vote classifier, but for regression.
Attributes:
-------------
- regs: Base/Constituent regressors from which the average predictions are calculated
- reg_names: Names of the constituent regressors
    - params: Optionally user-supplied initialization parameters for the constituent regressors
    - base_predictions: Predictions of the constituent regressors. This attribute is None until the predict method
is called
- avg_predictions: Average predictions calculated from the predictions of the constituent regressors.
"""
def __init__(self, regressors=None, regressor_names=None, init_parameters=None, verbose=0):
"""
Initialization
:param regressors: (obj list) - Constituent regressors of AveragingRegressor
:param regressor_names: (str list) - Names of the constituent regressors
:param init_parameters: (dict list) - initialization parameters for the corresponding regressors. These
must be passed as a list of dictionaries s.t. the parameters in each index are the corresponding
            parameters for the regressor at the same index in the 'regressors' parameter. Can provide a partial
list, containing parameter dictionaries only for the first few regressors.
"""
self.params = {'regressors': regressors,
'regressor_names': regressor_names,
'init_parameters': init_parameters,
'verbose': verbose}
self.regs = regressors
self.reg_names = regressor_names
self.reg_params = init_parameters
self.verbose = verbose
self.base_predictions = None
self.avg_predictions = None
super().__init__()
super().set_params(**self.params)
# Return error if no constituent regressors are supplied
if regressors is None:
raise TypeError("Parameter 'regressors' should be a list of estimators with base scikit-learn regressor"
" methods.")
# Initialize constituent regressors with custom parameters if they are provided
if init_parameters is not None:
for i in range(len(self.reg_params)):
self.regs[i] = self.regs[i](**self.reg_params[i])
def fit(self, X_train, y_train=None):
"""
Method to fit all Regressors
:param X_train: (pandas df) - Training features
:param y_train: (pandas series) - Training target variable
:return: None
"""
print('=> Fitting AveragingRegressor:')
for i in range(len(self.regs)):
if self.verbose > 0:
print('==> Fitting %s' % self.reg_names[i])
self.regs[i].fit(X_train, y_train)
def predict(self, X_test):
"""
Method to predict target variable values. Final results are the average of all predictions
:param X_test: (pandas df) - Test features
:return: self.avg_predictions: (np.array) Average target variable predictions
"""
predictions = {}
average_predictions = np.zeros(shape=(len(X_test)), dtype=np.float64)
        # Only use the supplied regressor names when there is exactly one per regressor
        add_names = (self.reg_names is not None
                     and len(self.reg_names) == len(self.regs))
for i in range(len(self.regs)):
y_pred = self.regs[i].predict(X_test)
average_predictions += y_pred
name = self.reg_names[i] if add_names else ('Regressor%i' % i)
predictions.setdefault(name, y_pred)
average_predictions /= float(len(self.regs))
predictions.setdefault('Average', average_predictions)
self.base_predictions = predictions
self.avg_predictions = average_predictions
return self.avg_predictions | gpl-3.0 |
JungeAlexander/cocoscore | setup.py | 1 | 2522 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
with io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
) as fh:
return fh.read()
setup(
name='cocoscore',
version='1.0.0',
license='MIT license',
description='CoCoScore: context-aware co-occurrence scores for text mining applications',
long_description='%s\n%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
),
author='Alexander Junge',
author_email='alexander.junge@posteo.net',
url='https://github.com/JungeAlexander/cocoscore',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
# uncomment if you test on these interpreters:
# 'Programming Language :: Python :: Implementation :: IronPython',
# 'Programming Language :: Python :: Implementation :: Jython',
# 'Programming Language :: Python :: Implementation :: Stackless',
'Topic :: Utilities',
],
keywords=[
# eg: 'keyword1', 'keyword2', 'keyword3',
],
install_requires=[
# eg: 'aspectlib==1.1.1', 'six>=1.7',
'pandas>=0.23.3',
'scikit-learn>=0.20.1',
],
extras_require={
# eg:
# 'rst': ['docutils>=0.11'],
# ':python_version=="2.6"': ['argparse'],
},
)
| mit |
hlin117/scikit-learn | examples/ensemble/plot_forest_iris.py | 18 | 6190 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
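# Quick cross-check (added sketch, not part of the original example): the
# 10-fold cross-validation accuracies quoted in the docstring can be
# approximated as follows; exact values vary with the scikit-learn version
# and the randomness of the forest-based models.
from sklearn.model_selection import cross_val_score
for model in models:
    cv_scores = cross_val_score(model, iris.data, iris.target, cv=10)
    print("{}: {:.2f} (+/- {:.2f})".format(
        type(model).__name__, cv_scores.mean(), cv_scores.std()))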
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
# surfaces. These points are regularly space and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
plt.scatter(X[:, 0], X[:, 1], c=y,
cmap=ListedColormap(['r', 'y', 'b']))
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
mediaProduct2017/learn_NeuralNet | neural_network_design.py | 1 | 1568 | """
In order to decide how many hidden nodes the hidden layer should have,
split up the data set into training and testing data and create networks
with various hidden node counts (5, 10, 15, ... 45), testing the performance
for each.
The best-performing node count is used in the actual system. If multiple counts
perform similarly, choose the smallest count for a smaller network with fewer computations.
"""
import numpy as np
from ocr import OCRNeuralNetwork
from sklearn.cross_validation import train_test_split
def test(data_matrix, data_labels, test_indices, nn):
avg_sum = 0
for j in xrange(100):
correct_guess_count = 0
for i in test_indices:
test = data_matrix[i]
prediction = nn.predict(test)
if data_labels[i] == prediction:
correct_guess_count += 1
avg_sum += (correct_guess_count / float(len(test_indices)))
return avg_sum / 100
# Load data samples and labels into matrix
data_matrix = np.loadtxt(open('data.csv', 'rb'), delimiter = ',').tolist()
data_labels = np.loadtxt(open('dataLabels.csv', 'rb')).tolist()
# Create training and testing sets.
train_indices, test_indices = train_test_split(list(range(5000)))
print "PERFORMANCE"
print "-----------"
# Try various number of hidden nodes and see what performs best
for i in xrange(5, 50, 5):
nn = OCRNeuralNetwork(i, data_matrix, data_labels, train_indices, False)
performance = str(test(data_matrix, data_labels, test_indices, nn))
print "{i} Hidden Nodes: {val}".format(i=i, val=performance) | mit |
bioinformatics-IBCH/logloss-beraf | logloss_beraf/model_ops/trainer.py | 1 | 12714 | # coding=utf-8
import copy
import logging
import os
# https://github.com/matplotlib/matplotlib/issues/3466/#issuecomment-195899517
import itertools
import matplotlib
matplotlib.use('agg')
import numpy as np
import pandas
from sklearn import (
preprocessing,
model_selection,
)
from sklearn.cross_validation import (
LeaveOneOut,
StratifiedKFold,
)
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import RandomizedLogisticRegression
import cPickle as pickle
from utils.constants import (
PREFILTER_PCA_PLOT_NAME,
POSTFILTER_PCA_PLOT_NAME,
FEATURE_IMPORTANCE_PLOT_NAME,
FEATURE_COLUMN,
FEATURE_IMPORTANCE_COLUMN,
TRAINED_MODEL_NAME,
)
from visualization.plotting import plot_pca_by_annotation
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
from settings import logger
class LLBModelTrainer(object):
"""
Class implementing main steps of the algorithm:
1. Initial regions filtering with a user-specified delta beta-values threshold
2. Applying randomized logistic regression in order to additionally pre-filter input regions
3. Extracting highly correlated sites
    4. Reconstructing the logloss function over an interval bounded by the user-specified limit on the number of sites
5. Detecting optimal panel of regions and training final model
Also does some visualizations
"""
def __init__(self, threads=0, max_num_of_features=20,
cv_method="SKFold", class_weights="balanced", final_clf_estimators_num=3000,
intermediate_clf_estimators_num=1000, logloss_estimates=50, min_beta_threshold=0.2,
rr_iterations=5000, correlation_threshold=0.85, output_folder=None):
"""
:param threads:
:type threads: int
:param max_num_of_features: maximum number of features a model can contain
:type max_num_of_features: int
:param cv_method: Supported cross-validation methods: "LOO", "SKFold"
:type cv_method: str
:param class_weights: Class balancing strategy
:type class_weights: dict, str
:param final_clf_estimators_num: number of estimators used in a final classifier
:type final_clf_estimators_num: int
:param intermediate_clf_estimators_num: number of estimators used in intermediate classifiers
:type intermediate_clf_estimators_num: int
        :param logloss_estimates: Number of LogLoss estimates computed over the restricted interval of panel sizes
:type logloss_estimates: int
:param min_beta_threshold: Minimum beta-values difference threshold
:type min_beta_threshold: float
:param rr_iterations: Number of randomized regression iterations
"""
self.threads = threads
self.max_num_of_features = max_num_of_features
self.min_beta_threshold = min_beta_threshold
# train process configuration
self.cv_method = cv_method
self.class_weights = class_weights
self.final_clf_estimators_num = final_clf_estimators_num
self.intermediate_clf_estimators_num = intermediate_clf_estimators_num
self.rr_iterations = rr_iterations
self.logloss_estimates = logloss_estimates
# common
self.correlation_threshold = correlation_threshold
self.output_folder = output_folder if output_folder is not None else "results"
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
def _run_randomized_regression(self, feature_df, annotation, clinical_column, sample_fraction=0.7):
annotation = copy.deepcopy(annotation)
# Encode labels of the classes
le = preprocessing.LabelEncoder()
annotation[clinical_column] = le.fit_transform(annotation[clinical_column])
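        # Randomized (stability-selection style) logistic regression: only features
        # that receive a non-zero selection score across the resampled fits are kept.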
clf = RandomizedLogisticRegression(
n_resampling=self.rr_iterations,
sample_fraction=sample_fraction,
n_jobs=1,
verbose=1,
).fit(feature_df, annotation[clinical_column])
selected_features = feature_df.T[clf.scores_ != 0].index
logger.info("Number of selected features: %d", len(selected_features))
return selected_features, clf
def _train_clf(self, X, y, n_estimators=10):
clf = RandomForestClassifier(n_estimators, n_jobs=self.threads, class_weight=self.class_weights)
scores = scores_accuracy = np.array([0])
cv_algo = None
if self.cv_method is not None:
if self.cv_method == "LOO":
cv_algo = LeaveOneOut(len(y))
elif self.cv_method == "SKFold":
cv_algo = StratifiedKFold(y)
logger.info("Running cross-validation...")
scores = model_selection.cross_val_score(
clf,
X,
y,
cv=cv_algo,
scoring='neg_log_loss',
n_jobs=self.threads,
verbose=1,
)
clf.fit(X, y)
return clf, scores.mean(), scores.std()
def _describe_and_filter_regions(self, basic_region_df, annotation, clinical_column, sample_name_column):
logger.info("Initial number of regions: {0}".format(basic_region_df.shape))
# Initial filtering based on min_beta_threshold
class_combinations = itertools.combinations(annotation[clinical_column].unique(), 2)
for combination in class_combinations:
first_class_samples = annotation[annotation[clinical_column] == combination[0]][sample_name_column]
second_class_samples = annotation[annotation[clinical_column] == combination[1]][sample_name_column]
mean_difference = (basic_region_df.loc[first_class_samples].mean()
- basic_region_df.loc[second_class_samples].mean())
basic_region_df = basic_region_df[mean_difference[abs(mean_difference) > self.min_beta_threshold].index.tolist()]
basic_region_df = basic_region_df.dropna(how="any", axis=1)
logger.info("Number of features after initial filtration: {0}".format(basic_region_df.shape))
plot_pca_by_annotation(
basic_region_df,
annotation,
clinical_column,
sample_name_column,
outfile=os.path.join(self.output_folder, PREFILTER_PCA_PLOT_NAME),
)
logger.info("Starting feature selection with RLR...")
selected_features, model = self._run_randomized_regression(
basic_region_df,
annotation,
clinical_column,
)
plot_pca_by_annotation(
basic_region_df[selected_features],
annotation,
clinical_column,
sample_name_column,
outfile=os.path.join(self.output_folder, POSTFILTER_PCA_PLOT_NAME),
)
return selected_features, model
def plot_fi_distribution(self, feature_importances):
ax = feature_importances[FEATURE_IMPORTANCE_COLUMN].hist()
ax.set_xlabel("Feature Importance")
ax.set_ylabel("Number of features")
fig = ax.get_figure()
fig.savefig(os.path.join(self.output_folder, FEATURE_IMPORTANCE_PLOT_NAME))
def _apply_feature_imp_thresh(self, features, feature_imp, thresh):
return [
feature[0] for feature in
zip(features.values, feature_imp)
if feature[1] > thresh
]
def get_threshold(self, logloss_df):
        # Standard error of the mean CV logloss across thresholds
        ll_se = logloss_df["mean"].std() / np.sqrt(len(logloss_df["mean"]))
        # Restricting search to desired number of features.
        logloss_df = logloss_df[logloss_df["len"] <= int(self.max_num_of_features)]
        # Best (maximum) mean score observed within that restriction
        ll_max = logloss_df[logloss_df["mean"] == logloss_df["mean"].max()].iloc[0]
        # Candidate thresholds whose mean score lies within half a standard error of the best
        ll_interval = logloss_df[logloss_df["mean"] > (ll_max["mean"] - 0.5 * ll_se)]
        # Among the candidates, pick the one giving the smallest feature panel
        res = ll_interval[ll_interval["len"] == ll_interval["len"].min()].iloc[0]
return res
def train(self, train_regions, anndf, sample_class_column, sample_name_column):
"""
Main functionality
:param train_regions: input dataframe with all regions methylation
:type train_regions: pandas.DataFrame
:param anndf: annotation dataframe, containing at least sample name and sample class
:type anndf: pandas.DataFrame
:param sample_class_column: name of the sample class column
:type sample_class_column: str
:param sample_name_column: name of the sample name column
:type sample_name_column: str
:return:
"""
# train_regions = train_regions.T
# First sort both train_regions and annotation according to sample names
train_regions = train_regions.sort_index(ascending=True)
# Ensure annotation contains only samples from the train_regions
anndf = anndf[anndf[sample_name_column].isin(train_regions.index.tolist())].sort_values(
by=[sample_name_column],
ascending=True
).dropna(subset=[sample_name_column])
train_regions = train_regions.ix[anndf[sample_name_column].tolist()]
assert anndf[sample_name_column].tolist() == train_regions.index.tolist(), \
"Samples in the annotations table are diferrent from those in feature table"
# Prefilter regions
selected_regions, clf = self._describe_and_filter_regions(
train_regions,
anndf,
sample_class_column,
sample_name_column,
)
# Estimate feature importances (FI)
first_clf, mean, std = self._train_clf(
train_regions[selected_regions.values],
anndf[sample_class_column],
n_estimators=self.final_clf_estimators_num,
)
feature_importances = pandas.DataFrame.from_records(
zip(selected_regions.values, first_clf.feature_importances_),
columns=[FEATURE_COLUMN, FEATURE_IMPORTANCE_COLUMN],
)
# Visualizing feature importance distribution
self.plot_fi_distribution(feature_importances)
# Extracting correlated site
feature_importances = feature_importances[
abs(feature_importances[FEATURE_IMPORTANCE_COLUMN]) > 0
]
corr_matrix = train_regions[feature_importances[FEATURE_COLUMN]].corr().applymap(
lambda x: 1 if abs(x) >= self.correlation_threshold else 0
)
logloss_df_cols = ["thresh", "mean", "std", "len"]
logloss_di = pandas.DataFrame(columns=logloss_df_cols)
for thresh in np.arange(
feature_importances[FEATURE_IMPORTANCE_COLUMN].quantile(0.99),
feature_importances[FEATURE_IMPORTANCE_COLUMN].max(),
(
feature_importances[FEATURE_IMPORTANCE_COLUMN].max() -
feature_importances[FEATURE_IMPORTANCE_COLUMN].min()
) / self.logloss_estimates
):
selected_features = self._apply_feature_imp_thresh(selected_regions, first_clf.feature_importances_, thresh)
if len(selected_features) < 2:
continue
logger.info(
"Estimating %d features on feature importance threshold %f",
len(selected_features),
thresh
)
clf, mean, std = self._train_clf(
train_regions[selected_features],
anndf[sample_class_column],
n_estimators=self.intermediate_clf_estimators_num,
)
logloss_di = logloss_di.append(
pandas.Series([thresh, mean, std, len(selected_features)], index=logloss_df_cols),
ignore_index=True,
)
logger.info("LogLoss mean=%f, std=%f on threshold %f", mean, std, thresh)
logger.info("Detecting optimal feature subset...")
thresh = self.get_threshold(logloss_di)
logger.info("Selected threshold")
logger.info(thresh)
selected_features = self._apply_feature_imp_thresh(
selected_regions,
first_clf.feature_importances_,
thresh["thresh"],
)
logger.info("Trainig final model...")
clf, mean, std = self._train_clf(
train_regions[selected_features],
anndf[sample_class_column],
n_estimators=self.final_clf_estimators_num,
)
logger.info("Selected features: {0}".format(selected_features))
pickle.dump((clf, selected_features), open(os.path.join(self.output_folder, TRAINED_MODEL_NAME), 'w'))
return selected_features, clf, mean, std
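if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): the file names and column names
    # below are assumptions, not part of the original pipeline.
    regions = pandas.read_csv("beta_values.csv", index_col=0)   # samples x regions
    annotation = pandas.read_csv("annotation.csv")              # sample names and classes
    trainer = LLBModelTrainer(threads=4, max_num_of_features=20)
    panel, model, mean_ll, std_ll = trainer.train(
        regions,
        annotation,
        sample_class_column="Class",
        sample_name_column="Sample",
    )
    logger.info("Selected panel: %s", panel)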
| gpl-3.0 |
tomlof/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
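    # Each case checks that the Hungarian algorithm recovers the expected minimum
    # total cost, and that transposing the cost matrix yields the same optimum.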
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
| bsd-3-clause |
thomasgibson/tabula-rasa | verification/LDGH/LDGH.py | 1 | 14704 | """
This module runs a convergence history for a hybridized-DG
discretization of a model elliptic problem (detailed in the main
function). The method used is the LDG-H method.
"""
from firedrake import *
from firedrake.petsc import PETSc
from firedrake import COMM_WORLD
import numpy as np
import pandas as pd
def run_LDG_H_problem(r, degree, tau_order, write=False):
"""
Solves the Dirichlet problem for the elliptic equation:
-div(grad(u)) = f in [0, 1]^2, u = g on the domain boundary.
The source function f and g are chosen such that the analytic
solution is:
u(x, y) = sin(x*pi)*sin(y*pi).
This problem was crafted so that we can test the theoretical
convergence rates for the hybridized DG method: LDG-H. This
is accomplished by introducing the numerical fluxes:
u_hat = lambda,
q_hat = q + tau*(u - u_hat).
The Slate DLS in Firedrake is used to perform the static condensation
of the full LDG-H formulation of the Poisson problem to a single
system for the trace u_hat (lambda) on the mesh skeleton:
S * Lambda = E.
The resulting linear system is solved via a direct method (LU) to
ensure an accurate approximation to the trace variable. Once
the trace is solved, the Slate DSL is used again to solve the
elemental systems for the scalar solution u and its flux q.
Post-processing of the scalar variable, as well as its flux, is
performed using Slate to form and solve the elemental-systems for
new approximations u*, q*. Depending on the choice of tau, these
new solutions have superconvergent properties.
The post-processed scalar u* superconverges at a rate of k+2 when
two conditions are satisfied:
(1) q converges at a rate of k+1, and
(2) the cell average of u, ubar, superconverges at a rate of k+2.
The choice of tau heavily influences these two conditions. For all
    tau > 0, the post-processed flux q* has enhanced conservation
properties! The new solution q* has the following three properties:
(1) q* converges at the same rate as q. However,
(2) q* is in H(Div), meaning that the interior jump of q* is zero!
And lastly,
(3) div(q - q*) converges at a rate of k+1.
The expected (theoretical) rates for the LDG-H method are
summarized below for various orders of tau:
-----------------------------------------------------------------
u q ubar u* q* div(p*)
-----------------------------------------------------------------
tau = O(1) (k>0) k+1 k+1 k+2 k+2 k+1 k+1
tau = O(h) (k>0) k k+1 k+2 k+2 k+1 k+1
tau = O(1/h) (k>0) k+1 k k+1 k+1 k k+1
-----------------------------------------------------------------
Note that the post-processing used for the flux q only holds for
simplices (triangles and tetrahedra). If someone knows of a local
post-processing method valid for quadrilaterals, please contact me!
For these numerical results, we chose the following values of tau:
tau = O(1) -> tau = 1,
tau = O(h) -> tau = h,
tau = O(1/h) -> tau = 1/h,
where h here denotes the facet area.
This demo was written by: Thomas H. Gibson (t.gibson15@imperial.ac.uk)
"""
if tau_order is None or tau_order not in ("1", "1/h", "h"):
raise ValueError(
"Must specify tau to be of order '1', '1/h', or 'h'"
)
assert degree > 0, "Provide a degree >= 1"
# Set up problem domain
mesh = UnitSquareMesh(2**r, 2**r, quadrilateral=False)
x = SpatialCoordinate(mesh)
n = FacetNormal(mesh)
# Set up function spaces
U = VectorFunctionSpace(mesh, "DG", degree)
V = FunctionSpace(mesh, "DG", degree)
T = FunctionSpace(mesh, "HDiv Trace", degree)
# Mixed space and test/trial functions
W = U * V * T
s = Function(W, name="solutions").assign(0.0)
q, u, uhat = split(s)
v, w, mu = TestFunctions(W)
# Analytical solutions for u and q
V_a = FunctionSpace(mesh, "DG", degree + 3)
U_a = VectorFunctionSpace(mesh, "DG", degree + 3)
u_a = Function(V_a, name="Analytic Scalar")
a_scalar = sin(pi*x[0])*sin(pi*x[1])
u_a.interpolate(a_scalar)
q_a = Function(U_a, name="Analytic Flux")
a_flux = -grad(a_scalar)
q_a.project(a_flux)
Vh = FunctionSpace(mesh, "DG", degree + 3)
f = Function(Vh).interpolate(-div(grad(a_scalar)))
# Determine stability parameter tau
if tau_order == "1":
tau = Constant(1)
elif tau_order == "1/h":
tau = 1/FacetArea(mesh)
elif tau_order == "h":
tau = FacetArea(mesh)
else:
raise ValueError("Invalid choice of tau")
# Numerical flux
qhat = q + tau*(u - uhat)*n
# Formulate the LDG-H method in UFL
a = ((dot(v, q) - div(v)*u)*dx
+ uhat('+')*jump(v, n=n)*dS
+ uhat*dot(v, n)*ds
- dot(grad(w), q)*dx
+ jump(qhat, n=n)*w('+')*dS
+ dot(qhat, n)*w*ds
# Transmission condition
+ mu('+')*jump(qhat, n=n)*dS)
L = w*f*dx
F = a - L
PETSc.Sys.Print("Solving using static condensation.\n")
params = {'snes_type': 'ksponly',
'mat_type': 'matfree',
'pmat_type': 'matfree',
'ksp_type': 'preonly',
'pc_type': 'python',
# Use the static condensation PC for hybridized problems
# and use a direct solve on the reduced system for u_hat
'pc_python_type': 'firedrake.SCPC',
'pc_sc_eliminate_fields': '0, 1',
'condensed_field': {'ksp_type': 'preonly',
'pc_type': 'lu',
'pc_factor_mat_solver_type': 'mumps'}}
bcs = DirichletBC(W.sub(2), Constant(0.0), "on_boundary")
problem = NonlinearVariationalProblem(F, s, bcs=bcs)
solver = NonlinearVariationalSolver(problem, solver_parameters=params)
solver.solve()
PETSc.Sys.Print("Solver finished.\n")
# Computed flux, scalar, and trace
q_h, u_h, uhat_h = s.split()
# Now we compute the various metrics. First we
# simply compute the L2 error between the analytic
# solutions and the computed ones.
scalar_error = errornorm(a_scalar, u_h, norm_type="L2")
flux_error = errornorm(a_flux, q_h, norm_type="L2")
# We keep track of all metrics using a Python dictionary
error_dictionary = {"scalar_error": scalar_error,
"flux_error": flux_error}
# Now we use Slate to perform element-wise post-processing
# Scalar post-processing:
# This gives an approximation in DG(k+1) via solving for
# the solution of the local Neumman data problem:
#
# (grad(u), grad(w))*dx = -(q_h, grad(w))*dx
# m(u) = m(u_h) for all elements K, where
#
# m(v) := measure(K)^-1 * int_K v dx.
# NOTE: It is currently not possible to correctly formulate this
# in UFL. However, we can introduce a Lagrange multiplier and
# transform the local problem above into a local mixed system:
#
# find (u, psi) in DG(k+1) * DG(0) such that:
#
# (grad(u), grad(w))*dx + (psi, grad(w))*dx = -(q_h, grad(w))*dx
# (u, phi)*dx = (u_h, phi)*dx,
#
# for all w, phi in DG(k+1) * DG(0).
DGk1 = FunctionSpace(mesh, "DG", degree + 1)
DG0 = FunctionSpace(mesh, "DG", 0)
Wpp = DGk1 * DG0
up, psi = TrialFunctions(Wpp)
wp, phi = TestFunctions(Wpp)
# Create mixed tensors:
K = Tensor((inner(grad(up), grad(wp)) +
inner(psi, wp) +
inner(up, phi))*dx)
F = Tensor((-inner(q_h, grad(wp)) +
inner(u_h, phi))*dx)
E = K.inv * F
PETSc.Sys.Print("Local post-processing of the scalar variable.\n")
u_pp = Function(DGk1, name="Post-processed scalar")
assemble(E.blocks[0], tensor=u_pp)
# Now we compute the error in the post-processed solution
# and update our error dictionary
scalar_pp_error = errornorm(a_scalar, u_pp, norm_type="L2")
error_dictionary.update({"scalar_pp_error": scalar_pp_error})
# Post processing of the flux:
# This is a modification of the local Raviart-Thomas projector.
# We solve the local problem: find 'q_pp' in RT(k+1)(K) such that
#
# (q_pp, v)*dx = (q_h, v)*dx,
# (q_pp.n, gamma)*dS = (qhat.n, gamma)*dS
#
# for all v, gamma in DG(k-1) * DG(k)|_{trace}. The post-processed
# solution q_pp converges at the same rate as q_h, but is HDiv
# conforming. For all LDG-H methods,
# div(q_pp) converges at the rate k+1. This is a way to obtain a
# flux with better conservation properties. For tau of order 1/h,
# div(q_pp) converges faster than q_h.
qhat_h = q_h + tau*(u_h - uhat_h)*n
local_RT = FiniteElement("RT", triangle, degree + 1)
RTd = FunctionSpace(mesh, BrokenElement(local_RT))
DGkn1 = VectorFunctionSpace(mesh, "DG", degree - 1)
# Use the trace space already defined
Npp = DGkn1 * T
n_p = TrialFunction(RTd)
vp, mu = TestFunctions(Npp)
# Assemble the local system and invert using Slate
A = Tensor(inner(n_p, vp)*dx +
jump(n_p, n=n)*mu*dS + dot(n_p, n)*mu*ds)
B = Tensor(inner(q_h, vp)*dx +
jump(qhat_h, n=n)*mu*dS + dot(qhat_h, n)*mu*ds)
PETSc.Sys.Print("Local post-processing of the flux.\n")
q_pp = assemble(A.inv * B)
# And check the error in our new flux
flux_pp_error = errornorm(a_flux, q_pp, norm_type="L2")
# To verify our new flux is HDiv conforming, we also
# evaluate its jump over mesh interiors. This should be
# approximately zero if everything worked correctly.
flux_pp_jump = assemble(jump(q_pp, n=n)*dS)
error_dictionary.update({"flux_pp_error": flux_pp_error})
error_dictionary.update({"flux_pp_jump": np.abs(flux_pp_jump)})
PETSc.Sys.Print("Post-processing finished.\n")
PETSc.Sys.Print("Finished test case for h=1/2^%d.\n" % r)
# If write specified, then write output
if write:
if tau_order == "1/h":
o = "hneg1"
else:
o = tau_order
File("results/LDGH_tauO%s_deg%d.pvd" %
(o, degree)).write(q_a, u_a, q_h, u_h, u_pp)
# Return all error metrics
return error_dictionary, mesh
def compute_conv_rates(u):
"""Computes the convergence rate for this particular test case
:arg u: a list of errors.
Returns a list of convergence rates. Note the first element of
the list will be empty, as there is no previous computation to
compare with. '---' will be inserted into the first component.
"""
u_array = np.array(u)
rates = list(np.log2(u_array[:-1] / u_array[1:]))
rates.insert(0, '---')
return rates
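# Example (illustrative): if the errors fall in order-2 fashion on successive
# meshes, e.g. [1.0e-2, 2.5e-3, 6.25e-4], compute_conv_rates returns
# ['---', 2.0, 2.0], i.e. the observed order of convergence between levels.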
def run_single_test(r, degree, tau_order, write=False):
# Run a quick test given a degree, tau order, and resolution
resolution_param = r
PETSc.Sys.Print("Running LDG-H method (triangles) of degree %d with tau=O('%s') "
"and mesh parameter h=1/2^%d." %
(degree, tau_order, resolution_param))
error_dict, _ = run_LDG_H_problem(r=resolution_param,
degree=degree,
tau_order=tau_order,
write=write)
PETSc.Sys.Print("Error in scalar: %0.8f" %
error_dict["scalar_error"])
PETSc.Sys.Print("Error in post-processed scalar: %0.8f" %
error_dict["scalar_pp_error"])
PETSc.Sys.Print("Error in flux: %0.8f" %
error_dict["flux_error"])
PETSc.Sys.Print("Error in post-processed flux: %0.8f" %
error_dict["flux_pp_error"])
PETSc.Sys.Print("Interior jump of post-processed flux: %0.8f" %
np.abs(error_dict["flux_pp_jump"]))
def run_LDG_H_convergence(degree, tau_order, start, end):
PETSc.Sys.Print("Running convergence test for LDG-H method (triangles) "
"of degree %d with tau order '%s'"
% (degree, tau_order))
# Create arrays to write to CSV file
r_array = []
scalar_errors = []
scalar_pp_errors = []
flux_errors = []
flux_pp_errors = []
flux_pp_jumps = []
num_cells = []
# Run over mesh parameters and collect error metrics
for r in range(start, end + 1):
r_array.append(r)
error_dict, mesh = run_LDG_H_problem(r=r,
degree=degree,
tau_order=tau_order,
write=False)
# Extract errors and metrics
scalar_errors.append(error_dict["scalar_error"])
scalar_pp_errors.append(error_dict["scalar_pp_error"])
flux_errors.append(error_dict["flux_error"])
flux_pp_errors.append(error_dict["flux_pp_error"])
flux_pp_jumps.append(error_dict["flux_pp_jump"])
num_cells.append(mesh.num_cells())
# Now that all error metrics are collected, we can compute the rates:
scalar_rates = compute_conv_rates(scalar_errors)
scalar_pp_rates = compute_conv_rates(scalar_pp_errors)
flux_rates = compute_conv_rates(flux_errors)
flux_pp_rates = compute_conv_rates(flux_pp_errors)
PETSc.Sys.Print("Error in scalar: %0.13f" %
scalar_errors[-1])
PETSc.Sys.Print("Error in post-processed scalar: %0.13f" %
scalar_pp_errors[-1])
PETSc.Sys.Print("Error in flux: %0.13f" %
flux_errors[-1])
PETSc.Sys.Print("Error in post-processed flux: %0.13f" %
flux_pp_errors[-1])
PETSc.Sys.Print("Interior jump of post-processed flux: %0.13f" %
np.abs(flux_pp_jumps[-1]))
if COMM_WORLD.rank == 0:
degrees = [degree] * len(r_array)
data = {"Mesh": r_array,
"Degree": degrees,
"NumCells": num_cells,
"ScalarErrors": scalar_errors,
"ScalarConvRates": scalar_rates,
"PostProcessedScalarErrors": scalar_pp_errors,
"PostProcessedScalarRates": scalar_pp_rates,
"FluxErrors": flux_errors,
"FluxConvRates": flux_rates,
"PostProcessedFluxErrors": flux_pp_errors,
"PostProcessedFluxRates": flux_pp_rates}
if tau_order == "1/h":
o = "hneg1"
else:
o = tau_order
df = pd.DataFrame(data)
result = "results/LDG-H-d%d-tau_order-%s.csv" % (degree, o)
df.to_csv(result, index=False, mode="w")
| mit |
molly24Huang/Cents_trip | Recommendation/attr_food_distance.py | 1 | 2978 | import pandas as pd
from math import sin, cos, sqrt, asin, radians
#import ibm_db
def cal_dist(lon1, lat1, lon2, lat2):
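    # Haversine great-circle distance between two (lon, lat) points given in
    # degrees; returns kilometres using 6378.137 km for the Earth's radius.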
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * asin(sqrt(a))
distance = 6378.137 * c
return distance
food = 'D:\\Dropbox\\Mcomp\\CS5224\\Project\\Cents_trip-master\\dataset\\food.csv'
tourism_attractions = 'D:\\Dropbox\\Mcomp\\CS5224\\Project\\Cents_trip-master\\dataset\\TOURISM_ATTRACTIONS.csv'
food_df = pd.read_csv(food)
tourism_attractions_df = pd.read_csv(tourism_attractions)
food_data = food_df.iloc[:,[0,6,7]]
tourism_attractions_data = tourism_attractions_df.iloc[:,[0,2,3]]
foodid = food_data['FOODID'].as_matrix()
#print(len(roomid))
lat_food = food_data['LATITUDE'].as_matrix()
lng_food = food_data['LONGITUDE'].as_matrix()
attractionid = tourism_attractions_data['ATTRACTIONID'].as_matrix()
#print(attractionid)
lat_attractions = tourism_attractions_data['LATITUDE'].as_matrix()
lng_attractions = tourism_attractions_data['LONGITUDE'].as_matrix()
distances = []
# conn = ibm_db.connect("DATABASE=BLUDB;HOSTNAME=dashdb-entry-yp-dal09-09.services.dal.bluemix.net;\
# PORT=50000;PROTOCOL=TCPIP;UID=dash9787;\
# PWD=X_c03EeYTe#u;", "", "")
for i in range(len(tourism_attractions_data)):
for k in range(len(food_data)):
distance = cal_dist(lng_attractions[i], lat_attractions[i], lng_food[k], lat_food[k])
# print(distance)
distances.append(distance)
output = open('rating.txt','w')
k = 1
for i in range(len(tourism_attractions_data)):
for j in range(len(food_data)):
this_attractid = str(attractionid[i])
this_foodid = str(foodid[j])
        this_distance = str(distances[i * len(food_data) + j])
output.write(this_attractid)
output.write('\t')
output.write(this_foodid)
output.write('\t')
output.write(this_distance)
output.write('\n')
output.close()
#print(len(distances))
# k = 1
# for i in range(len(tourism_attractions_data)):
# for j in range(len(food_data)):
# this_attractid = attractionid[i]
# this_foodid = foodid[j]
# this_distance = distances[(i + 1)* j]
# sql = r'INSERT INTO DISTANCE_FOOD_ATTRACTION(ATTRACTIONID, FOODID, DISTANCE) VALUES({attractionID}, {foodID}, {distance})'.format(
# attractionID=this_attractid, foodID=this_foodid, distance=this_distance
# )
# print(sql, '>>')
# try:
# stmt = ibm_db.exec_immediate(conn, sql)
# except Exception as e:
# print(e)
# print("Inserting couldn't be completed.")
# ibm_db.rollback(conn)
# else:
# ibm_db.commit(conn)
# print("Inserting complete.")
# print('-----' + str(k) + '-----')
# k += 1
# #
| apache-2.0 |
timqian/sms-tools | lectures/3-Fourier-properties/plots-code/zero-padding.py | 26 | 1083 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming
from scipy.fftpack import fft, fftshift
plt.figure(1, figsize=(9.5, 6))
M = 8
N1 = 8
N2 = 16
N3 = 32
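# Zero-padding the 8-sample windowed cosine to N=16 and N=32 adds no spectral
# resolution; it interpolates the DFT samples, giving a smoother picture of the
# same underlying spectrum.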
x = np.cos(2*np.pi*2/M*np.arange(M)) * np.hanning(M)
plt.subplot(4,1,1)
plt.title('x, M=8')
plt.plot(np.arange(-M/2.0,M/2), x, 'b', marker='x', lw=1.5)
plt.axis([-M/2,M/2-1,-1,1])
mX = 20 * np.log10(np.abs(fftshift(fft(x, N1))))
plt.subplot(4,1,2)
plt.plot(np.arange(-N1/2.0,N1/2), mX, marker='x', color='r', lw=1.5)
plt.axis([-N1/2,N1/2-1,-20,max(mX)+1])
plt.title('magnitude spectrum: mX1, N=8')
mX = 20 * np.log10(np.abs(fftshift(fft(x, N2))))
plt.subplot(4,1,3)
plt.plot(np.arange(-N2/2.0,N2/2),mX,marker='x',color='r', lw=1.5)
plt.axis([-N2/2,N2/2-1,-20,max(mX)+1])
plt.title('magnitude spectrum: mX2, N=16')
mX = 20 * np.log10(np.abs(fftshift(fft(x, N3))))
plt.subplot(4,1,4)
plt.plot(np.arange(-N3/2.0,N3/2),mX,marker='x',color='r', lw=1.5)
plt.axis([-N3/2,N3/2-1,-20,max(mX)+1])
plt.title('magnitude spectrum: mX3, N=32')
plt.tight_layout()
plt.savefig('zero-padding.png')
plt.show()
| agpl-3.0 |
CforED/Machine-Learning | sklearn/exceptions.py | 18 | 4332 | """
The :mod:`sklearn.exceptions` module includes all custom warnings and error
classes used across scikit-learn.
"""
__all__ = ['NotFittedError',
'ChangedBehaviorWarning',
'ConvergenceWarning',
'DataConversionWarning',
'DataDimensionalityWarning',
'EfficiencyWarning',
'FitFailedWarning',
'NonBLASDotWarning',
'UndefinedMetricWarning']
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples
--------
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
"""
class ChangedBehaviorWarning(UserWarning):
"""Warning class used to notify the user of any change in the behavior."""
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataConversionWarning(UserWarning):
"""Warning used to notify implicit data conversions happening in the code.
This warning occurs when some input data needs to be converted or
interpreted in a way that may not match the user's expectations.
    For example, this warning may occur when the user
- passes an integer array to a function which expects float input and
will convert the input
- requests a non-copying operation, but a copy is required to meet the
implementation's data-type expectations;
- passes an input whose shape can be interpreted ambiguously.
"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality.
For example, in random projection, this warning is raised when the
    number of components, which quantifies the dimensionality of the target
projection space, is higher than the number of features, which quantifies
the dimensionality of the original source space, to imply that the
dimensionality of the problem will not be reduced.
"""
class EfficiencyWarning(UserWarning):
"""Warning used to notify the user of inefficient computation.
This warning notifies the user that the efficiency may not be optimal due
to some reason which may be included as a part of the warning message.
This may be subclassed into a more specific Warning class.
"""
class FitFailedWarning(RuntimeWarning):
"""Warning class used if there is an error while fitting the estimator.
This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV
and the cross-validation helper function cross_val_score to warn when there
is an error while fitting the estimator.
Examples
--------
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import FitFailedWarning
>>> import warnings
>>> warnings.simplefilter('always', FitFailedWarning)
>>> gs = GridSearchCV(LinearSVC(), {'C': [-1, -2]}, error_score=0)
>>> X, y = [[1, 2], [3, 4], [5, 6], [7, 8], [8, 9]], [0, 0, 0, 1, 1]
>>> with warnings.catch_warnings(record=True) as w:
... try:
... gs.fit(X, y) # This will raise a ValueError since C is < 0
... except ValueError:
... pass
... print(repr(w[-1].message))
... # doctest: +NORMALIZE_WHITESPACE
FitFailedWarning("Classifier fit failed. The score on this train-test
partition for these parameters will be set to 0.000000. Details:
\\nValueError('Penalty term must be positive; got (C=-2)',)",)
"""
class NonBLASDotWarning(EfficiencyWarning):
"""Warning used when the dot operation does not use BLAS.
This warning is used to notify the user that BLAS was not used for dot
operation and hence the efficiency may be affected.
"""
class UndefinedMetricWarning(UserWarning):
"""Warning used when the metric is invalid"""
| bsd-3-clause |
proyan/sot-torque-control | python/dynamic_graph/sot/torque_control/identification/identify_motor_acc.py | 1 | 2771 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 18:47:50 2017
@author: adelpret
"""
from scipy import signal
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
from identification_utils import solve1stOrderLeastSquare
def identify_motor_acc(dt, dq, ddq, current, tau, Kt_p, Kv_p, ZERO_VELOCITY_THRESHOLD_SMALL,
ZERO_JERK_THRESHOLD, SHOW_THRESHOLD_EFFECT):
#Filter current*****************************************************
win = signal.hann(10)
filtered_current = signal.convolve(current, win, mode='same') / sum(win)
current = filtered_current
# Mask valid data***************************************************
#~ # remove high jerk
dddq = np.gradient(ddq,1)/dt
maskConstAcc = (abs (dddq)<ZERO_JERK_THRESHOLD)
#~ # erode to get only steady phases where acceleration is constant
maskConstAcc=ndimage.morphology.binary_erosion(maskConstAcc,None,100)
maskPosVel=(dq> ZERO_VELOCITY_THRESHOLD_SMALL)
maskNegVel=(dq<-ZERO_VELOCITY_THRESHOLD_SMALL)
maskConstPosAcc=np.logical_and( maskConstAcc ,maskPosVel )
maskConstNegAcc=np.logical_and( maskConstAcc ,maskNegVel )
if SHOW_THRESHOLD_EFFECT :
plt.figure()
plt.plot(ddq); plt.ylabel('ddq')
ddq_const=ddq.copy()
ddq_const[np.logical_not(maskConstAcc)]=np.nan
plt.plot(ddq_const); plt.ylabel('ddq_const')
plt.show()
#~ y = a. x + b
#~ i-Kt.tau-Kv.dq = Ka.ddq + Kf
#~
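    # With the torque (Kt_p*tau) and velocity (Kv_p*dq) contributions subtracted,
    # the residual current is fit as an affine function of acceleration; the
    # positive- and negative-velocity phases are fit separately, giving
    # direction-specific gains (Kap, Kan) and offsets (Kfp, Kfn).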
# Identification ***************************************************
y = current-Kt_p*tau - Kv_p*dq
y[maskConstPosAcc] = current[maskConstPosAcc]-Kt_p*tau[maskConstPosAcc] - Kv_p*dq[maskConstPosAcc]
y[maskConstNegAcc] = current[maskConstNegAcc]-Kt_p*tau[maskConstNegAcc] - Kv_p*dq[maskConstNegAcc]
y_label = r'$i(t)-{K_t}{\tau(t)}-{K_v}{\dot{q}(t)}$'
x = ddq
x_label = r'$\ddot{q}(t)$'
(Kap,Kfp)=solve1stOrderLeastSquare(x[maskConstPosAcc],y[maskConstPosAcc])
(Kan,b)=solve1stOrderLeastSquare(x[maskConstNegAcc],y[maskConstNegAcc])
Kfn=-b
# Plot *************************************************************
plt.figure()
plt.axhline(0, color='black',lw=1)
plt.axvline(0, color='black',lw=1)
plt.plot(x ,y ,'.' ,lw=3,markersize=1,c='0.5');
plt.plot(x[maskConstPosAcc],y[maskConstPosAcc],'rx',lw=3,markersize=1);
plt.plot(x[maskConstNegAcc],y[maskConstNegAcc],'bx',lw=3,markersize=1);
#plot identified lin model
plt.plot([min(x),max(x)],[Kap*min(x)+Kfp ,Kap*max(x)+Kfp],'g:',lw=3)
plt.plot([min(x),max(x)],[Kan*min(x)-Kfn ,Kan*max(x)-Kfn],'g:',lw=3)
plt.ylabel(y_label)
plt.xlabel(x_label)
plt.show()
return (Kap, Kan, Kfp, Kfn) | gpl-3.0 |
bjackman/trappy | trappy/utils.py | 2 | 3208 | # Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generic functions that can be used in multiple places in trappy
"""
def listify(to_select):
"""Utitlity function to handle both single and
list inputs
"""
if not isinstance(to_select, list):
to_select = [to_select]
return to_select
def handle_duplicate_index(data,
max_delta=0.000001):
"""Handle duplicate values in index
:param data: The timeseries input
:type data: :mod:`pandas.Series`
:param max_delta: Maximum interval adjustment value that
will be added to duplicate indices
:type max_delta: float
Consider the following case where a series needs to be reindexed
to a new index (which can be required when different series need to
be combined and compared):
::
import pandas
values = [0, 1, 2, 3, 4]
index = [0.0, 1.0, 1.0, 6.0, 7.0]
series = pandas.Series(values, index=index)
new_index = [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0]
series.reindex(new_index)
The above code fails with:
::
ValueError: cannot reindex from a duplicate axis
    The function :func:`handle_duplicate_index` changes the duplicate values
to
::
    >>> import pandas
    >>> from trappy.utils import handle_duplicate_index
    >>> values = [0, 1, 2, 3, 4]
    >>> index = [0.0, 1.0, 1.0, 6.0, 7.0]
    >>> series = pandas.Series(values, index=index)
    >>> series = handle_duplicate_index(series)
    >>> print series.index.values
    [ 0.  1.  1.000001  6.  7. ]
"""
index = data.index
new_index = index.values
dups = index.get_duplicates()
for dup in dups:
# Leave one of the values intact
dup_index_left = index.searchsorted(dup, side="left")
dup_index_right = index.searchsorted(dup, side="right") - 1
num_dups = dup_index_right - dup_index_left + 1
# Calculate delta that needs to be added to each duplicate
# index
try:
delta = (index[dup_index_right + 1] - dup) / num_dups
except IndexError:
# dup_index_right + 1 is outside of the series (i.e. the
# dup is at the end of the series).
delta = max_delta
# Clamp the maximum delta added to max_delta
if delta > max_delta:
delta = max_delta
# Add a delta to the others
dup_index_left += 1
while dup_index_left <= dup_index_right:
new_index[dup_index_left] += delta
delta += delta
dup_index_left += 1
return data.reindex(new_index)
| apache-2.0 |
stylianos-kampakis/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
jviada/QuantEcon.py | quantecon/models/solow/impulse_response.py | 7 | 10840 | """
Classes for generating and plotting impulse response functions.
@author : David R. Pugh
@date : 2014-10-06
"""
from __future__ import division
from textwrap import dedent
import matplotlib.pyplot as plt
import numpy as np
class ImpulseResponse(object):
"""Base class representing an impulse response function for a Model."""
# number of points to use for "padding"
N = 10
# length of impulse response
T = 100
def __init__(self, model):
"""
Create an instance of the ImpulseResponse class.
Parameters
----------
model : model.Model
Instance of the model.Model class representing a Solow model.
"""
self.model = model
def __repr__(self):
"""Machine readable summary of a ImpulseResponse instance."""
return self.__str__()
def __str__(self):
"""Human readable summary of a ImpulseResponse instance."""
m = """
Impulse response function (IRF):
- N (number of points used for padding) : {N:d}
- T (length of the impulse response) : {T:d}
"""
formatted_str = dedent(m.format(N=self.N, T=self.T))
return formatted_str
@property
def _padding(self):
"""
Impulse response functions are "padded" for pretty plotting.
:getter: Return the current "padding" values.
:type: numpy.ndarray
"""
return np.hstack((self._padding_time, self._padding_variables))
@property
def _padding_scaling_factor(self):
"""
Scaling factor used in constructing the impulse response function
"padding".
:getter: Return the current scaling factor.
:type: numpy.ndarray
"""
# extract the relevant parameters
A0 = self.model.params['A0']
L0 = self.model.params['L0']
g = self.model.params['g']
n = self.model.params['n']
if self.kind == 'per_capita':
factor = A0 * np.exp(g * self._padding_time)
elif self.kind == 'levels':
factor = A0 * L0 * np.exp((g + n) * self._padding_time)
else:
factor = np.ones(self.N)
return factor.reshape((self.N, 1))
@property
def _padding_time(self):
"""
The independent variable, time, is "padded" using values from -N to -1.
:getter: Return the current "padding" values.
:type: numpy.ndarray
"""
return np.linspace(-self.N, -1, self.N).reshape((self.N, 1))
@property
def _padding_variables(self):
"""
Impulse response functions for endogenous variables are "padded" with
N periods of steady state values.
:getter: Return current "padding" values.
        :type: numpy.ndarray
"""
        # economy is initially in steady state
k0 = self.model.steady_state
y0 = self.model.evaluate_intensive_output(k0)
c0 = self.model.evaluate_consumption(k0)
i0 = self.model.evaluate_actual_investment(k0)
        initial_condition = np.array([[k0, y0, c0, i0]])
        return self._padding_scaling_factor * initial_condition
@property
def _response(self):
"""
        Response functions combining the independent and endogenous variables.
:getter: Return the current response values.
:type: numpy.ndarray
"""
return np.hstack((self._response_time, self._response_variables))
@property
def _response_time(self):
"""
The independent variable, time, for the response ranges from 0 to T.
        :getter: Return the current response time values.
:type: numpy.ndarray
"""
return np.linspace(0, self.T, self.T + 1).reshape((self.T + 1, 1))
@property
def _response_variables(self):
"""
Response of endogenous variables to exogenous impulse.
:getter: Return the current response.
:type: numpy.ndarray
"""
        # economy is initially in steady state
k0 = self.model.steady_state
# apply the impulse...force validate params!
tmp_params = self.model.params.copy()
tmp_params.update(self.impulse)
self.model.params = tmp_params
# ...and generate the response
soln = self.model.ivp.solve(t0=0.0, y0=k0, h=1.0, T=self.T,
integrator='dop853')
# gather the results
k = soln[:, 1][:, np.newaxis]
y = self.model.evaluate_intensive_output(k)
c = self.model.evaluate_consumption(k)
i = self.model.evaluate_actual_investment(k)
return self._response_scaling_factor * np.hstack((k, y, c, i))
@property
def _response_scaling_factor(self):
"""
Scaling factor used in constructing the impulse response.
:getter: Return the current scaling factor.
:type: numpy.ndarray
"""
# extract the relevant parameters
A0 = self.model.params['A0']
L0 = self.model.params['L0']
g = self.model.params['g']
n = self.model.params['n']
if self.kind == 'per_capita':
factor = A0 * np.exp(g * self._response_time)
elif self.kind == 'levels':
factor = A0 * L0 * np.exp((g + n) * self._response_time)
else:
factor = np.ones(self.T + 1)
return factor.reshape((self.T + 1, 1))
@property
def impulse(self):
"""
Dictionary of new parameter values representing an impulse.
:getter: Return the current impulse dictionary.
:setter: Set a new impulse dictionary.
:type: dictionary
"""
return self._impulse
@property
def kind(self):
"""
The kind of impulse response function to generate. Must be one of:
'levels', 'per_capita', 'efficiency_units'.
:getter: Return the current kind of impulse responses.
:setter: Set a new value for the kind of impulse responses.
:type: str
"""
return self._kind
@property
def impulse_response(self):
"""
Impulse response functions generated by a shock to model parameter(s).
:getter: Return the current impulse response functions.
:type: numpy.ndarray
"""
orig_params = self.model.params.copy()
# create the irf
tmp_irf = np.vstack((self._padding, self._response))
# reset the model parameters
self.model.params = orig_params
return tmp_irf
@impulse.setter
def impulse(self, params):
"""Set a new impulse dictionary."""
self._impulse = self._validate_impulse(params)
@kind.setter
def kind(self, value):
"""Set a new value for the kind attribute."""
self._kind = self._validate_kind(value)
def _validate_impulse(self, params):
"""Validates the impulse attribute."""
if not isinstance(params, dict):
mesg = "ImpulseResponse.impulse must have type dict, not {}."
raise AttributeError(mesg.format(params.__class__))
elif not set(params.keys()) <= set(self.model.params.keys()):
            mesg = "Invalid parameter included in the impulse dictionary."
raise AttributeError(mesg)
else:
return params
@staticmethod
def _validate_kind(value):
"""Validates the kind attribute."""
valid_kinds = ['levels', 'per_capita', 'efficiency_units']
if value not in valid_kinds:
mesg = "The 'kind' attribute must be in {}."
raise AttributeError(mesg.format(valid_kinds))
else:
return value
def plot_impulse_response(self, ax, variable, log=False):
"""
Plot an impulse response function.
Parameters
----------
ax : `matplotlib.axes.AxesSubplot`
An instance of `matplotlib.axes.AxesSubplot`.
variable : str
Variable whose impulse response functions you wish to plot.
impulse : dict
Dictionary of new parameter values representing the impulse whose
model response you wish to plot.
kind : str (default='efficiency_units')
Whether you want impulse response functions in 'levels',
'per_capita', or 'efficiency_units'.
log : boolean (default=False)
Whether or not to have logarithmic scales on the vertical axes.
Useful when plotting impulse response functions with
kind='per_capita' or kind='levels'.
Returns
-------
A list containing:
        irf_line : matplotlib.lines.Line2D
A Line2D object representing the impulse response for the requested
variable.
        bgp_line : matplotlib.lines.Line2D
A Line2D object representing the pre-impulse balanced growth path
for the model.
"""
# create a mapping from variables to column indices
irf = self.impulse_response
irf_dict = {'capital': irf[:, [0, 1]],
'output': irf[:, [0, 2]],
'consumption': irf[:, [0, 3]],
'investment': irf[:, [0, 4]],
}
# create the plot
traj = irf_dict[variable]
irf_line = ax.plot(traj[:, 0], traj[:, 1])
# add the old balanced growth path
g = self.model.params['g']
n = self.model.params['n']
t = self.N + traj[:, 0]
if self.kind == 'per_capita':
bgp_line = ax.plot(traj[:, 0], traj[0, 1] * np.exp(g * t), 'k--',
label='Original BGP')
ax.set_ylabel(variable.title() + ' (per capita)', fontsize=15,
family='serif')
elif self.kind == 'levels':
bgp_line = ax.plot(traj[:, 0], traj[0, 1] * np.exp((g + n) * t),
'k--', label='Original BGP')
ax.set_ylabel(variable.title(), fontsize=15, family='serif')
else:
bgp_line = ax.axhline(traj[0, 1], linestyle='dashed', color='k',
label='Original BGP')
ax.set_ylabel(variable.title() + ' (per unit effective labor)',
fontsize=15, family='serif')
# format axes, labels, title, legend, etc
ax.set_xlabel('Time', fontsize=15, family='serif')
ax.set_ylim(0.95 * traj[:, 1].min(), 1.05 * traj[:, 1].max())
if log is True:
ax.set_yscale('log')
ax.set_title('Impulse response function', fontsize=20, family='serif')
ax.grid('on')
ax.legend(loc=0, frameon=False, bbox_to_anchor=(1.0, 1.0),
prop={'family': 'serif'})
return [irf_line, bgp_line]
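# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustrative addition, not part of the original
# module). It assumes `model` is an already-constructed Solow model instance
# from this package; the shocked parameter name 'alpha' is illustrative only.
#
#     irf = ImpulseResponse(model)
#     irf.kind = 'efficiency_units'   # or 'per_capita' / 'levels'
#     irf.impulse = {'alpha': 0.35}   # dict of parameter(s) to shock
#     fig, ax = plt.subplots(1, 1)
#     irf.plot_impulse_response(ax, variable='output')
#     plt.show()
# ---------------------------------------------------------------------------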
| bsd-3-clause |
Fokko/incubator-airflow | airflow/hooks/hive_hooks.py | 1 | 39213 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import os
import re
import socket
import subprocess
import time
from collections import OrderedDict
from tempfile import NamedTemporaryFile
import unicodecsv as csv
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.security import utils
from airflow.utils.file import TemporaryDirectory
from airflow.utils.helpers import as_flattened_list
from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
def get_context_from_env_var():
"""
Extract context from env variable, e.g. dag_id, task_id and execution_date,
so that they can be used inside BashOperator and PythonOperator.
:return: The context of interest.
"""
return {format_map['default']: os.environ.get(format_map['env_var_format'], '')
for format_map in AIRFLOW_VAR_NAME_FORMAT_MAPPING.values()}
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
    It also supports ``beeline``,
    a lighter CLI that uses JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
Parameters passed here can be overridden by run_cli's hive_conf param
The extra connection parameter ``auth`` gets passed as in the ``jdbc``
connection string as is.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: str
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: str
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: str
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default",
run_as=None,
mapred_queue=None,
mapred_queue_priority=None,
mapred_job_name=None):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.auth = conn.extra_dejson.get('auth', 'noSasl')
self.conn = conn
self.run_as = run_as
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
"Invalid Mapred Queue Priority. Valid values are: "
"{}".format(', '.join(HIVE_QUEUE_PRIORITIES)))
self.mapred_queue = mapred_queue or conf.get('hive',
'default_hive_mapred_queue')
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _get_proxy_user(self):
"""
        This function sets the proper proxy_user value in case the user overwrites the default.
"""
conn = self.conn
proxy_user_value = conn.extra_dejson.get('proxy_user', "")
if proxy_user_value == "login" and conn.login:
return "hive.server2.proxy.user={0}".format(conn.login)
if proxy_user_value == "owner" and self.run_as:
return "hive.server2.proxy.user={0}".format(self.run_as)
if proxy_user_value != "": # There is a custom proxy user
return "hive.server2.proxy.user={0}".format(proxy_user_value)
return proxy_user_value # The default proxy user (undefined)
def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{host}:{port}/{schema}".format(
host=conn.host, port=conn.port, schema=conn.schema)
if conf.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/_HOST@EXAMPLE.COM")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = self._get_proxy_user()
jdbc_url += ";principal={template};{proxy_user}".format(
template=template, proxy_user=proxy_user)
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = '"{}"'.format(jdbc_url)
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
@staticmethod
def _prepare_hiveconf(d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
        :param d: dict of key/value pairs to translate into ``-hiveconf`` options
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
zip(["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()])
)
def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(schema=schema, hql=hql)
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
hql = hql + '\n'
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
env_context = get_context_from_env_var()
# Only extend the hive_conf if it is defined.
if hive_conf:
env_context.update(hive_conf)
hive_conf_params = self._prepare_hiveconf(env_context)
if self.mapred_queue:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.queuename={}'
.format(self.mapred_queue),
'-hiveconf',
'mapred.job.queue.name={}'
.format(self.mapred_queue),
'-hiveconf',
'tez.queue.name={}'
.format(self.mapred_queue)
])
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.priority={}'
.format(self.mapred_queue_priority)])
if self.mapred_job_name:
hive_conf_params.extend(
['-hiveconf',
'mapred.job.name={}'
.format(self.mapred_job_name)])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
self.log.info("%s", " ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir,
close_fds=True)
self.sp = sp
stdout = ''
while True:
line = sp.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
self.log.info(line.decode('UTF-8').strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql):
"""
Test an hql statement using the hive cli and EXPLAIN
"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ',
'add jar ',
'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
self.log.info("Testing HQL [%s (...)]", query_preview)
if query_set == insert:
query = other + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
self.log.info(message)
error_loc = re.search(r'(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
lst = int(error_loc.group(1))
begin = max(lst - 2, 0)
end = min(lst + 3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
self.log.info("Context :\n %s", context)
else:
self.log.info("SUCCESS")
def load_df(
self,
df,
table,
field_dict=None,
delimiter=',',
encoding='utf8',
pandas_kwargs=None, **kwargs):
"""
Loads a pandas DataFrame into hive.
Hive data types will be inferred if not passed but column names will
not be sanitized.
:param df: DataFrame to load into a Hive table
:type df: pandas.DataFrame
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param field_dict: mapping from column name to hive data type.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param delimiter: field delimiter in the file
:type delimiter: str
:param encoding: str encoding to use when writing DataFrame to file
:type encoding: str
:param pandas_kwargs: passed to DataFrame.to_csv
:type pandas_kwargs: dict
:param kwargs: passed to self.load_file
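        Example (an illustrative sketch, added for clarity; it assumes a
        reachable Hive connection configured for ``HiveCliHook`` and is not
        run as a doctest)::
            import pandas as pd
            hh = HiveCliHook()
            df = pd.DataFrame({"state": ["NY", "TX"], "num": [10, 20]})
            hh.load_df(df, table="default.babynames_stage", recreate=True)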
"""
def _infer_field_types_from_df(df):
DTYPE_KIND_HIVE_TYPE = {
'b': 'BOOLEAN', # boolean
'i': 'BIGINT', # signed integer
'u': 'BIGINT', # unsigned integer
'f': 'DOUBLE', # floating-point
'c': 'STRING', # complex floating-point
'M': 'TIMESTAMP', # datetime
'O': 'STRING', # object
'S': 'STRING', # (byte-)string
'U': 'STRING', # Unicode
'V': 'STRING' # void
}
d = OrderedDict()
for col, dtype in df.dtypes.iteritems():
d[col] = DTYPE_KIND_HIVE_TYPE[dtype.kind]
return d
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, mode="w") as f:
if field_dict is None:
field_dict = _infer_field_types_from_df(df)
df.to_csv(path_or_buf=f,
sep=delimiter,
header=False,
index=False,
encoding=encoding,
date_format="%Y-%m-%d %H:%M:%S",
**pandas_kwargs)
f.flush()
return self.load_file(filepath=f.name,
table=table,
delimiter=delimiter,
field_dict=field_dict,
**kwargs)
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False,
tblproperties=None):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
        large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:type filepath: str
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param delimiter: field delimiter in the file
:type delimiter: str
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values.
Note that it must be OrderedDict so as to keep columns' order.
:type field_dict: collections.OrderedDict
:param create: whether to create the table if it doesn't exist
:type create: bool
:param overwrite: whether to overwrite the data in table or partition
:type overwrite: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n".format(table=table)
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(
['`{k}` {v}'.format(k=k.strip('`'), v=v) for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n".format(
table=table, fields=fields)
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n".format(pfields=pfields)
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n".format(delimiter=delimiter)
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(
["'{0}'='{1}'".format(k, v) for k, v in tblproperties.items()])
hql += "TBLPROPERTIES({tprops})\n".format(tprops=tprops)
hql += ";"
self.log.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' ".format(filepath=filepath)
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} ".format(table=table)
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals})".format(pvals=pvals)
# As a workaround for HIVE-10541, add a newline character
# at the end of hql (AIRFLOW-2412).
hql += ';\n'
self.log.info(hql)
self.run_cli(hql)
def kill(self):
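        """Terminate a running Hive subprocess, escalating to SIGKILL if it has not exited after 60 seconds."""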
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.terminate()
time.sleep(60)
self.sp.kill()
class HiveMetastoreHook(BaseHook):
""" Wrapper to interact with the Hive Metastore"""
# java short max val
MAX_PART_COUNT = 32767
def __init__(self, metastore_conn_id='metastore_default'):
self.conn_id = metastore_conn_id
self.metastore = self.get_metastore_client()
def __getstate__(self):
        # This is for pickling to work despite the thrift hive client not
        # being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
import hmsclient
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
ms = self._find_valid_server()
if ms is None:
raise AirflowException("Failed to locate the valid server.")
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if conf.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
conn_socket = TSocket.TSocket(ms.host, ms.port)
if conf.get('core', 'security') == 'kerberos' \
and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", conn_socket)
else:
transport = TTransport.TBufferedTransport(conn_socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return hmsclient.HMSClient(iprot=protocol)
def _find_valid_server(self):
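        """Return the first metastore connection whose host/port accepts a TCP connection, or None if none can be reached."""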
conns = self.get_connections(self.conn_id)
for conn in conns:
host_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.log.info("Trying to connect to %s:%s", conn.host, conn.port)
if host_socket.connect_ex((conn.host, conn.port)) == 0:
self.log.info("Connected to %s:%s", conn.host, conn.port)
host_socket.close()
return conn
else:
self.log.info("Could not connect to %s:%s", conn.host, conn.port)
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
        :type table: str
        :param partition: Expression that matches the partitions to check for
            (e.g. `a = 'b' AND c = 'd'`)
        :type partition: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
with self.metastore as client:
partitions = client.get_partitions_by_filter(
schema, table, partition, 1)
if partitions:
return True
else:
return False
def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: str
:param table: Name of hive table @partition belongs to
        :type table: str
        :param partition_name: Name of the partition to check for (e.g. `a=b/c=d`)
        :type partition_name: str
:rtype: bool
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
with self.metastore as client:
return client.check_for_named_partition(schema, table, partition_name)
def get_table(self, table_name, db='default'):
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
with self.metastore as client:
return client.get_table(dbname=db, tbl_name=table_name)
def get_tables(self, db, pattern='*'):
"""
        Get metastore table objects for the tables in ``db`` that match the pattern
"""
with self.metastore as client:
tables = client.get_tables(db_name=db, pattern=pattern)
return client.get_table_objects_by_name(db, tables)
def get_databases(self, pattern='*'):
"""
        Get the metastore databases that match the pattern
"""
with self.metastore as client:
return client.get_databases(pattern)
def get_partitions(
self, schema, table_name, filter=None):
"""
Returns a list of all partitions in a table. Works only
for tables with less than 32767 (java short max val).
        For subpartitioned tables, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = client.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=HiveMetastoreHook.MAX_PART_COUNT)
else:
parts = client.get_partitions(
db_name=schema, tbl_name=table_name,
max_parts=HiveMetastoreHook.MAX_PART_COUNT)
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
@staticmethod
def _get_max_partition_from_part_specs(part_specs, partition_key, filter_map):
"""
Helper method to get max partition of partitions with partition_key
from part specs. key:value pair in filter_map will be used to
filter out partitions.
:param part_specs: list of partition specs.
:type part_specs: list
:param partition_key: partition key name.
:type partition_key: str
:param filter_map: partition_key:partition_value map used for partition filtering,
e.g. {'key1': 'value1', 'key2': 'value2'}.
Only partitions matching all partition_key:partition_value
pairs will be considered as candidates of max partition.
:type filter_map: map
:return: Max partition or None if part_specs is empty.
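        The following doctest is an illustrative addition: it exercises only
        the pure-Python filtering logic, so it needs no Hive connection.
        >>> HiveMetastoreHook._get_max_partition_from_part_specs(
        ...     [{'ds': '2015-01-01'}, {'ds': '2015-01-02'}], 'ds', None)
        b'2015-01-02'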
"""
if not part_specs:
return None
# Assuming all specs have the same keys.
if partition_key not in part_specs[0].keys():
raise AirflowException("Provided partition_key {} "
"is not in part_specs.".format(partition_key))
if filter_map:
is_subset = set(filter_map.keys()).issubset(set(part_specs[0].keys()))
if filter_map and not is_subset:
raise AirflowException("Keys in provided filter_map {} "
"are not subset of part_spec keys: {}"
.format(', '.join(filter_map.keys()),
', '.join(part_specs[0].keys())))
candidates = [p_dict[partition_key] for p_dict in part_specs
if filter_map is None or
all(item in p_dict.items() for item in filter_map.items())]
if not candidates:
return None
else:
return max(candidates).encode('utf-8')
def max_partition(self, schema, table_name, field=None, filter_map=None):
"""
Returns the maximum value for all partitions with given field in a table.
        If only one partition key exists in the table, the key will be used as field.
filter_map should be a partition_key:partition_value map and will be used to
filter out partitions.
:param schema: schema name.
:type schema: str
:param table_name: table name.
:type table_name: str
:param field: partition key to get max partition from.
:type field: str
:param filter_map: partition_key:partition_value map used for partition filtering.
:type filter_map: map
>>> hh = HiveMetastoreHook()
        >>> filter_map = {'ds': '2015-01-01'}
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow',\
... table_name=t, field='ds', filter_map=filter_map)
'2015-01-01'
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
key_name_set = {key.name for key in table.partitionKeys}
if len(table.partitionKeys) == 1:
field = table.partitionKeys[0].name
elif not field:
raise AirflowException("Please specify the field you want the max "
"value for.")
elif field not in key_name_set:
raise AirflowException("Provided field is not a partition key.")
if filter_map and not set(filter_map.keys()).issubset(key_name_set):
raise AirflowException("Provided filter_map contains keys "
"that are not partition key.")
part_names = \
client.get_partition_names(schema,
table_name,
max_parts=HiveMetastoreHook.MAX_PART_COUNT)
part_specs = [client.partition_name_to_spec(part_name)
for part_name in part_names]
return HiveMetastoreHook._get_max_partition_from_part_specs(part_specs,
field,
filter_map)
def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
self.get_table(table_name, db)
return True
except Exception:
return False
class HiveServer2Hook(BaseHook):
"""
Wrapper around the pyhive library
Notes:
    * the default authMechanism is NONE; to override it you
can specify it in the ``extra`` of your connection in the UI
* the default for run_set_variable_statements is true, if you
are using impala you may need to set it to false in the
``extra`` of your connection in the UI
"""
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self, schema=None):
"""
Returns a Hive connection object.
"""
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'NONE')
if auth_mechanism == 'NONE' and db.login is None:
# we need to give a username
username = 'airflow'
kerberos_service_name = None
if conf.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'KERBEROS')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
        # pyhive uses GSSAPI instead of KERBEROS as an auth_mechanism identifier
if auth_mechanism == 'GSSAPI':
self.log.warning(
"Detected deprecated 'GSSAPI' for authMechanism "
"for %s. Please use 'KERBEROS' instead",
self.hiveserver2_conn_id
)
auth_mechanism = 'KERBEROS'
from pyhive.hive import connect
return connect(
host=db.host,
port=db.port,
auth=auth_mechanism,
kerberos_service_name=kerberos_service_name,
username=db.login or username,
password=db.password,
database=schema or db.schema or 'default')
def _get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
from pyhive.exc import ProgrammingError
if isinstance(hql, str):
hql = [hql]
previous_description = None
with contextlib.closing(self.get_conn(schema)) as conn, \
contextlib.closing(conn.cursor()) as cur:
cur.arraysize = fetch_size or 1000
# not all query services (e.g. impala AIRFLOW-4434) support the set command
db = self.get_connection(self.hiveserver2_conn_id)
if db.extra_dejson.get('run_set_variable_statements', True):
env_context = get_context_from_env_var()
if hive_conf:
env_context.update(hive_conf)
for k, v in env_context.items():
cur.execute("set {}={}".format(k, v))
for statement in hql:
cur.execute(statement)
# we only get results of statements that returns
lowered_statement = statement.lower().strip()
if (lowered_statement.startswith('select') or
lowered_statement.startswith('with') or
lowered_statement.startswith('show') or
(lowered_statement.startswith('set') and
'=' not in lowered_statement)):
description = [c for c in cur.description]
if previous_description and previous_description != description:
message = '''The statements are producing different descriptions:
Current: {}
Previous: {}'''.format(repr(description),
repr(previous_description))
raise ValueError(message)
elif not previous_description:
previous_description = description
yield description
try:
# DB API 2 raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
yield from cur
except ProgrammingError:
self.log.debug("get_results returned no records")
def get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
"""
Get results of the provided hql in target schema.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param fetch_size: max size of result to fetch.
:type fetch_size: int
        :param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
:return: results of hql execution, dict with data (list of results) and header
:rtype: dict
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
results = {
'data': list(results_iter),
'header': header
}
return results
def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000,
hive_conf=None):
"""
Execute hql in target schema and write results to a csv file.
:param hql: hql to be executed.
:type hql: str or list
:param csv_filepath: filepath of csv to write results into.
:type csv_filepath: str
:param schema: target schema, default to 'default'.
:type schema: str
:param delimiter: delimiter of the csv file, default to ','.
:type delimiter: str
:param lineterminator: lineterminator of the csv file.
:type lineterminator: str
:param output_header: header of the csv file, default to True.
:type output_header: bool
:param fetch_size: number of result rows to write into the csv file, default to 1000.
:type fetch_size: int
        :param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
"""
results_iter = self._get_results(hql, schema,
fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
message = None
i = 0
with open(csv_filepath, 'wb') as file:
writer = csv.writer(file,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
try:
if output_header:
self.log.debug('Cursor description is %s', header)
writer.writerow([c[0] for c in header])
for i, row in enumerate(results_iter, 1):
writer.writerow(row)
if i % fetch_size == 0:
self.log.info("Written %s rows so far.", i)
except ValueError as exception:
message = str(exception)
if message:
# need to clean up the file first
os.remove(csv_filepath)
raise ValueError(message)
self.log.info("Done. Loaded a total of %s rows.", i)
def get_records(self, hql, schema='default', hive_conf=None):
"""
Get a set of records from a Hive query.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
        :param hive_conf: hive_conf to execute along with the hql.
:type hive_conf: dict
:return: result of hive execution
:rtype: list
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema, hive_conf=hive_conf)['data']
def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:return: result of hql execution
        :rtype: pandas.DataFrame
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df
| apache-2.0 |
rbooth200/DiscEvolution | DiscEvolution/driver.py | 1 | 12630 | # driver.py
#
# Author: R. Booth
# Date: 17 - Nov - 2016
#
# Combined model for dust, gas and chemical evolution
################################################################################
from __future__ import print_function
import numpy as np
import os
from .photoevaporation import FixedExternalEvaporation
from .constants import yr
from . import io
class DiscEvolutionDriver(object):
"""Driver class for full evolution model.
Required Arguments:
disc : Disc model to update
Optional Physics update:
dust : Update the dust, i.e. radial drift
gas : Update due to gas effects, i.e. Viscous evolution
        diffusion : Separate diffusion update
internal_photo : Remove gas by internal photoevaporation
photoevaporation : Remove gas by external photoevaporation
chemistry : Solver for the chemical evolution
History:
history : Tracks values of key parameters over time
Note: Diffusion is usually handled in the dust dynamics module
Other options:
t0 : Starting time, default = 0, code units
        t_out : Previous output times, default = None, years
"""
def __init__(self, disc, gas=None, dust=None, diffusion=None, chemistry=None, ext_photoevaporation=None, int_photoevaporation=None, history=None, t0=0.):
self._disc = disc
self._gas = gas
self._dust = dust
self._diffusion = diffusion
self._chemistry = chemistry
self._external_photo = ext_photoevaporation
self._internal_photo = int_photoevaporation
self._history = history
self._t = t0
self._nstep = 0
def __call__(self, tmax):
"""Evolve the disc for a single timestep
args:
            tmax : Time to evolve up to; sets the upper limit on the time-step
returns:
dt : Time step taken
"""
disc = self._disc
# Compute the maximum time-step
dt = tmax - self.t
if self._gas:
dt = min(dt, self._gas.max_timestep(self._disc))
if self._dust:
v_visc = self._gas.viscous_velocity(disc)
dt = min(dt, self._dust.max_timestep(self._disc, v_visc))
if self._dust._diffuse:
dt = min(dt, self._dust._diffuse.max_timestep(self._disc))
if self._diffusion:
dt = min(dt, self._diffusion.max_timestep(self._disc))
if self._external_photo and hasattr(self._external_photo,"_density"): # If we are using density to calculate mass loss rates, we need to limit the time step based on photoevaporation
(dM_dot, dM_gas) = self._external_photo.optically_thin_weighting(disc)
Dt = dM_gas[(dM_dot>0)] / dM_dot[(dM_dot>0)]
Dt_min = np.min(Dt)
dt = min(dt,Dt_min)
# Determine tracers for dust step
gas_chem, ice_chem = None, None
dust = None
try:
gas_chem = disc.chem.gas.data
ice_chem = disc.chem.ice.data
except AttributeError:
pass
# Do dust evolution
if self._dust:
self._dust(dt, disc,
gas_tracers=gas_chem,
dust_tracers=ice_chem, v_visc=v_visc)
# Determine tracers for gas steps
try:
gas_chem = disc.chem.gas.data
ice_chem = disc.chem.ice.data
except AttributeError:
pass
try:
dust = disc.dust_frac
except AttributeError:
pass
# Do Advection-diffusion update
if self._gas:
self._gas(dt, disc, [dust, gas_chem, ice_chem])
if self._diffusion:
if gas_chem is not None:
gas_chem[:] += dt * self._diffusion(disc, gas_chem)
if ice_chem is not None:
ice_chem[:] += dt * self._diffusion(disc, ice_chem)
if dust is not None:
dust[:] += dt * self._diffusion(disc, dust)
# Do external photoevaporation
if self._external_photo:
self._external_photo(disc, dt)
# Do internal photoevaporation
if self._internal_photo:
self._internal_photo(disc, dt/yr, self._external_photo)
# Pin the values to >= 0 and <=1:
disc.Sigma[:] = np.maximum(disc.Sigma, 0)
try:
disc.dust_frac[:] = np.maximum(disc.dust_frac, 0)
disc.dust_frac[:] /= np.maximum(disc.dust_frac.sum(0), 1.0)
except AttributeError:
pass
try:
disc.chem.gas.data[:] = np.maximum(disc.chem.gas.data, 0)
disc.chem.ice.data[:] = np.maximum(disc.chem.ice.data, 0)
except AttributeError:
pass
# Chemistry
if self._chemistry:
rho = disc.midplane_gas_density
eps = disc.dust_frac.sum(0)
grain_size = disc.grain_size[-1]
T = disc.T
self._chemistry.update(dt, T, rho, eps, disc.chem,
grain_size=grain_size)
# If we have dust, we should update it now the ice fraction has
# changed
disc.update_ices(disc.chem.ice)
        # Now we should update the auxiliary properties, do grain growth etc
disc.update(dt)
self._t += dt
self._nstep += 1
return dt
@property
def disc(self):
return self._disc
@property
def t(self):
return self._t
@property
def num_steps(self):
return self._nstep
@property
def gas(self):
return self._gas
@property
def dust(self):
return self._dust
@property
def diffusion(self):
return self._diffusion
@property
def chemistry(self):
return self._chemistry
@property
def photoevaporation_external(self):
return self._external_photo
@property
def photoevaporation_internal(self):
return self._internal_photo
@property
def history(self):
return self._history
def dump_ASCII(self, filename):
"""Write the current state to a file, including header information"""
# Put together a header containing information about the physics
# included
head = ''
if self._gas:
head += self._gas.ASCII_header() + '\n'
if self._dust:
head += self._dust.ASCII_header() + '\n'
if self._diffusion:
head += self._diffusion.ASCII_header() + '\n'
if self._chemistry:
head += self._chemistry.ASCII_header() + '\n'
if self._external_photo:
head += self._external_photo.ASCII_header() + '\n'
if self._internal_photo:
head += self._internal_photo.ASCII_header() + '\n'
# Write it all to disc
io.dump_ASCII(filename, self._disc, self.t, head)
def dump_hdf5(self, filename):
"""Write the current state in HDF5 format, with header information"""
headers = []
if self._gas: headers.append(self._gas.HDF5_attributes())
if self._dust: headers.append(self._dust.HDF5_attributes())
if self._diffusion: headers.append(self._diffusion.HDF5_attributes())
if self._chemistry: headers.append(self._chemistry.HDF5_attributes())
if self._external_photo: headers.append(self._external_photo.HDF5_attributes())
if self._internal_photo: headers.append(self._internal_photo.HDF5_attributes())
io.dump_hdf5(filename, self._disc, self.t, headers)
if __name__ == "__main__":
from .star import SimpleStar
from .grid import Grid
from .eos import IrradiatedEOS
from .viscous_evolution import ViscousEvolution
from .dust import DustGrowthTwoPop, SingleFluidDrift
from .opacity import Zhu2012, Tazzari2016
from .diffusion import TracerDiffusion
from .chemistry import TimeDepCOChemOberg, SimpleCOAtomAbund
from .constants import Msun, AU
from .disc_utils import mkdir_p
import matplotlib.pyplot as plt
alpha = 1e-3
Mdot = 1e-8
Rd = 100.
#kappa = Zhu2012
kappa = Tazzari2016()
N_cell = 250
R_in = 0.1
R_out = 500.
yr = 2*np.pi
output_dir = 'test_DiscEvo'
output_times = np.arange(0, 4) * 1e6 * yr
plot_times = np.array([0, 1e4, 1e5, 5e5, 1e6, 3e6])*yr
# Setup the initial conditions
Mdot *= (Msun / yr) / AU**2
grid = Grid(R_in, R_out, N_cell, spacing='natural')
star = SimpleStar(M=1, R=2.5, T_eff=4000.)
# Initial guess for Sigma:
R = grid.Rc
Sigma = (Mdot / (0.1 * alpha * R**2 * star.Omega_k(R))) * np.exp(-R/Rd)
# Iterate until constant Mdot
eos = IrradiatedEOS(star, alpha, kappa=kappa)
eos.set_grid(grid)
eos.update(0, Sigma)
for i in range(100):
Sigma = 0.5 * (Sigma + (Mdot / (3 * np.pi * eos.nu)) * np.exp(-R/Rd))
eos.update(0, Sigma)
# Create the disc object
disc = DustGrowthTwoPop(grid, star, eos, 0.01, Sigma=Sigma)
# Setup the chemistry
chemistry = TimeDepCOChemOberg(a=1e-5)
# Setup the dust-to-gas ratio from the chemistry
solar_abund = SimpleCOAtomAbund(N_cell)
solar_abund.set_solar_abundances()
# Iterate ice fractions to get the dust-to-gas ratio:
for i in range(10):
chem = chemistry.equilibrium_chem(disc.T,
disc.midplane_gas_density,
disc.dust_frac.sum(0),
solar_abund)
disc.initialize_dust_density(chem.ice.total_abund)
disc.chem = chem
# Setup the dynamics modules:
gas = ViscousEvolution()
dust = SingleFluidDrift(TracerDiffusion())
evo = DiscEvolutionDriver(disc, gas=gas, dust=dust, chemistry=chemistry)
# Setup the IO controller
IO = io.Event_Controller(save=output_times, plot=plot_times)
# Run the model!
while not IO.finished():
ti = IO.next_event_time()
while evo.t < ti:
dt = evo(ti)
if (evo.num_steps % 1000) == 0:
print('Nstep: {}'.format(evo.num_steps))
print('Time: {} yr'.format(evo.t / yr))
print('dt: {} yr'.format(dt / yr))
if IO.check_event(evo.t, 'save'):
from .disc_utils import mkdir_p
mkdir_p(output_dir)
snap_name = 'disc_{:04d}.dat'.format(IO.event_number('save'))
evo.dump_ASCII(os.path.join(output_dir, snap_name))
snap_name = 'disc_{:04d}.h5'.format(IO.event_number('save'))
evo.dump_hdf5(os.path.join(output_dir, snap_name))
if IO.check_event(evo.t, 'plot'):
err_state = np.seterr(all='warn')
print('Nstep: {}'.format(evo.num_steps))
print('Time: {} yr'.format(evo.t / (2 * np.pi)))
plt.subplot(321)
l, = plt.loglog(grid.Rc, evo.disc.Sigma_G)
plt.loglog(grid.Rc, evo.disc.Sigma_D.sum(0), '--', c=l.get_color())
plt.xlabel('$R$')
plt.ylabel('$\Sigma_\mathrm{G, D}$')
plt.subplot(322)
plt.loglog(grid.Rc, evo.disc.dust_frac.sum(0))
plt.xlabel('$R$')
plt.ylabel('$\epsilon$')
plt.subplot(323)
plt.loglog(grid.Rc, evo.disc.Stokes()[1])
plt.xlabel('$R$')
plt.ylabel('$St$')
plt.subplot(324)
plt.loglog(grid.Rc, evo.disc.grain_size[1])
plt.xlabel('$R$')
plt.ylabel('$a\,[\mathrm{cm}]$')
plt.subplot(325)
gCO = evo.disc.chem.gas.atomic_abundance()
sCO = evo.disc.chem.ice.atomic_abundance()
gCO.data[:] /= solar_abund.data
sCO.data[:] /= solar_abund.data
c = l.get_color()
plt.semilogx(grid.Rc, gCO['C'], '-', c=c, linewidth=1)
plt.semilogx(grid.Rc, gCO['O'], '-', c=c, linewidth=2)
plt.semilogx(grid.Rc, sCO['C'], ':', c=c, linewidth=1)
plt.semilogx(grid.Rc, sCO['O'], ':', c=c, linewidth=2)
            plt.xlabel('$R\,[\mathrm{au}]$')
plt.ylabel('$[X]_\mathrm{solar}$')
plt.subplot(326)
plt.semilogx(grid.Rc, gCO['C'] / gCO['O'], '-', c=c)
plt.semilogx(grid.Rc, sCO['C'] / sCO['O'], ':', c=c)
            plt.xlabel('$R\,[\mathrm{au}]$')
plt.ylabel('$[C/O]_\mathrm{solar}$')
np.seterr(**err_state)
IO.pop_events(evo.t)
if len(plot_times) > 0:
plt.show()
| gpl-3.0 |
mosbys/Clone | Cloning_v1/drive.py | 1 | 3838 | import argparse
import base64
import json
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from PIL import ImageOps
from flask import Flask, render_template
from io import BytesIO
from random import randint
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array
import cv2
# Fix error with Keras and TensorFlow
import tensorflow as tf
import matplotlib.pyplot as plt
tf.python.control_flow_ops = tf
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
iDebug = 0
def preprocess(image, top_offset=.375, bottom_offset=.125):
"""
    Applies preprocessing pipeline to an image: crops `top_offset` and `bottom_offset`
    portions of the image and resizes it to half its original width and height.
"""
top = int(top_offset * image.shape[0])
bottom = int(bottom_offset * image.shape[0])
image = image[top:-bottom, :]
newShape = image.shape
image= cv2.resize(image,(int(newShape[1]/2), int(newShape[0]/2)), interpolation = cv2.INTER_CUBIC)
return image
@sio.on('telemetry')
def telemetry(sid, data):
# The current steering angle of the car
steering_angle = data["steering_angle"]
# The current throttle of the car
throttle = data["throttle"]
# The current speed of the car
speed = data["speed"]
# The current image from the center camera of the car
imgString = data["image"]
image = Image.open(BytesIO(base64.b64decode(imgString)))
image_array = np.asarray(image)
image_array=preprocess(image_array)
newShape = image_array.shape
#image_array=cv2.resize(image_array,(newShape[1], newShape[0]),interpolation=cv2.INTER_CUBIC)
transformed_image_array = image_array[None, :, :, :]
if (iDebug==1):
plt.imshow(image_array)
plt.show()
#transformed_image_array2 = np.zeros([1,2*64,64,3])
#transformed_image_array2[0]=cv2.resize(transformed_image_array[0],(2*64, 64),interpolation=cv2.INTER_CUBIC)
# This model currently assumes that the features of the model are just the images. Feel free to change this.
steering_angle = float(model.predict(transformed_image_array, batch_size=1))
# The driving model currently just outputs a constant throttle. Feel free to edit this.
#steering_angle = randint(0,100)/100*randint(-1,1);
throttle = 0.2
print(steering_angle, throttle)
send_control(steering_angle, throttle)
@sio.on('connect')
def connect(sid, environ):
print("connect ", sid)
send_control(0, 0)
def send_control(steering_angle, throttle):
sio.emit("steer", data={
'steering_angle': steering_angle.__str__(),
'throttle': throttle.__str__()
}, skip_sid=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument('model', type=str,
help='Path to model definition json. Model weights should be on the same path.')
args = parser.parse_args()
with open(args.model, 'r') as jfile:
# NOTE: if you saved the file by calling json.dump(model.to_json(), ...)
# then you will have to call:
#
# model = model_from_json(json.loads(jfile.read()))\
#
# instead.
#model = model_from_json(jfile.read())
model = model_from_json(json.loads(jfile.read()))
model.compile("adam", "mse")
weights_file = args.model.replace('json', 'h5')
model.load_weights(weights_file)
# wrap Flask application with engineio's middleware
app = socketio.Middleware(sio, app)
# deploy as an eventlet WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app) | gpl-2.0 |
phoebe-project/phoebe2-docs | development/tutorials/general_concepts.py | 2 | 14093 | #!/usr/bin/env python
# coding: utf-8
# General Concepts: The PHOEBE Bundle
# ======================
#
# **HOW TO RUN THIS FILE**: if you're running this in a Jupyter notebook or Google Colab session, you can click on a cell and then shift+Enter to run the cell and automatically select the next cell. Alt+Enter will run a cell and create a new cell below it. Ctrl+Enter will run a cell but keep it selected. To restart from scratch, restart the kernel/runtime.
#
#
# All of these tutorials assume basic comfort with Python in general - particularly with the concepts of lists, dictionaries, and objects as well as basic comfort with using the numpy and matplotlib packages. This tutorial introduces all the general concepts of accessing parameters within the Bundle.
#
# Setup
# ----------------------------------------------
#
# Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
# In[1]:
#!pip install -I "phoebe>=2.3,<2.4"
# Let's get started with some basic imports:
# In[2]:
import phoebe
from phoebe import u # units
# If running in IPython notebooks, you may see a "ShimWarning" depending on the version of Jupyter you are using - this is safe to ignore.
#
# PHOEBE 2 uses constants defined in the IAU 2015 Resolution which conflict with the constants defined in astropy. As a result, you'll see the warnings as phoebe.u and phoebe.c "hijacks" the values in astropy.units and astropy.constants.
#
# Whenever providing units, please make sure to use `phoebe.u` instead of `astropy.units`, otherwise the conversions may be inconsistent.
# ### Logger
#
# Before starting any script, it is a good habit to initialize a logger and define which levels of information you want printed to the command line (clevel) and dumped to a file (flevel). A convenience function is provided at the top-level via [phoebe.logger](../api/phoebe.logger.md) to initialize the logger with any desired level.
#
# The levels from most to least information are:
#
# * DEBUG
# * INFO
# * WARNING
# * ERROR
# * CRITICAL
#
# In[3]:
logger = phoebe.logger(clevel='WARNING')
# All of these arguments are optional and will default to clevel='WARNING' if not provided. There is therefore no need to provide a filename if you don't provide a value for flevel.
#
# So with this logger, anything with WARNING, ERROR, or CRITICAL levels will be printed to the screen. Since no flevel or filename was provided, nothing is written to a log file.
#
# Note: the logger messages are not included in the outputs shown below.
#
# ## Overview
#
# As a quick overview of what's to come, here is a quick preview of some of the steps used when modeling a binary system with PHOEBE. Each of these steps will be explained in more detail throughout these tutorials.
#
# First we need to create our binary system. For the sake of most of these tutorials, we'll use the default detached binary available through the [phoebe.default_binary](../api/phoebe.default_binary.md) constructor.
# In[4]:
b = phoebe.default_binary()
# This object holds all the parameters and their respective values. We'll see in this tutorial and the next tutorial on [constraints](constraints.ipynb) how to search through these parameters and set their values.
# In[5]:
b.set_value(qualifier='teff', component='primary', value=6500)
# Next, we need to define our datasets via [b.add_dataset](../api/phoebe.frontend.bundle.Bundle.add_dataset.md). This will be the topic of the following tutorial on [datasets](datasets.ipynb).
# In[6]:
b.add_dataset('lc', compute_times=phoebe.linspace(0,1,101))
# We'll then want to run our forward model to create a synthetic model of the observables defined by these datasets using [b.run_compute](../api/phoebe.frontend.bundle.Bundle.run_compute.md), which will be the topic of the [computing observables](compute.ipynb) tutorial.
# In[7]:
b.run_compute()
# We can access the value of any parameter, including the arrays in the synthetic model just generated. To export arrays to a file, we could call [b.export_arrays](../api/phoebe.parameters.ParameterSet.export_arrays.md)
# In[8]:
print(b.get_value(qualifier='fluxes', context='model'))
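# For example (an illustrative sketch, not part of the original tutorial; see the linked export_arrays API docs for the accepted options), one could call `b.filter(context='model').export_arrays('forward_model.txt')` to write the synthetic arrays to a text file.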
# We can then plot the resulting model with [b.plot](../api/phoebe.parameters.ParameterSet.plot.md), which will be covered in the [plotting](plotting.ipynb) tutorial.
# In[9]:
afig, mplfig = b.plot(show=True)
# And then lastly, if we wanted to solve the inverse problem and "fit" parameters to observational data, we may want to add [distributions](distributions.ipynb) to our system so that we can run [estimators, optimizers, or samplers](solver.ipynb).
# ## Default Binary Bundle
# For this tutorial, let's start over and discuss this `b` object in more detail and how to access and change the values of the input parameters.
#
# Everything for our system will be stored in this single Python object that we call the [Bundle](../api/phoebe.frontend.bundle.Bundle.md) which we'll call `b` (short for bundle).
# In[10]:
b = phoebe.default_binary()
# The Bundle is just a collection of [Parameter](../api/phoebe.parameters.Parameter.md) objects along with some callable methods. Here we can see that the default binary Bundle consists of over 100 individual parameters.
# In[11]:
b
# If we want to view or edit a Parameter in the Bundle, we first need to know how to access it. Each Parameter object has a number of tags which can be used to [filter](../api/phoebe.parameters.ParameterSet.filter.md) (similar to a database query). When filtering the Bundle, a [ParameterSet](../api/phoebe.parameters.ParameterSet.md) is returned - this is essentially just a subset of the Parameters in the Bundle and can be further filtered until eventually accessing a single Parameter.
# In[12]:
b.filter(context='compute')
# Here we filtered on the context tag for all Parameters with `context='compute'` (i.e. the options for computing a model). If we want to see all the available options for this tag in the Bundle, we can use the plural form of the tag as a property on the Bundle or any ParameterSet.
# In[13]:
b.contexts
# Although there is no strict hierarchy or order to the tags, it can be helpful to think of the context tag as the top-level tag, and it is often useful to filter by the appropriate context first.
#
# Other tags currently include:
# * kind
# * figure
# * component
# * feature
# * dataset
# * distribution
# * compute
# * model
# * solver
# * solution
# * time
# * qualifier
# Accessing the plural form of the tag as an attribute also works on a filtered ParameterSet
# In[14]:
b.filter(context='compute').components
# This then tells us what can be used to filter further.
# In[15]:
b.filter(context='compute').filter(component='primary')
# The qualifier tag is the shorthand name of the Parameter itself. If you don't know what you're looking for, it is often useful to list all the qualifiers of the Bundle or a given ParameterSet.
# In[16]:
b.filter(context='compute', component='primary').qualifiers
# Now that we know the options for the qualifier within this filter, we can choose to filter on one of those. Let's filter by the 'ntriangles' qualifier.
# In[17]:
b.filter(context='compute', component='primary', qualifier='ntriangles')
# Once we filter far enough to get to a single Parameter, we can use [get_parameter](../api/phoebe.parameters.ParameterSet.get_parameter.md) to return the Parameter object itself (instead of a ParameterSet).
# In[18]:
b.filter(context='compute', component='primary', qualifier='ntriangles').get_parameter()
# As a shortcut, get_parameter also takes filtering keywords. So the above line is also equivalent to the following:
# In[19]:
b.get_parameter(context='compute', component='primary', qualifier='ntriangles')
# Each Parameter object contains several keys that provide information about that Parameter. The keys "description" and "value" are always included, with additional keys available depending on the type of Parameter.
# In[20]:
b.get_parameter(context='compute', component='primary', qualifier='ntriangles').get_value()
# In[21]:
b.get_parameter(context='compute', component='primary', qualifier='ntriangles').get_description()
# We can also see a top-level view of the filtered parameters and descriptions (note: the syntax with @ symbols will be explained further in the section on twigs below).
# In[22]:
print(b.filter(context='compute', component='primary').info)
# Since the Parameter for `ntriangles` is a FloatParameter, it also includes a key for the allowable limits.
# In[23]:
b.get_parameter(context='compute', component='primary', qualifier='ntriangles').get_limits()
# In this case, we're looking at the Parameter called `ntriangles` with the component tag set to 'primary'. This Parameter therefore defines how many triangles should be created when creating the mesh for the star named 'primary'. By default, this is set to 1500 triangles, with allowable values above 100.
#
# If we wanted a finer mesh, we could change the value.
# In[24]:
b.get_parameter(context='compute', component='primary', qualifier='ntriangles').set_value(2000)
# In[25]:
b.get_parameter(context='compute', component='primary', qualifier='ntriangles')
# If we choose the `distortion_method` qualifier from that same ParameterSet, we'll see that it has a few different keys in addition to description and value.
# In[26]:
b.get_parameter(context='compute', component='primary', qualifier='distortion_method')
# In[27]:
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').get_value()
# In[28]:
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').get_description()
# Since the distortion_method Parameter is a [ChoiceParameter](../api/phoebe.parameters.ChoiceParameter.md), it contains a key for the allowable choices.
# In[29]:
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').get_choices()
# We can only set a value if it is contained within this list - if you attempt to set a non-valid value, an error will be raised.
# In[30]:
try:
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').set_value('blah')
except Exception as e:
print(e)
# In[31]:
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').set_value('rotstar')
# In[32]:
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').get_value()
# [Parameter](../api/phoebe.parameters.Parameter.md) types include:
# * [IntParameter](../api/phoebe.parameters.IntParameter.md)
# * [FloatParameter](../api/phoebe.parameters.FloatParameter.md)
# * [FloatArrayParameter](../api/phoebe.parameters.FloatArrayParameter.md)
# * [BoolParameter](../api/phoebe.parameters.BoolParameter.md)
# * [StringParameter](../api/phoebe.parameters.StringParameter.md)
# * [ChoiceParameter](../api/phoebe.parameters.ChoiceParameter.md)
# * [SelectParameter](../api/phoebe.parameters.SelectParameter.md)
# * [DictParameter](../api/phoebe.parameters.DictParameter.md)
# * [ConstraintParameter](../api/phoebe.parameters.ConstraintParameter.md)
# * [DistributionParameter](../api/phoebe.parameters.DistributionParameter.md)
# * [HierarchyParameter](../api/phoebe.parameters.HierarchyParameter.md)
# * [UnitParameter](../api/phoebe.parameters.UnitParameter.md)
# * [JobParameter](../api/phoebe.parameters.JobParameter.md)
#
# these Parameter types and their available options are all described in great detail in [Advanced: Parameter Types](parameters.ipynb)
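# As a quick illustration (added here as an aside), we can always check which of these types a given Parameter is:
# In[ ]:
print(b.get_parameter(context='compute', component='primary', qualifier='ntriangles').__class__.__name__)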
# ### Twigs
# As a shortcut to needing to filter by all these tags, the Bundle and ParameterSets can be filtered through what we call "twigs" (as in a Bundle of twigs). These are essentially a single string-representation of the tags, separated by `@` symbols.
#
# This is very useful as a shorthand when working in an interactive Python console, but somewhat obfuscates the names of the tags and can make it difficult if you use them in a script and make changes earlier in the script.
#
# For example, the following lines give identical results:
# In[33]:
b.filter(context='compute', component='primary')
# In[34]:
b['primary@compute']
# In[35]:
b['compute@primary']
# However, this dictionary-style twig access will never return a ParameterSet with a single Parameter; instead, it will return the Parameter itself. This can be seen in the different output between the following two lines:
# In[36]:
b.filter(context='compute', component='primary', qualifier='distortion_method')
# In[37]:
b['distortion_method@primary@compute']
# Because of this, this dictionary-style twig access can also set the value directly:
# In[38]:
b['distortion_method@primary@compute'] = 'roche'
# In[39]:
print(b['distortion_method@primary@compute'])
# And can even provide direct access to the keys/attributes of the Parameter (value, description, limits, etc)
# In[40]:
print(b['value@distortion_method@primary@compute'])
# In[41]:
print(b['description@distortion_method@primary@compute'])
# As with the tags, you can call .twigs on any ParameterSet to see the "smallest unique twigs" of the contained Parameters.
# In[42]:
b['compute'].twigs
# Since the more verbose method without twigs is a bit clearer to read, most of the tutorials will show that syntax, but feel free to use twigs if they make more sense to you.
# Next
# ----------
#
# Next up: let's learn about [constraints](constraints.ipynb).
#
# Or look at any of the following advanced topics:
# * [Advanced: Parameter Types](parameters.ipynb)
# * [Advanced: Parameter Units](units.ipynb)
# * [Advanced: Building a System](building_a_system.ipynb)
# * [Advanced: Contact Binary Hierarchy](contact_binary_hierarchy.ipynb)
# * [Advanced: Saving, Loading, and Exporting](saving_and_loading.ipynb)
| gpl-3.0 |
jayflo/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
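# Illustrative example (added sketch, not part of the original module): for the
# 3-node path graph below, graph_laplacian(G) returns the usual D - A matrix,
# i.e. [[1, -1, 0], [-1, 2, -1], [0, -1, 1]].
#
#   G = np.array([[0, 1, 0],
#                 [1, 0, 1],
#                 [0, 1, 0]])
#   L = graph_laplacian(G)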
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
snurkabill/pydeeplearn | code/lib/ann.py | 2 | 21874 | """ Implementation of a simple ANN. """
__author__ = "Mihaela Rosca"
__contact__ = "mihaela.c.rosca@gmail.com"
import numpy as np
import theano
from theano import tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import matplotlib.pyplot as plt
theanoFloat = theano.config.floatX
"""In all the above topLayer does not mean the top most layer, but rather the
layer above the current one."""
# TODO: different activation function and try relu
# and fix this
from common import *
from debug import *
DEBUG = False
class MiniBatchTrainer(object):
# TODO: maybe creating the ring here might be better?
def __init__(self, input, nrLayers, initialWeights, initialBiases,
visibleDropout, hiddenDropout):
self.input = input
# Let's initialize the fields
# The weights and biases, make them shared variables
self.weights = []
self.biases = []
nrWeights = nrLayers - 1
for i in xrange(nrWeights):
w = theano.shared(value=np.asarray(initialWeights[i],
dtype=theanoFloat),
name='W')
self.weights.append(w)
b = theano.shared(value=np.asarray(initialBiases[i],
dtype=theanoFloat),
name='b')
self.biases.append(b)
# Set the parameters of the object
# Do not set more than this, these will be used for differentiation in the
# gradient
self.params = self.weights + self.biases
# Required for setting the norm constraint
# Note that only the hidden units have norm constraint
# The last layer (softmax) does not have it
self.hasNormConstraint = [True] * (nrWeights - 1) + [False] * (nrWeights + 1)
# Required for momentum
# The updates that were performed in the last batch
# It is important that the order in which
# we add the oldUpdates is the same as which we add the params
# TODO: add an assertion for this
self.oldUpdates = []
for i in xrange(nrWeights):
oldDw = theano.shared(value=np.zeros(shape=initialWeights[i].shape,
dtype=theanoFloat),
name='oldDw')
self.oldUpdates.append(oldDw)
for i in xrange(nrWeights):
oldDb = theano.shared(value=np.zeros(shape=initialBiases[i].shape,
dtype=theanoFloat),
name='oldDb')
self.oldUpdates.append(oldDb)
# Rmsprop
# The old mean that were performed in the last batch
self.oldMeanSquare = []
for i in xrange(nrWeights):
oldDw = theano.shared(value=np.zeros(shape=initialWeights[i].shape,
dtype=theanoFloat),
name='oldDw')
self.oldMeanSquare.append(oldDw)
for i in xrange(nrWeights):
oldDb = theano.shared(value=np.zeros(shape=initialBiases[i].shape,
dtype=theanoFloat),
name='oldDb')
self.oldMeanSquare.append(oldDb)
# Create a theano random number generator
# Required to sample units for dropout
    # If it is not shared, does it get updated when we go to another function call?
self.theano_rng = RandomStreams(seed=np.random.randint(1, 1000))
# Sample from the visible layer
# Get the mask that is used for the visible units
dropout_mask = self.theano_rng.binomial(n=1, p=visibleDropout,
size=self.input.shape,
dtype=theanoFloat)
currentLayerValues = self.input * dropout_mask
for stage in xrange(nrWeights -1):
w = self.weights[stage]
b = self.biases[stage]
linearSum = T.dot(currentLayerValues, w) + b
# TODO: make this a function that you pass around
# it is important to make the classification activation functions outside
# Also check the Stamford paper again to what they did to average out
# the results with softmax and regression layers?
# Use hiddenDropout: give the next layer only some of the units
# from this layer
dropout_mask = self.theano_rng.binomial(n=1, p=hiddenDropout,
size=linearSum.shape,
dtype=theanoFloat)
currentLayerValues = dropout_mask * T.nnet.sigmoid(linearSum)
# Last layer operations
w = self.weights[nrWeights - 1]
b = self.biases[nrWeights - 1]
linearSum = T.dot(currentLayerValues, w) + b
# Do not use theano's softmax, it is numerically unstable
# and it causes Nans to appear
# Note that semantically this is the same
e_x = T.exp(linearSum - linearSum.max(axis=1, keepdims=True))
currentLayerValues = e_x / e_x.sum(axis=1, keepdims=True)
self.output = currentLayerValues
def cost(self, y):
return T.nnet.categorical_crossentropy(self.output, y)
""" Class that implements an artificial neural network."""
class ANN(object):
"""
Arguments:
nrLayers: the number of layers of the network. In case of discriminative
traning, also contains the classifcation layer
(the last softmax layer)
type: integer
layerSizes: the sizes of the individual layers.
type: list of integers of size nrLayers
"""
def __init__(self, nrLayers, layerSizes,
supervisedLearningRate=0.05,
nesterovMomentum=True,
rmsprop=True,
miniBatchSize=10,
hiddenDropout=0.5,
visibleDropout=0.8,
normConstraint=None):
self.nrLayers = nrLayers
self.layerSizes = layerSizes
assert len(layerSizes) == nrLayers
self.hiddenDropout = hiddenDropout
self.visibleDropout = visibleDropout
self.miniBatchSize = miniBatchSize
self.supervisedLearningRate = supervisedLearningRate
self.nesterovMomentum = nesterovMomentum
self.rmsprop = rmsprop
self.normConstraint = normConstraint
def initialize(self, data):
self.weights = [None] * (self.nrLayers - 1)
self.biases = [None] * (self.nrLayers - 1)
for i in xrange(self.nrLayers - 2):
self.weights[i] = np.asarray(np.random.normal(0, 0.01,
(self.layerSizes[i], self.layerSizes[i+1])),
dtype=theanoFloat)
self.biases[i] = np.zeros(shape=(self.layerSizes[i+1]),
dtype=theanoFloat)
lastLayerWeights = np.zeros(shape=(self.layerSizes[-2], self.layerSizes[-1]),
dtype=theanoFloat)
lastLayerBiases = np.zeros(shape=(self.layerSizes[-1]),
dtype=theanoFloat)
self.weights[-1] = lastLayerWeights
self.biases[-1] = lastLayerBiases
assert len(self.weights) == self.nrLayers - 1
assert len(self.biases) == self.nrLayers - 1
"""
Choose a percentage (percentValidation) of the data given to be
validation data, used for early stopping of the model.
"""
def train(self, data, labels, maxEpochs, validation=True, percentValidation=0.1):
if validation:
nrInstances = len(data)
      validationIndices = np.random.choice(xrange(nrInstances),
                                           int(percentValidation * nrInstances))
trainingIndices = list(set(xrange(nrInstances)) - set(validationIndices))
trainingData = data[trainingIndices, :]
trainingLabels = labels[trainingIndices, :]
validationData = data[validationIndices, :]
validationLabels = labels[validationIndices, :]
      self.trainWithGivenValidationSet(trainingData, trainingLabels,
                                       validationData, validationLabels,
                                       maxEpochs)
else:
trainingData = data
trainingLabels = labels
self.trainNoValidation(trainingData, trainingLabels, maxEpochs)
def trainWithGivenValidationSet(self, data, labels,
validationData,
validationLabels,
maxEpochs):
sharedData = theano.shared(np.asarray(data, dtype=theanoFloat))
sharedLabels = theano.shared(np.asarray(labels, dtype=theanoFloat))
self.initialize(data)
self.nrMiniBatches = len(data) / self.miniBatchSize
sharedValidationData = theano.shared(np.asarray(validationData, dtype=theanoFloat))
sharedValidationLabels = theano.shared(np.asarray(validationLabels, dtype=theanoFloat))
    # Does backprop for the data and at the end sets the weights
    self.fineTune(sharedData, sharedLabels, True,
                  sharedValidationData, sharedValidationLabels, maxEpochs)
# Get the classification weights
self.classifcationWeights = map(lambda x: x * self.hiddenDropout, self.weights)
self.classifcationBiases = self.biases
def trainNoValidation(self, data, labels, maxEpochs):
sharedData = theano.shared(np.asarray(data, dtype=theanoFloat))
sharedLabels = theano.shared(np.asarray(labels, dtype=theanoFloat))
self.initialize(data)
self.nrMiniBatches = len(data) / self.miniBatchSize
    # Does backprop for the data and at the end sets the weights
self.fineTune(sharedData, sharedLabels, False, None, None, maxEpochs)
# Get the classification weights
self.classifcationWeights = map(lambda x: x * self.hiddenDropout, self.weights)
self.classifcationBiases = self.biases
"""Fine tunes the weigths and biases using backpropagation.
data and labels are shared
Arguments:
data: The data used for traning and fine tuning
data has to be a theano variable for it to work in the current version
labels: A numpy nd array. Each label should be transformed into a binary
base vector before passed into this function.
miniBatch: The number of instances to be used in a miniBatch
epochs: The number of epochs to use for fine tuning
"""
def fineTune(self, data, labels, validation, validationData, validationLabels,
maxEpochs):
print "supervisedLearningRate"
print self.supervisedLearningRate
batchLearningRate = self.supervisedLearningRate / self.miniBatchSize
batchLearningRate = np.float32(batchLearningRate)
# Let's build the symbolic graph which takes the data trough the network
# allocate symbolic variables for the data
# index of a mini-batch
miniBatchIndex = T.lscalar()
momentum = T.fscalar()
# The mini-batch data is a matrix
x = T.matrix('x', dtype=theanoFloat)
# labels[start:end] this needs to be a matrix because we output probabilities
y = T.matrix('y', dtype=theanoFloat)
batchTrainer = MiniBatchTrainer(input=x, nrLayers=self.nrLayers,
initialWeights=self.weights,
initialBiases=self.biases,
                                    visibleDropout=self.visibleDropout,
                                    hiddenDropout=self.hiddenDropout)
# the error is the sum of the errors in the individual cases
error = T.sum(batchTrainer.cost(y))
if DEBUG:
mode = theano.compile.MonitorMode(post_func=detect_nan).excluding(
'local_elemwise_fusion', 'inplace')
else:
mode = None
if self.nesterovMomentum:
preDeltaUpdates, updates = self.buildUpdatesNesterov(batchTrainer, momentum,
batchLearningRate, error)
momentum_step = theano.function(
inputs=[momentum],
outputs=[],
updates=preDeltaUpdates,
mode = mode)
update_params = theano.function(
inputs =[miniBatchIndex, momentum],
outputs=error,
updates=updates,
givens={
x: data[miniBatchIndex * self.miniBatchSize:(miniBatchIndex + 1) * self.miniBatchSize],
y: labels[miniBatchIndex * self.miniBatchSize:(miniBatchIndex + 1) * self.miniBatchSize]},
mode=mode)
def trainModel(miniBatchIndex, momentum):
momentum_step(momentum)
return update_params(miniBatchIndex, momentum)
else:
updates = self.buildUpdatesSimpleMomentum(batchTrainer, momentum,
batchLearningRate, error)
trainModel = theano.function(
inputs=[miniBatchIndex, momentum],
outputs=error,
updates=updates,
givens={
x: data[miniBatchIndex * self.miniBatchSize:(miniBatchIndex + 1) * self.miniBatchSize],
y: labels[miniBatchIndex * self.miniBatchSize:(miniBatchIndex + 1) * self.miniBatchSize]})
theano.printing.pydotprint(trainModel)
if validation:
# Let's create the function that validates the model!
validateModel = theano.function(inputs=[],
outputs=batchTrainer.cost(y),
givens={x: validationData,
y: validationLabels})
self.trainLoopWithValidation(trainModel, validateModel, maxEpochs)
else:
if validationData is not None or validationLabels is not None:
raise Exception(("You provided validation data but requested a train method "
"that does not need validation"))
self.trainLoopModelFixedEpochs(batchTrainer, trainModel, maxEpochs)
# Set up the weights in the dbn object
for i in xrange(len(self.weights)):
self.weights[i] = batchTrainer.weights[i].get_value()
print self.weights
for i in xrange(len(self.biases)):
self.biases[i] = batchTrainer.biases[i].get_value()
print self.biases
def trainLoopModelFixedEpochs(self, batchTrainer, trainModel, maxEpochs):
for epoch in xrange(maxEpochs):
print "epoch " + str(epoch)
momentum = np.float32(min(np.float32(0.5) + epoch * np.float32(0.01),
np.float32(0.99)))
for batchNr in xrange(self.nrMiniBatches):
trainModel(batchNr, momentum)
      # Only check the norm constraint when one was actually requested
      if self.normConstraint is not None:
        for i in xrange(self.nrLayers - 2):
          assert np.all(np.linalg.norm(batchTrainer.weights[i].get_value(), axis=0) <= self.normConstraint + 1e-8)
print "number of epochs"
print epoch
def trainLoopWithValidation(self, trainModel, validateModel, maxEpochs):
lastValidationError = np.inf
count = 0
epoch = 0
validationErrors = []
while epoch < maxEpochs and count < 8:
print "epoch " + str(epoch)
momentum = np.float32(min(np.float32(0.5) + epoch * np.float32(0.01),
np.float32(0.99)))
for batchNr in xrange(self.nrMiniBatches):
trainModel(batchNr, momentum)
meanValidation = np.mean(validateModel(), axis=0)
validationErrors += [meanValidation]
if meanValidation > lastValidationError:
count +=1
else:
count = 0
lastValidationError = meanValidation
epoch +=1
try:
plt.plot(validationErrors)
plt.show()
    except Exception:
print "validation error plot not made"
print "number of epochs"
print epoch
# A very greedy approach to training
# Probably not the best idea but worth trying
# A more mild version would be to actually take 3 conescutive ones
# that give the best average (to ensure you are not in a luck place)
# and take the best of them
  def trainModelGetBestWeights(self, batchTrainer, trainModel, validateModel, maxEpochs):
bestValidationError = np.inf
validationErrors = []
bestWeights = None
bestBiases = None
for epoch in xrange(maxEpochs):
print "epoch " + str(epoch)
momentum = np.float32(min(np.float32(0.5) + epoch * np.float32(0.01),
np.float32(0.99)))
for batchNr in xrange(self.nrMiniBatches):
trainModel(batchNr, momentum)
meanValidation = np.mean(validateModel(), axis=0)
validationErrors += [meanValidation]
if meanValidation < bestValidationError:
bestValidationError = meanValidation
        # Snapshot the best weights and biases by value, not by reference,
        # so that later parameter updates do not overwrite them
        bestWeights = [w.get_value(borrow=False) for w in batchTrainer.weights]
        bestBiases = [b.get_value(borrow=False) for b in batchTrainer.biases]
    # If we have improved at all during training, restore the best values
    if bestWeights is not None and bestBiases is not None:
      for w, value in zip(batchTrainer.weights, bestWeights):
        w.set_value(value)
      for b, value in zip(batchTrainer.biases, bestBiases):
        b.set_value(value)
try:
plt.plot(validationErrors)
plt.show()
    except Exception:
print "validation error plot not made"
print "number of epochs"
print epoch
def trainModelPatience(self, trainModel, validateModel, maxEpochs):
bestValidationError = np.inf
epoch = 0
doneTraining = False
improvmentTreshold = 0.995
    patience = 10 # do at least 10 passes through the data no matter what
while (epoch < maxEpochs) and not doneTraining:
# Train the net with all data
print "epoch " + str(epoch)
momentum = np.float32(min(np.float32(0.5) + epoch * np.float32(0.01),
np.float32(0.99)))
for batchNr in xrange(self.nrMiniBatches):
trainModel(batchNr, momentum)
      # validateModel() returns the per-instance validation costs; average them
      meanValidation = np.mean(validateModel())
print 'meanValidation'
print meanValidation
if meanValidation < bestValidationError:
# If we have improved well enough, then increase the patience
if meanValidation < bestValidationError * improvmentTreshold:
print "increasing patience"
patience = max(patience, epoch * 2)
bestValidationError = meanValidation
if patience <= epoch:
doneTraining = True
epoch += 1
print "number of epochs"
print epoch
def buildUpdatesNesterov(self, batchTrainer, momentum,
batchLearningRate, error):
preDeltaUpdates = []
for param, oldUpdate in zip(batchTrainer.params, batchTrainer.oldUpdates):
preDeltaUpdates.append((param, param + momentum * oldUpdate))
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs
deltaParams = T.grad(error, batchTrainer.params)
updates = []
parametersTuples = zip(batchTrainer.params,
deltaParams,
batchTrainer.oldUpdates,
batchTrainer.oldMeanSquare,
batchTrainer.hasNormConstraint)
for param, delta, oldUpdate, oldMeanSquare, hasNormConstraint in parametersTuples:
if self.rmsprop:
meanSquare = 0.9 * oldMeanSquare + 0.1 * delta ** 2
paramUpdate = - batchLearningRate * delta / T.sqrt(meanSquare + 1e-8)
updates.append((oldMeanSquare, meanSquare))
else:
paramUpdate = - batchLearningRate * delta
newParam = param + paramUpdate
if self.normConstraint is not None and hasNormConstraint:
norms = SquaredElementWiseNorm(newParam)
rescaled = norms > self.normConstraint
factors = T.ones(norms.shape, dtype=theanoFloat) / T.sqrt(norms) * np.sqrt(self.normConstraint, dtype='float32') - 1.0
replaceNewParam = (factors * rescaled) * newParam
replaceNewParam += newParam
newParam = replaceNewParam
# paramUpdate = newParam - param
updates.append((param, newParam))
updates.append((oldUpdate, momentum * oldUpdate + paramUpdate))
return preDeltaUpdates, updates
def buildUpdatesSimpleMomentum(self, batchTrainer, momentum,
batchLearningRate, error):
deltaParams = T.grad(error, batchTrainer.params)
updates = []
parametersTuples = zip(batchTrainer.params,
deltaParams,
batchTrainer.oldUpdates,
batchTrainer.oldMeanSquare,
batchTrainer.hasNormConstraint)
for param, delta, oldUpdate, oldMeanSquare, hasNormConstraint in parametersTuples:
paramUpdate = momentum * oldUpdate
if self.rmsprop:
meanSquare = 0.9 * oldMeanSquare + 0.1 * delta ** 2
paramUpdate += - batchLearningRate * delta / T.sqrt(meanSquare + 1e-8)
updates.append((oldMeanSquare, meanSquare))
else:
paramUpdate += - batchLearningRate * delta
newParam = param + paramUpdate
if self.normConstraint is not None and hasNormConstraint:
norms = SquaredElementWiseNorm(newParam)
rescaled = norms > self.normConstraint
factors = T.ones(norms.shape, dtype=theanoFloat) / T.sqrt(norms) * np.sqrt(self.normConstraint, dtype='float32') - 1.0
replaceNewParam = (factors * rescaled) * newParam
replaceNewParam += newParam
newParam = replaceNewParam
# paramUpdate = newParam - param
updates.append((param, newParam))
updates.append((oldUpdate, paramUpdate))
return updates
def classify(self, dataInstaces):
dataInstacesConverted = np.asarray(dataInstaces, dtype=theanoFloat)
x = T.matrix('x', dtype=theanoFloat)
# Use the classification weights because now we have hiddenDropout
# Ensure that you have no hiddenDropout in classification
# TODO: are the variables still shared? or can we make a new one?
batchTrainer = MiniBatchTrainer(input=x, nrLayers=self.nrLayers,
initialWeights=self.classifcationWeights,
initialBiases=self.classifcationBiases,
visibleDropout=1,
hiddenDropout=1)
classify = theano.function(
inputs=[],
outputs=batchTrainer.output,
updates={},
givens={x: dataInstacesConverted})
lastLayers = classify()
return lastLayers, np.argmax(lastLayers, axis=1)
# Element wise norm of the columns of a matrix
def SquaredElementWiseNorm(x):
return T.sum(T.sqr(x), axis=0)
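# Illustrative usage sketch (not part of the original module; the data arrays,
# one-hot labels and hyperparameters below are assumptions for demonstration):
#
#   net = ANN(nrLayers=3, layerSizes=[784, 500, 10],
#             supervisedLearningRate=0.05, miniBatchSize=10)
#   net.train(trainData, trainLabelsOneHot, maxEpochs=10, validation=False)
#   probabilities, predictions = net.classify(testData)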
| bsd-3-clause |